max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
sklearn/iris_training.py | tmatsuo/cloudml-samples | 1,552 | 11075797 | <filename>sklearn/iris_training.py
# This file is for training on AI Platform with scikit-learn.
# [START setup]
import datetime
import os
import subprocess
import sys
import pandas as pd
from sklearn import svm
from sklearn.externals import joblib
# Fill in your Cloud Storage bucket name
BUCKET_NAME = '<YOUR_BUCKET_NAME>'
# [END setup]
# [START download-data]
iris_data_filename = 'iris_data.csv'
iris_target_filename = 'iris_target.csv'
data_dir = 'gs://cloud-samples-data/ml-engine/iris'
# gsutil outputs everything to stderr so we need to divert it to stdout.
subprocess.check_call(['gsutil', 'cp', os.path.join(data_dir,
iris_data_filename),
iris_data_filename], stderr=sys.stdout)
subprocess.check_call(['gsutil', 'cp', os.path.join(data_dir,
iris_target_filename),
iris_target_filename], stderr=sys.stdout)
# [END download-data]
# [START load-into-pandas]
# Load data into pandas, then use `.values` to get NumPy arrays
iris_data = pd.read_csv(iris_data_filename).values
iris_target = pd.read_csv(iris_target_filename).values
# Convert one-column 2D array into 1D array for use with scikit-learn
iris_target = iris_target.reshape((iris_target.size,))
# [END load-into-pandas]
# [START train-and-save-model]
# Train the model
classifier = svm.SVC(gamma='auto', verbose=True)
classifier.fit(iris_data, iris_target)
# Export the classifier to a file
model_filename = 'model.joblib'
joblib.dump(classifier, model_filename)
# [END train-and-save-model]
# [START upload-model]
# Upload the saved model file to Cloud Storage
gcs_model_path = os.path.join('gs://', BUCKET_NAME,
datetime.datetime.now().strftime('iris_%Y%m%d_%H%M%S'), model_filename)
subprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path],
stderr=sys.stdout)
# [END upload-model]
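# A small sanity check added for illustration (not part of the original sample):
# reload the exported model and predict on a few training rows. Note that
# `sklearn.externals.joblib` is deprecated in newer scikit-learn releases, where
# you would `import joblib` directly instead.
reloaded_classifier = joblib.load(model_filename)
print('Sample predictions:', reloaded_classifier.predict(iris_data[:5]))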
|
ntc_templates/parse.py | austind/ntc-templates | 817 | 11075803 | <gh_stars>100-1000
"""ntc_templates.parse."""
import os
# Due to TextFSM library issues on Windows, it is better not to fail on import,
# but to fail at runtime instead (i.e. only if the method is actually used).
try:
from textfsm import clitable
HAS_CLITABLE = True
except ImportError:
HAS_CLITABLE = False
def _get_template_dir():
template_dir = os.environ.get("NTC_TEMPLATES_DIR")
if template_dir is None:
package_dir = os.path.dirname(__file__)
template_dir = os.path.join(package_dir, "templates")
if not os.path.isdir(template_dir):
project_dir = os.path.dirname(os.path.dirname(os.path.dirname(template_dir)))
template_dir = os.path.join(project_dir, "templates")
return template_dir
def _clitable_to_dict(cli_table):
"""Convert TextFSM cli_table object to list of dictionaries."""
objs = []
for row in cli_table:
temp_dict = {}
for index, element in enumerate(row):
temp_dict[cli_table.header[index].lower()] = element
objs.append(temp_dict)
return objs
def parse_output(platform=None, command=None, data=None):
"""Return the structured data based on the output from a network device."""
if not HAS_CLITABLE:
msg = """
The TextFSM library is not currently supported on Windows. If you are NOT using Windows
you should be able to 'pip install textfsm' to fix this issue. If you are using Windows
then you will need to install the patch referenced here:
https://github.com/google/textfsm/pull/82
"""
raise ImportError(msg)
template_dir = _get_template_dir()
cli_table = clitable.CliTable("index", template_dir)
attrs = {"Command": command, "Platform": platform}
try:
cli_table.ParseCmd(data, attrs)
structured_data = _clitable_to_dict(cli_table)
except clitable.CliTableError as e:
raise Exception(
'Unable to parse command "{0}" on platform {1} - {2}'.format(
command, platform, str(e)
)
)
# Invalid or Missing template
# module.fail_json(msg='parsing error', error=str(e))
# rather than fail, fallback to return raw text
# structured_data = [data]
return structured_data
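# A minimal usage sketch (not part of the original module), assuming the bundled
# index contains a template for the platform/command pair below:
if __name__ == "__main__":
    raw_output = (
        "Interface              IP-Address      OK? Method Status                Protocol\n"
        "GigabitEthernet0/1     192.168.1.1     YES NVRAM  up                    up\n"
    )
    print(parse_output(platform="cisco_ios", command="show ip interface brief",
                       data=raw_output))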
|
examples/rl/environments/cartpole/cart_pgpe.py | sveilleux1/pybrain | 2,208 | 11075819 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with PGPE on the CartPoleEnvironment
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
#########################################################################
__author__ = "<NAME>, <NAME>"
__version__ = '$Id$'
from pybrain.tools.example_tools import ExTools
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.cartpole import CartPoleEnvironment, BalanceTask
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import PGPE
from pybrain.rl.experiments import EpisodicExperiment
batch=1 #number of samples per learning step
prnts=100 #number of learning steps after results are printed
epis = 4000 // batch // prnts  # number of rollouts (integer division so range() gets an int)
numbExp=10 #number of experiments
et = ExTools(batch, prnts) #tool for printing and plotting
for runs in range(numbExp):
# create environment
env = CartPoleEnvironment()
# create task
task = BalanceTask(env, 200, desiredValue=None)
# create controller network
net = buildNetwork(4, 1, bias=False)
# create agent with controller and learner (and its options)
agent = OptimizationAgent(net, PGPE(storeAllEvaluations = True))
et.agent = agent
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
et.addExps()
et.showExps()
|
myimports/goo/ber/goober.py | noirqs/PyImports | 118 | 11075826 | <reponame>noirqs/PyImports<filename>myimports/goo/ber/goober.py
#!/usr/bin/env python
class Goober(object):
def echo(self, v):
return v
|
roles/openshift_health_checker/test/action_plugin_test.py | shgriffi/openshift-ansible | 164 | 11075865 | import pytest
from ansible.playbook.play_context import PlayContext
from openshift_health_check import ActionModule, resolve_checks
from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
from openshift_checks import OpenShiftCheckException, FileToSave
def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
run_logs=None, run_files=None, changed=False, get_var_return=None):
"""Returns a new class that is compatible with OpenShiftCheck for testing."""
_name, _tags = name, tags
class FakeCheck(object):
name = _name
tags = _tags or []
def __init__(self, **_):
self.changed = False
self.failures = []
self.logs = run_logs or []
self.files_to_save = run_files or []
def is_active(self):
if isinstance(is_active, Exception):
raise is_active
return is_active
def run(self):
self.changed = changed
if run_exception is not None:
raise run_exception
return run_return
def get_var(*args, **_):
return get_var_return
def register_failure(self, exc):
self.failures.append(OpenShiftCheckException(str(exc)))
return
return FakeCheck
# Fixtures
@pytest.fixture
def plugin():
task = FakeTask('openshift_health_check', {'checks': ['fake_check']})
plugin = ActionModule(task, None, PlayContext(), None, None, None)
return plugin
class FakeTask(object):
def __init__(self, action, args):
self.action = action
self.args = args
self.async = 0
@pytest.fixture
def task_vars():
return dict(openshift=dict(), ansible_host='unit-test-host')
# Assertion helpers
def failed(result, msg_has=None):
if msg_has is not None:
assert 'msg' in result
for term in msg_has:
assert term.lower() in result['msg'].lower()
return result.get('failed', False)
def changed(result):
return result.get('changed', False)
# tests whether task is skipped, not individual checks
def skipped(result):
return result.get('skipped', False)
# Tests
@pytest.mark.parametrize('task_vars', [
None,
{},
])
def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch):
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result, msg_has=['openshift_facts'])
def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars, monkeypatch):
FakeCheck1 = fake_check('duplicate_name')
FakeCheck2 = fake_check('duplicate_name')
checks = [FakeCheck1, FakeCheck2]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
@pytest.mark.parametrize('is_active, skipped_reason', [
(False, "Not active for this host"),
(Exception("borked"), "exception"),
])
def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
checks = [fake_check(is_active=is_active)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'].get('skipped')
assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
assert not failed(result)
assert not changed(result)
assert not skipped(result)
@pytest.mark.parametrize('to_disable', [
'fake_check',
['fake_check', 'spam'],
'*,spam,eggs',
])
def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
checks = [fake_check('fake_check', is_active=True)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
task_vars['openshift_disable_check'] = to_disable
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
assert not failed(result)
assert not changed(result)
assert not skipped(result)
def test_action_plugin_run_list_checks(monkeypatch):
task = FakeTask('openshift_health_check', {'checks': []})
plugin = ActionModule(task, None, PlayContext(), None, None, None)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run()
assert failed(result, msg_has="Available checks")
assert not changed(result)
assert not skipped(result)
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, run_files=[None])
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == check_return_value
assert not failed(result)
assert not changed(result)
assert not skipped(result)
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, changed=True)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == check_return_value
assert changed(result['checks']['fake_check'])
assert not failed(result)
assert changed(result)
assert not skipped(result)
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
check_return_value = {'failed': True, 'msg': 'this is a failure'}
check_class = fake_check(run_return=check_return_value)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == check_return_value
assert failed(result, msg_has=['failed'])
assert not changed(result)
assert not skipped(result)
@pytest.mark.parametrize('exc_class, expect_traceback', [
(OpenShiftCheckException, False),
(Exception, True),
])
def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
exception_msg = 'fake check has an exception'
run_exception = exc_class(exception_msg)
check_class = fake_check(run_exception=run_exception, changed=True)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result['checks']['fake_check'], msg_has=exception_msg)
assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
assert failed(result, msg_has=['failed'])
assert changed(result['checks']['fake_check'])
assert changed(result)
assert not skipped(result)
def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
check_class = fake_check(
run_return={},
run_logs=[('thing', 'note')],
run_files=[
FileToSave('save.file', 'contents', None),
FileToSave('save.file', 'duplicate', None),
FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception
],
)
task_vars['openshift_checks_output_dir'] = str(tmpdir)
check_class.get_var = lambda self, name, **_: task_vars.get(name)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
plugin.run(tmp=None, task_vars=task_vars)
assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
assert any(path.basename == 'save.file' for path in tmpdir.visit())
assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result, msg_has=['unknown', 'name'])
assert not changed(result)
assert not skipped(result)
@pytest.mark.parametrize('names,all_checks,expected', [
([], [], set()),
(
['a', 'b'],
[
fake_check('a'),
fake_check('b'),
],
set(['a', 'b']),
),
(
['a', 'b', '@group'],
[
fake_check('from_group_1', ['group', 'another_group']),
fake_check('not_in_group', ['another_group']),
fake_check('from_group_2', ['preflight', 'group']),
fake_check('a'),
fake_check('b'),
],
set(['a', 'b', 'from_group_1', 'from_group_2']),
),
])
def test_resolve_checks_ok(names, all_checks, expected):
assert resolve_checks(names, all_checks) == expected
@pytest.mark.parametrize('names,all_checks,words_in_exception', [
(
['testA', 'testB'],
[],
['check', 'name', 'testA', 'testB'],
),
(
['@group'],
[],
['tag', 'name', 'group'],
),
(
['testA', 'testB', '@group'],
[],
['check', 'name', 'testA', 'testB', 'tag', 'group'],
),
(
['testA', 'testB', '@group'],
[
fake_check('from_group_1', ['group', 'another_group']),
fake_check('not_in_group', ['another_group']),
fake_check('from_group_2', ['preflight', 'group']),
],
['check', 'name', 'testA', 'testB'],
),
])
def test_resolve_checks_failure(names, all_checks, words_in_exception):
with pytest.raises(Exception) as excinfo:
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
@pytest.mark.parametrize('give_output_dir, result, expect_file', [
(False, None, False),
(True, dict(content="c3BhbQo=", encoding="base64"), True),
(True, dict(content="encoding error", encoding="base64"), False),
(True, dict(content="spam", no_encoding=None), True),
(True, dict(failed=True, msg="could not slurp"), False),
])
def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
check = fake_check()()
check.execute_module = lambda *args, **_: result
copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):
class Spam(object):
def __str__(self):
raise Exception("break str")
test = {1: object(), 2: Spam()}
test[3] = test
write_result_to_output_dir(str(tmpdir), test)
assert "Error writing" in test["output_files"]
output_dir = tmpdir.join("eggs")
output_dir.write("spam") # so now it's not a dir
write_to_output_file(str(output_dir), "somefile", "somedata")
assert "Could not write" in capsys.readouterr()[1]
monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
write_result_to_output_dir(str(tmpdir), test)
assert "Error creating" in test["output_files"]
|
veles/external/hog.py | AkshayJainG/veles | 1,007 | 11075889 | <reponame>AkshayJainG/veles
"""
Unless otherwise specified by LICENSE.txt files in individual
directories, all code is
Copyright (C) 2011, the scikit-image team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of skimage nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
from scipy import sqrt, pi, arctan2, cos, sin
from scipy.ndimage import uniform_filter
def hog(image, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(3, 3), visualise=False, normalize=False):
"""Extract Histogram of Oriented Gradients (HOG) for a given image.
Compute a Histogram of Oriented Gradients (HOG) by
1. (optional) global image normalization
2. computing the gradient image in x and y
3. computing gradient histograms
4. normalizing across blocks
5. flattening into a feature vector
Parameters
----------
image : (M, N) ndarray
Input image (greyscale).
orientations : int
Number of orientation bins.
pixels_per_cell : 2 tuple (int, int)
Size (in pixels) of a cell.
cells_per_block : 2 tuple (int,int)
Number of cells in each block.
visualise : bool, optional
Also return an image of the HOG.
normalize : bool, optional
Apply power law compression to normalize the image before
processing.
Returns
-------
newarr : ndarray
HOG for the image as a 1D (flattened) array.
hog_image : ndarray (if visualise=True)
A visualisation of the HOG image.
References
----------
* http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients
* <NAME> <NAME>, Histograms of Oriented Gradients for
Human Detection, IEEE Computer Society Conference on Computer
Vision and Pattern Recognition 2005 San Diego, CA, USA
"""
image = np.atleast_2d(image)
"""
The first stage applies an optional global image normalization
equalization that is designed to reduce the influence of illumination
effects. In practice we use gamma (power law) compression, either
computing the square root or the log of each color channel.
Image texture strength is typically proportional to the local surface
illumination so this compression helps to reduce the effects of local
shadowing and illumination variations.
"""
if image.ndim > 3:
raise ValueError("Currently only supports grey-level images")
if normalize:
image = sqrt(image)
"""
The second stage computes first order image gradients. These capture
contour, silhouette and some texture information, while providing
further resistance to illumination variations. The locally dominant
color channel is used, which provides color invariance to a large
extent. Variant methods may also include second order image derivatives,
which act as primitive bar detectors - a useful feature for capturing,
e.g. bar like structures in bicycles and limbs in humans.
"""
gx = np.zeros(image.shape)
gy = np.zeros(image.shape)
gx[:, :-1] = np.diff(image, n=1, axis=1)
gy[:-1, :] = np.diff(image, n=1, axis=0)
"""
The third stage aims to produce an encoding that is sensitive to
local image content while remaining resistant to small changes in
pose or appearance. The adopted method pools gradient orientation
information locally in the same way as the SIFT [Lowe 2004]
feature. The image window is divided into small spatial regions,
called "cells". For each cell we accumulate a local 1-D histogram
of gradient or edge orientations over all the pixels in the
cell. This combined cell-level 1-D histogram forms the basic
"orientation histogram" representation. Each orientation histogram
divides the gradient angle range into a fixed number of
predetermined bins. The gradient magnitudes of the pixels in the
cell are used to vote into the orientation histogram.
"""
magnitude = sqrt(gx ** 2 + gy ** 2)
orientation = arctan2(gy, (gx + 1e-15)) * (180 / pi) + 90
sy, sx = image.shape
cx, cy = pixels_per_cell
bx, by = cells_per_block
n_cellsx = int(np.floor(sx // cx)) # number of cells in x
n_cellsy = int(np.floor(sy // cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
for i in range(orientations):
# create new integral image for this orientation
# isolate orientations in this range
temp_ori = np.where(orientation < 180 / orientations * (i + 1),
orientation, 0)
temp_ori = np.where(orientation >= 180 / orientations * i,
temp_ori, 0)
# select magnitudes for those orientations
cond2 = temp_ori > 0
temp_mag = np.where(cond2, magnitude, 0)
orientation_histogram[:, :, i] = \
            uniform_filter(temp_mag, size=(cy, cx))[cy // 2::cy, cx // 2::cx]
# now for each cell, compute the histogram
# orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
radius = min(cx, cy) // 2 - 1
hog_image = None
if visualise:
hog_image = np.zeros((sy, sx), dtype=float)
if visualise:
from skimage import draw
for x in range(n_cellsx):
for y in range(n_cellsy):
for o in range(orientations):
centre = tuple([y * cy + cy // 2, x * cx + cx // 2])
dx = radius * cos(float(o) / orientations * np.pi)
dy = radius * sin(float(o) / orientations * np.pi)
rr, cc = draw.bresenham(centre[0] - dx, centre[1] - dy,
centre[0] + dx, centre[1] + dy)
hog_image[rr, cc] += orientation_histogram[y, x, o]
"""
The fourth stage computes normalization, which takes local groups of
cells and contrast normalizes their overall responses before passing
to next stage. Normalisation introduces better invariance to illumination,
shadowing, and edge contrast. It is performed by accumulating a measure
of local histogram "energy" over local groups of cells that we call
"blocks". The result is used to normalize each cell in the block.
Typically each individual cell is shared between several blocks, but
its normalizations are block dependent and thus different. The cell
thus appears several times in the final output vector with different
normalizations. This may seem redundant but it improves the performance.
We refer to the normalized block descriptors as Histogram of Oriented
Gradient (HOG) descriptors.
"""
n_blocksx = (n_cellsx - bx) + 1
n_blocksy = (n_cellsy - by) + 1
normalized_blocks = np.zeros((n_blocksy, n_blocksx,
by, bx, orientations))
for x in range(n_blocksx):
for y in range(n_blocksy):
block = orientation_histogram[y:y + by, x:x + bx, :]
eps = 1e-5
normalized_blocks[y, x, :] = block / sqrt(block.sum() ** 2 + eps)
"""
The final step collects the HOG descriptors from all blocks of a dense
overlapping grid of blocks covering the detection window into a combined
feature vector for use in the window classifier.
"""
if visualise:
return normalized_blocks.ravel(), hog_image
else:
return normalized_blocks.ravel()
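# A usage sketch (not part of the original module), assuming a SciPy version that
# still exposes the NumPy aliases imported above. With a 128x64 grey-level image,
# 8x8-pixel cells and 3x3-cell blocks there are 16x8 cells and 14x6 blocks, so the
# flattened descriptor has 14 * 6 * 3 * 3 * 9 = 6804 values.
if __name__ == "__main__":
    demo_image = np.random.rand(128, 64)
    features = hog(demo_image, orientations=9, pixels_per_cell=(8, 8),
                   cells_per_block=(3, 3), visualise=False, normalize=True)
    print(features.shape)  # (6804,)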
|
ServidorPython/python32_web/Lib/site-packages/nbformat/v3/nbjson.py | mak213k/Servidor_automatizado_python | 652 | 11075891 | <filename>ServidorPython/python32_web/Lib/site-packages/nbformat/v3/nbjson.py
"""Read and write notebooks in JSON format."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import copy
import json
from .nbbase import from_dict
from .rwbase import (
NotebookReader, NotebookWriter, restore_bytes, rejoin_lines, split_lines,
strip_transient,
)
from ipython_genutils import py3compat
class BytesEncoder(json.JSONEncoder):
"""A JSON encoder that accepts b64 (and other *ascii*) bytestrings."""
def default(self, obj):
if isinstance(obj, bytes):
return obj.decode('ascii')
return json.JSONEncoder.default(self, obj)
class JSONReader(NotebookReader):
def reads(self, s, **kwargs):
nb = json.loads(s, **kwargs)
nb = self.to_notebook(nb, **kwargs)
nb = strip_transient(nb)
return nb
def to_notebook(self, d, **kwargs):
return rejoin_lines(from_dict(d))
class JSONWriter(NotebookWriter):
def writes(self, nb, **kwargs):
kwargs['cls'] = BytesEncoder
kwargs['indent'] = 1
kwargs['sort_keys'] = True
kwargs['separators'] = (',',': ')
nb = copy.deepcopy(nb)
nb = strip_transient(nb)
if kwargs.pop('split_lines', True):
nb = split_lines(nb)
return py3compat.str_to_unicode(json.dumps(nb, **kwargs), 'utf-8')
_reader = JSONReader()
_writer = JSONWriter()
reads = _reader.reads
read = _reader.read
to_notebook = _reader.to_notebook
write = _writer.write
writes = _writer.writes
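# A usage sketch (not part of the original module): BytesEncoder lets json.dumps
# serialize ASCII-safe bytestrings (such as base64 payloads) that the default
# encoder would reject.
if __name__ == "__main__":
    payload = {"image/png": b"iVBORw0KGgo="}
    print(json.dumps(payload, cls=BytesEncoder))  # {"image/png": "iVBORw0KGgo="}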
|
FukuML/L2RLogisticRegression.py | fukuball/fuku-ml | 319 | 11075901 | # encoding=utf8
# import operator
# import itertools
import numpy as np
import FukuML.LogisticRegression as logistic_regression
class L2RLogisticRegression(logistic_regression.LogisticRegression):
def __init__(self):
"""init"""
self.status = 'empty'
self.train_X = []
self.train_Y = []
self.W = []
self.data_num = 0
self.data_demension = 0
self.test_X = []
self.test_Y = []
self.feature_transform_mode = ''
self.feature_transform_degree = 1
self.feed_mode = 'batch'
self.step_eta = 0.126
self.updates = 2000
self.lambda_p = 0.0001
def load_train_data(self, input_data_file=''):
return super(L2RLogisticRegression, self).load_train_data(input_data_file)
def load_test_data(self, input_data_file=''):
return super(L2RLogisticRegression, self).load_test_data(input_data_file)
def set_param(self, feed_mode='batch', step_eta=0.126, updates=2000, lambda_p=0.0001):
# larger C => weaker regularization, smaller C => stronger regularization
# smaller lambda => weaker regularization, larger lambda => stronger regularization
self.feed_mode = feed_mode
self.step_eta = step_eta
self.updates = updates
self.lambda_p = lambda_p
return self.feed_mode, self.step_eta, self.updates, self.lambda_p
def init_W(self, mode='normal'):
return super(L2RLogisticRegression, self).init_W(mode)
def theta(self, s):
return super(L2RLogisticRegression, self).theta(s)
def score_function(self, x, W):
return super(L2RLogisticRegression, self).score_function(x, W)
def error_function(self, x, y, W):
return super(L2RLogisticRegression, self).error_function(x, y, W)
def calculate_gradient(self, X, Y, W):
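        # Gradient of the regularized error
        #   E(w) = (lambda_p / N) * ||w||^2 + (1/N) * sum_n ln(1 + exp(-y_n * w.x_n)),
        # i.e. (2 * lambda_p / N) * w + (1/N) * sum_n theta(-y_n * w.x_n) * (-y_n) * x_n,
        # which is what the expression below averages over the batch.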
if type(Y) is np.ndarray:
data_num = len(Y)
else:
data_num = 1
gradient_average = ((2 * self.lambda_p) / data_num) * self.W + np.dot(self.theta((-1) * Y * np.dot(W, X.transpose())) * ((-1) * Y), X) / data_num
return gradient_average
def calculate_avg_error(self, X, Y, W):
return super(L2RLogisticRegression, self).calculate_avg_error(X, Y, W)
def calculate_test_data_avg_error(self):
return super(L2RLogisticRegression, self).calculate_test_data_avg_error()
def train(self):
return super(L2RLogisticRegression, self).train()
def prediction(self, input_data='', mode='test_data'):
return super(L2RLogisticRegression, self).prediction(input_data, mode)
class BinaryClassifier(L2RLogisticRegression):
def __init__(self):
"""init"""
self.status = 'empty'
self.train_X = []
self.train_Y = []
self.W = []
self.data_num = 0
self.data_demension = 0
self.test_X = []
self.test_Y = []
self.feature_transform_mode = ''
self.feature_transform_degree = 1
self.feed_mode = 'batch'
self.step_eta = 0.126
self.updates = 2000
self.lambda_p = 0.0001
def load_train_data(self, input_data_file=''):
return super(BinaryClassifier, self).load_train_data(input_data_file)
def load_test_data(self, input_data_file=''):
return super(BinaryClassifier, self).load_test_data(input_data_file)
def set_param(self, feed_mode='batch', step_eta=0.126, updates=2000, lambda_p=0.0001):
        return super(BinaryClassifier, self).set_param(feed_mode, step_eta, updates, lambda_p)
def init_W(self, mode='normal'):
return super(BinaryClassifier, self).init_W(mode)
def theta(self, s):
return super(BinaryClassifier, self).theta(s)
def score_function(self, x, W):
'''
Score function to calculate score
'''
score = super(BinaryClassifier, self).score_function(x, W)
if score >= 0.5:
score = 1.0
else:
score = -1.0
return score
def error_function(self, y_prediction, y_truth):
        # needs refactoring
'''
Error function to calculate error
'''
if y_prediction != y_truth:
return 1
else:
return 0
def calculate_gradient(self, X, Y, W):
return super(BinaryClassifier, self).calculate_gradient(X, Y, W)
def calculate_avg_error(self, X, Y, W):
data_num = len(Y)
error_num = 0
for i in range(data_num):
error_num = error_num + self.error_function(self.score_function(X[i], W), Y[i])
avg_error = error_num / float(data_num)
return avg_error
def calculate_test_data_avg_error(self):
return super(BinaryClassifier, self).calculate_test_data_avg_error()
def train(self):
return super(BinaryClassifier, self).train()
def prediction(self, input_data='', mode='test_data'):
return super(BinaryClassifier, self).prediction(input_data, mode)
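# A usage sketch (not part of the original module). The data file paths are
# hypothetical placeholders for whitespace-separated "x1 x2 ... xd y" rows in the
# format FukuML's loaders expect.
if __name__ == "__main__":
    classifier = BinaryClassifier()
    classifier.load_train_data("train.dat")
    classifier.load_test_data("test.dat")
    classifier.set_param(feed_mode="batch", step_eta=0.126, updates=2000, lambda_p=0.0001)
    classifier.init_W()
    classifier.train()
    print("test avg error:", classifier.calculate_test_data_avg_error())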
|
NVLL/data/ng.py | jennhu/vmf_vae_nlp | 159 | 11075917 | <filename>NVLL/data/ng.py<gh_stars>100-1000
import os
import random
import numpy as np
class DataNg:
"""
    Data for the 20 Newsgroups or RCV1 corpus.
Data is preprocessed by <NAME>.
"""
def __init__(self, args):
self.train = DataNg.read_data(os.path.join(args.root_path,
args.data_path, 'train.feat'))
self.test = DataNg.read_data(os.path.join(args.root_path,
args.data_path, 'test.feat'))
self.set_dev(1000) # No dev set, use a part of test as dev set.
self.test_batches = DataNg.create_batches(len(self.test[0]), args.eval_batch_size, shuffle=True)
self.dev_batches = DataNg.create_batches(len(self.dev[0]), args.eval_batch_size, shuffle=True)
self.read_vocab(os.path.join(args.root_path,
args.data_path, 'vocab.new'))
def read_vocab(self, path):
with open(path, 'r') as fd:
lines = fd.read().splitlines()
self.vocab_size = len(lines)
print("Vocab size: {}".format(len(lines)))
def set_dev(self, num=100):
l = list(range(len(self.test[0])))
random.shuffle(l)
l = l[:num]
dev, dev_cnt = [], []
for i in l:
dev.append(self.test[0][i])
dev_cnt.append(self.test[1][i])
self.dev = [dev, dev_cnt]
def set_train_batches(self, args):
self.train_batches = DataNg.create_batches(len(self.train[0]), args.batch_size, shuffle=True)
@staticmethod
def read_data(path_file):
"""
Read 20NG file
:param path_file: Path to file
:return: [data:a List with Dict{id:freq}, word_cnt: a List with #words in this doc]
"""
_id = 0
idx = []
data = []
word_count = []
fin = open(path_file)
while True:
line = fin.readline()
if not line:
break
id_freqs = line.split()
doc = {}
count = 0
for id_freq in id_freqs[1:]:
items = id_freq.split(':')
# python starts from 0
doc[int(items[0]) - 1] = int(items[1])
count += int(items[1])
if count > 0:
idx.append(_id)
_id += 1
data.append(doc)
word_count.append(count)
fin.close()
return [data, word_count]
@staticmethod
def create_batches(data_size, batch_size, shuffle=True):
"""create index by batches."""
batches = []
ids = list(range(data_size))
if shuffle:
random.shuffle(ids)
for i in range(int(data_size / batch_size)):
start = i * batch_size
end = (i + 1) * batch_size
batches.append(ids[start:end])
        # the final batch, whose length is less than batch_size
        rest = data_size % batch_size
        if rest > 0:
            # batches.append(list(ids[-rest:]) + [-1] * (batch_size - rest))  # -1 as padding
            batches.append(list(ids[-rest:]))  # keep the remainder as a smaller final batch (no padding)
return batches
@staticmethod
def fetch_data(data, count, idx_batch, vocab_size):
"""fetch input data by batch."""
batch_size = len(idx_batch)
data_batch = np.zeros((batch_size, vocab_size))
count_batch = []
for i, doc_id in enumerate(idx_batch):
if doc_id != -1:
for word_id, freq in data[doc_id].items():
data_batch[i, word_id] = freq
count_batch.append(count[doc_id])
else:
count_batch.append(0)
return data_batch, count_batch
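# A usage sketch (not part of the original module): the static helpers can be used
# on their own. The .feat path and vocab size are hypothetical; read_data() expects
# svmlight-style "label id:freq id:freq ..." lines, as parsed above.
if __name__ == "__main__":
    docs, word_counts = DataNg.read_data("train.feat")
    for idx_batch in DataNg.create_batches(len(docs), batch_size=64, shuffle=True):
        bows, counts = DataNg.fetch_data(docs, word_counts, idx_batch, vocab_size=2000)
        # bows: (batch, vocab) term-frequency matrix; counts: total tokens per document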
|
homeassistant/components/zwave_me/binary_sensor.py | MrDelik/core | 30,023 | 11075940 | <reponame>MrDelik/core<gh_stars>1000+
"""Representation of a sensorBinary."""
from __future__ import annotations
from zwave_me_ws import ZWaveMeData
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import ZWaveMeController, ZWaveMeEntity
from .const import DOMAIN, ZWaveMePlatform
BINARY_SENSORS_MAP: dict[str, BinarySensorEntityDescription] = {
"generic": BinarySensorEntityDescription(
key="generic",
),
"motion": BinarySensorEntityDescription(
key="motion",
device_class=DEVICE_CLASS_MOTION,
),
}
DEVICE_NAME = ZWaveMePlatform.BINARY_SENSOR
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the binary sensor platform."""
@callback
def add_new_device(new_device: ZWaveMeData) -> None:
controller: ZWaveMeController = hass.data[DOMAIN][config_entry.entry_id]
description = BINARY_SENSORS_MAP.get(
new_device.probeType, BINARY_SENSORS_MAP["generic"]
)
sensor = ZWaveMeBinarySensor(controller, new_device, description)
async_add_entities(
[
sensor,
]
)
config_entry.async_on_unload(
async_dispatcher_connect(
hass, f"ZWAVE_ME_NEW_{DEVICE_NAME.upper()}", add_new_device
)
)
class ZWaveMeBinarySensor(ZWaveMeEntity, BinarySensorEntity):
"""Representation of a ZWaveMe binary sensor."""
def __init__(
self,
controller: ZWaveMeController,
device: ZWaveMeData,
description: BinarySensorEntityDescription,
) -> None:
"""Initialize the device."""
super().__init__(controller=controller, device=device)
self.entity_description = description
@property
def is_on(self) -> bool:
"""Return the state of the sensor."""
return self.device.level == "on"
|
checkov/terraform/checks/resource/azure/AppServiceDisallowCORS.py | jamesholland-uk/checkov | 4,013 | 11075943 | from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck
class AppServiceDisallowCORS(BaseResourceNegativeValueCheck):
def __init__(self):
name = "Ensure that CORS disallows every resource to access app services"
id = "CKV_AZURE_57"
supported_resources = ['azurerm_app_service']
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources, missing_attribute_result=CheckResult.PASSED)
def get_inspected_key(self):
return 'site_config/[0]/cors/[0]/allowed_origins'
def get_forbidden_values(self):
return [['*']]
check = AppServiceDisallowCORS()
|
shadowlands/__main__.py | kayagoban/shadowlands | 140 | 11075944 | #!/usr/bin/env python
import sys
from shadowlands.credstick import Credstick
from shadowlands.sl_config import SLConfig
from shadowlands.sl_node import Node
from shadowlands.tui.tui import Interface
import pdb
from shadowlands.tui.debug import debug
from shadowlands.credstick.trezor_ethdriver import TrezorEthDriver
from decimal import Decimal
from time import sleep
import os
import logging
try:
level = os.environ['SL_DEBUG']
if level == 'DEBUG':
logging.basicConfig(level = logging.DEBUG, filename = "/tmp/shadowlands.log")
elif level == 'INFO':
logging.basicConfig(level = logging.INFO, filename = "/tmp/shadowlands.log")
except KeyError:
pass
load_dapp = None
def main(mock_address=None, dapp=None, hdpath_base=None, hdpath_index=None):
if mock_address:
Credstick.mock_address = mock_address
global load_dapp
# Skip to hd path on detect credstick
if hdpath_base and hdpath_index:
Credstick.hdpath_base = hdpath_base
Credstick.hdpath_index = hdpath_index
if load_dapp:
load_dapp = dapp
# Read from config file
sl_config = SLConfig()
# Start network subsystem
eth_node = Node(sl_config=sl_config)
# create user interface
interface = Interface(eth_node, sl_config, preloaded_dapp=dapp)
# Begin interface
interface.load()
# Shut it all down.
if interface.credstick is not None:
interface.credstick.close()
print("Closing credstick poller...")
Credstick.stop_detect_thread()
print("Shutdown block listener")
try:
eth_node._block_listener.shut_down()
except AttributeError:
#shutdown might occur before _block_listener is defined
pass
print("Closing connection to ethereum node...")
eth_node.stop_thread()
sys.exit(0)
if __name__ == "__main__":
# execute only if run as a script
main()
|
paddle/vision/nn/mb_tiny.py | pradyumn25jain/Ultra-Light-Fast-Generic-Face-Detector-1MB | 6,602 | 11075961 | import paddle.nn as nn
import paddle.nn.functional as F
class Mb_Tiny(nn.Layer):
def __init__(self, num_classes=2):
super(Mb_Tiny, self).__init__()
self.base_channel = 8 * 2
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2D(inp, oup, 3, stride, 1, bias_attr=None),
nn.BatchNorm2D(oup),
nn.ReLU()
)
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2D(inp, inp, 3, stride, 1, groups=inp, bias_attr=None),
nn.BatchNorm2D(inp),
nn.ReLU(),
nn.Conv2D(inp, oup, 1, 1, 0, bias_attr=None),
nn.BatchNorm2D(oup),
nn.ReLU(),
)
self.model = nn.Sequential(
conv_bn(3, self.base_channel, 2), # 160*120
conv_dw(self.base_channel, self.base_channel * 2, 1),
conv_dw(self.base_channel * 2, self.base_channel * 2, 2), # 80*60
conv_dw(self.base_channel * 2, self.base_channel * 2, 1),
conv_dw(self.base_channel * 2, self.base_channel * 4, 2), # 40*30
conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
conv_dw(self.base_channel * 4, self.base_channel * 8, 2), # 20*15
conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
conv_dw(self.base_channel * 8, self.base_channel * 16, 2), # 10*8
conv_dw(self.base_channel * 16, self.base_channel * 16, 1)
)
self.fc = nn.Linear(1024, num_classes)
def forward(self, x):
x = self.model(x)
x = F.avg_pool2d(x, 7)
x = x.view(-1, 1024)
x = self.fc(x)
return x
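# A usage sketch (not part of the original module): run the depthwise-separable
# backbone on a random 320x240 image. With base_channel = 16 the five stride-2
# stages give a (1, 256, 8, 10) feature map; the full forward() with its 1024-wide
# fc head assumes a larger input (e.g. 640x480) so the pooled features flatten to
# 1024 values.
if __name__ == "__main__":
    import paddle
    net = Mb_Tiny(num_classes=2)
    feats = net.model(paddle.rand([1, 3, 240, 320]))
    print(feats.shape)  # [1, 256, 8, 10]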
|
example/testify_pytorch_to_caffe_example.py | YifeiCN/nn_tools | 370 | 11075962 | import caffe
import torch
import numpy as np
import argparse
from collections import OrderedDict
from torch.autograd import Variable
import torch.nn as nn
def arg_parse():
parser=argparse.ArgumentParser()
parser.add_argument('--model','-m',default='alexnet')
parser.add_argument('--decimal','-d',default=2)
parser.add_argument('--gpu','-gpu',action='store_true')
args=parser.parse_args()
return args
def generate_random(shape,gpu=False):
data_np=np.random.rand(np.prod(shape)).reshape(shape)
data_torch=Variable(torch.Tensor(data_np))
if gpu:
data_torch=data_torch.cuda()
return [data_np],[data_torch]
def get_input_size(caffe_net):
input_name = caffe_net.inputs[0]
return caffe_net.blobs[input_name].data.shape
def forward_torch(net,data):
blobs=OrderedDict()
module2name={}
for layer_name,m in net.named_modules():
layer_name=layer_name.replace('.','_')
module2name[m]=layer_name
        # turn off all the in-place operations
if hasattr(m,'inplace'):
m.inplace=False
def forward_hook(m,i,o):
o_np = o.data.cpu().numpy()
blobs[module2name[m]]=o_np
for m in net.modules():
m.register_forward_hook(forward_hook)
output=net.forward(*data)
if isinstance(output,tuple):
outputs=[]
for o in output:
outputs.append(o.data.cpu().numpy())
else:
outputs=[output.data.cpu().numpy()]
return blobs,outputs
def forward_caffe(net,data):
for input_name,d in zip(net.inputs,data):
net.blobs[input_name].data[...] = d
rst=net.forward()
blobs=OrderedDict()
blob2layer={}
for layer_name,tops in net.top_names.items():
for top in tops:
blob2layer[top]=layer_name
for name,value in net.blobs.items():
layer_name=blob2layer[name]
value=value.data
if layer_name in blobs:
blobs[layer_name].append(value)
else:
blobs[layer_name]=[value]
outputs = []
for output_name in net.outputs:
outputs.append(rst[output_name])
return blobs,outputs
def test(net_caffe,net_torch,data_np,data_torch,args):
blobs_caffe, rsts_caffe = forward_caffe(net_caffe, data_np)
blobs_torch, rsts_torchs = forward_torch(net_torch, data_torch)
# test the output of every layer
for layer, value in blobs_caffe.items():
if layer in blobs_torch:
value_torch = blobs_torch[layer]
value = value[0]
if value.size!=value_torch.size:continue
if 'relu' in layer: continue
try:
np.testing.assert_almost_equal(value, value_torch, decimal=args.decimal)
print("TEST layer {}: PASS".format(layer))
except:
print("TEST layer {}: FAIL".format(layer))
# np.testing.assert_almost_equal(np.clip(value, min=0), np.clip(value_torch, min=0))
# test the output
print("TEST output")
for rst_caffe,rst_torch in zip(rsts_caffe,rsts_torchs):
np.testing.assert_almost_equal(rst_caffe, rst_torch, decimal=args.decimal)
print("TEST output: PASS")
if __name__=='__main__':
args=arg_parse()
if args.model=='alexnet':
# Alexnet example
from torchvision.models.alexnet import alexnet
net_torch = alexnet(True).eval()
if args.gpu:
net_torch.cuda()
try:
net_caffe = caffe.Net('alexnet.prototxt', 'alexnet.caffemodel', caffe.TEST)
except:
            raise RuntimeError("Please run alexnet_pytorch_to_caffe.py first")
shape=get_input_size(net_caffe)
data_np,data_torch=generate_random(shape,args.gpu)
test(net_caffe,net_torch,data_np,data_torch,args)
elif args.model=='resnet18':
# ResNet example
from torchvision.models.resnet import resnet18
net_torch = resnet18(True).eval()
if args.gpu:
net_torch.cuda()
net_caffe = caffe.Net('resnet18.prototxt', 'resnet18.caffemodel', caffe.TEST)
shape = get_input_size(net_caffe)
data_np, data_torch = generate_random(shape, args.gpu)
test(net_caffe,net_torch,data_np,data_torch,args)
elif args.model=='inception_v3':
# Inception_v3 example
from torchvision.models.inception import inception_v3
net_torch = inception_v3(True,transform_input=False).eval()
if args.gpu:
net_torch.cuda()
net_caffe = caffe.Net('inception_v3.prototxt', 'inception_v3.caffemodel', caffe.TEST)
shape = get_input_size(net_caffe)
data_np, data_torch = generate_random(shape, args.gpu)
test(net_caffe,net_torch,data_np,data_torch,args)
else:
raise NotImplementedError()
|
graphbrain/cognition/system.py | Mechachleopteryx/graphbrain | 412 | 11075976 | import json
import logging
import graphbrain.constants as const
from collections import defaultdict
from importlib import import_module
from graphbrain import hedge
from graphbrain.cognition.agent import Agent
from graphbrain.op import apply_op, create_op
from graphbrain.parsers import create_parser, parser_lang
def run_agent(agent, lang=None, parser_class=None, hg=None, infile=None,
indir=None, outfile=None, url=None, sequence=None,
progress_bar=True, corefs='resolve', logging_level=logging.INFO):
system = System(lang=lang, parser_class=parser_class, hg=hg, infile=infile,
indir=indir, outfile=outfile, url=url, sequence=sequence,
corefs=corefs, logging_level=logging_level)
if isinstance(agent, Agent):
agent_obj = agent
else:
agent_obj = create_agent(
agent, progress_bar=progress_bar, logging_level=logging_level)
system.add(agent_obj)
system.run()
def load_system(system_file, lang=None, parser_class=None, hg=None,
infile=None, indir=None, outfile=None, url=None, sequence=None,
progress_bar=True, corefs='resolve',
logging_level=logging.INFO):
with open(system_file, 'r') as f:
json_str = f.read()
system_json = json.loads(json_str)
system = System(name=system_file, lang=lang, parser_class=parser_class,
hg=hg, infile=infile, indir=indir, outfile=outfile,
url=url, sequence=sequence, corefs=corefs,
logging_level=logging_level)
for agent_name in system_json:
module_str = system_json[agent_name]['agent']
depends_on = None
input = None
write = True
if 'depends_on' in system_json[agent_name]:
depends_on = system_json[agent_name]['depends_on']
if 'input' in system_json[agent_name]:
input = system_json[agent_name]['input']
if 'write' in system_json[agent_name]:
write = system_json[agent_name]['write']
agent = create_agent(module_str, name=agent_name,
progress_bar=progress_bar,
logging_level=logging_level)
system.add(agent, input=input, depends_on=depends_on, write=write)
return system
def run_system(system_file, lang=None, parser_class=None, hg=None, infile=None,
indir=None, outfile=None, url=None, sequence=None,
progress_bar=True, corefs='resolve',
logging_level=logging.INFO):
system = load_system(system_file, lang=lang, parser_class=parser_class,
hg=hg, infile=infile, indir=indir, outfile=outfile,
url=url, sequence=sequence, progress_bar=progress_bar,
corefs=corefs, logging_level=logging_level)
system.run()
def create_agent(agent_module_str, name=None,
progress_bar=True, logging_level=logging.INFO):
if '.' in agent_module_str:
module_str = agent_module_str
else:
module_str = 'graphbrain.cognition.agents.{}'.format(agent_module_str)
class_name_parts = module_str.split('.')[-1].split('_')
class_name = ''.join([part.title() for part in class_name_parts])
class_obj = getattr(import_module(module_str), class_name)
agent_name = name if name else agent_module_str
return class_obj(
agent_name, progress_bar=progress_bar, logging_level=logging_level)
def processor(x, lang=None, parser_class=None, hg=None, infile=None,
indir=None, outfile=None, url=None, sequence=None,
corefs='resolve'):
if type(x) == str:
if x[-4:] == '.sys':
return load_system(x, lang=lang, parser_class=parser_class, hg=hg,
infile=infile, indir=indir, outfile=outfile,
url=url, sequence=sequence, corefs=corefs)
else:
system = System(lang=lang, parser_class=parser_class, hg=hg,
infile=infile, indir=indir, outfile=outfile,
url=url, sequence=sequence, corefs=corefs)
agent = create_agent(x, progress_bar=False)
system.add(agent)
return system
elif isinstance(x, Agent):
system = System(lang=lang, parser_class=parser_class, hg=hg,
infile=infile, indir=indir, outfile=outfile, url=url,
sequence=sequence)
system.add(x)
return system
elif isinstance(x, System):
if lang:
x.lang = lang
if hg:
x.hg = hg
if infile:
x.infile = infile
if outfile:
x.outfile = outfile
if url:
x.url = url
if sequence:
x.sequence = sequence
return x
else:
raise RuntimeError('Trying to create processor with invalid argument.')
class System(object):
def __init__(self, name=None, lang=None, parser_class=None, hg=None,
infile=None, indir=None, outfile=None, url=None,
sequence=None, corefs='resolve', logging_level=logging.INFO):
self.name = name
self.lang = lang
self.parser_class = parser_class
if parser_class:
plang = parser_lang(parser_class)
if lang:
if lang != plang:
msg = 'specified language ({}) and parser language ({}) '\
'do not match'.format(lang, plang)
raise RuntimeError(msg)
else:
self.lang = plang
self.hg = hg
self.infile = infile
self.indir = indir
self.outfile = outfile
self.url = url
self.sequence = sequence
self.sequence_pos = 0
self.corefs = corefs
logging.basicConfig()
self.logger = logging.getLogger('agent_system')
self.logger.setLevel(logging_level)
self.agents = {}
self.outputs = defaultdict(set)
self.dependants = defaultdict(set)
self.roots = set()
self.agent_seq = []
self.parser = None
self.counters = {}
self.write = {}
def add(self, agent, input=None, depends_on=None, write=True):
agent.system = self
self.agents[agent.name] = agent
if input:
self.outputs[input].add(agent.name)
if depends_on:
self.dependants[depends_on].add(agent.name)
# if agent has no inputs and depends on no other agents, then it is
# a root agent
if not (input or depends_on):
self.roots.add(agent.name)
self.write[agent.name] = write
def _end(self):
# terminate all agents
for agent_name in self.agent_seq:
agent = self.agents[agent_name]
self.logger.info('\nstopping agent "{}"...'.format(agent_name))
if agent.running:
for op in agent.on_end():
self._process_op(agent_name, op)
self.logger.info('[*] agent "{}" stopped.'.format(agent_name))
self.logger.info('{} edges were added.'.format(
self.counters[agent_name][0]))
self.logger.info('{} edges already existed.'.format(
self.counters[agent_name][1]))
report = agent.report()
if len(report) > 0:
self.logger.info(report)
def run(self):
# start by running the roots
for root in self.roots:
self._run_agent(root)
self._end()
if self.name:
self.logger.info('\nsystem "{}" stopped.'.format(self.name))
def _process(self, agent_name, edge):
agent = self._start_agent(agent_name)
ops = agent.input_edge(edge)
if ops:
for op in ops:
opedge = op['edge']
if agent_name in self.outputs:
for output in self.outputs[agent_name]:
for outedge in self._process(output, opedge):
yield outedge
else:
yield opedge
def process(self, edge):
for root in self.roots:
for edge in self._process(root, edge):
yield edge
def get_parser(self, agent):
if self.parser is None:
corefs = self.corefs in {'resolve', 'replace'}
self.parser = create_parser(lang=self.lang,
parser_class=self.parser_class,
lemmas=True,
resolve_corefs=corefs)
return self.parser
def get_infile(self, agent):
return self.infile
def get_indir(self, agent):
return self.indir
def get_outfile(self, agent):
return self.outfile
def get_url(self, agent):
return self.url
def get_hg(self, agent):
return self.hg
def get_sequence(self, agent):
return self.sequence
def parse_results2ops(self, parse_results):
for parse in parse_results['parses']:
if self.corefs == 'resolve':
main_edge = parse['main_edge']
resolved_edge = parse['resolved_corefs']
elif self.corefs == 'replace':
main_edge = parse['resolved_corefs']
resolved_edge = None
else:
main_edge = parse['main_edge']
resolved_edge = None
# add main edge
if main_edge:
# attach text to edge
text = parse['text']
attr = {'text': text}
if self.sequence:
yield create_op(main_edge, sequence=self.sequence,
position=self.sequence_pos,
attributes=attr)
self.sequence_pos += 1
else:
yield create_op(main_edge, attributes=attr)
if self.corefs == 'resolve':
yield create_op(resolved_edge, attributes=attr)
coref_res_edge = hedge(
(const.coref_res_pred, main_edge, resolved_edge))
yield create_op(coref_res_edge)
# add extra edges
for edge in parse['extra_edges']:
yield create_op(edge)
for edge in parse_results['inferred_edges']:
yield create_op(edge, count=True)
def _reset_counters(self, agent_name):
self.counters[agent_name] = [0, 0]
def _start_agent(self, agent_name):
agent = self.agents[agent_name]
if not agent.running:
self.agent_seq.append(agent_name)
self.logger.info('\n[>] agent "{}" started.'.format(agent.name))
self._reset_counters(agent_name)
agent.on_start()
agent.running = True
return agent
def _process_op(self, agent_name, op):
if self.write[agent_name]:
if apply_op(self.hg, op):
self.counters[agent_name][0] += 1
else:
self.counters[agent_name][1] += 1
for output in self.outputs[agent_name]:
self._start_agent(output)
downstream_ops = self.agents[output].input_edge(op['edge'])
if downstream_ops:
for dop in downstream_ops:
self._process_op(output, dop)
def _run_agent(self, agent_name):
agent = self.agents[agent_name]
self._start_agent(agent_name)
ops = agent.run()
if ops:
for op in ops:
self._process_op(agent_name, op)
ops = agent.on_end()
if ops:
for op in ops:
self._process_op(agent_name, op)
agent.running = False
for dependant in self.dependants[agent_name]:
self._run_agent(dependant)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if traceback is None:
self._end()
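# A usage sketch (not part of the original module): the simplest entry point is
# run_agent(), which wraps a single agent in a System as defined above. The
# hypergraph file, input file and agent name below are hypothetical placeholders.
if __name__ == "__main__":
    from graphbrain import hgraph
    hg = hgraph("example.db")
    run_agent("txt_parser", lang="en", hg=hg, infile="article.txt", sequence="articles")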
|
speakeasy/profiler.py | sacx/speakeasy | 816 | 11075990 | <reponame>sacx/speakeasy<filename>speakeasy/profiler.py
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Data format versioning
__report_version__ = '1.1.0'
import time
import json
import hashlib
from collections import deque
from base64 import b64encode
PROC_CREATE = 'create'
MEM_ALLOC = 'mem_alloc'
MEM_WRITE = 'mem_write'
MEM_READ = 'mem_read'
MEM_PROTECT = 'mem_protect'
THREAD_INJECT = 'thread_inject'
class ProfileError(Exception):
pass
class MemAccess(object):
"""
Represents a symbolicated chunk of memory that can be tracked
"""
def __init__(self, base=None, sym=None, size=0):
self.base = base
self.size = size
self.sym = sym
self.reads = 0
self.writes = 0
self.execs = 0
class Run(object):
"""
    This class represents the basic execution primitive for the emulation engine.
A "run" can represent any form of execution: a thread, a callback, an exported function,
or even a child process.
"""
def __init__(self):
self.instr_cnt = 0
self.ret_val = None
self.apis = []
self.sym_access = {}
self.network = {'dns': [], 'traffic': []}
self.file_access = []
self.dropped_files = []
self.registry_access = []
self.process_events = []
self.mem_access = {}
self.dyn_code = {'mmap': [], 'base_addrs': set()}
self.process_context = None
self.thread = None
self.unique_apis = []
self.api_hash = hashlib.sha256()
self.handled_exceptions = []
self.stack = None
self.api_callbacks = []
self.exec_cache = deque(maxlen=4)
self.read_cache = deque(maxlen=4)
self.write_cache = deque(maxlen=4)
self.args = None
self.start_addr = None
self.type = None
self.error = {}
self.num_apis = 0
def get_api_count(self):
"""
Get the number of APIs that were called during the run
"""
return self.num_apis
class Profiler(object):
"""
The profiler class exists to generate an execution report
for all runs that occur within a binary emulation.
"""
def __init__(self):
super(Profiler, self).__init__()
self.start_time = 0
self.strings = {'ansi': [], 'unicode': []}
self.decoded_strings = {'ansi': [], 'unicode': []}
self.last_data = [0, 0]
self.last_event = {}
self.set_start_time()
self.runtime = 0
self.meta = {}
self.runs = []
def add_input_metadata(self, meta):
"""
Add top level profiler fields containing metadata for the
module that will be emulated
"""
self.meta = meta
def set_start_time(self):
"""
Get the start time for a sample so we can time the execution length
"""
self.start_time = time.time()
def get_run_time(self):
"""
Get the time spent emulating a specific "run"
"""
return time.time() - self.start_time
def stop_run_clock(self):
"""
Stop the runtime clock to include in the report
"""
self.runtime = self.get_run_time()
def get_epoch_time(self):
"""
Get the current time in epoch format
"""
return int(time.time())
def add_run(self, run):
"""
Add a new run to the captured run list
"""
self.runs.append(run)
def handle_binary_data(self, data):
"""
Compress and encode binary data to be included in a report
"""
return b64encode(data).decode('utf-8')
def log_error(self, error):
"""
Log a top level emulator error for the emulation report
"""
if not self.meta.get('errors'):
self.meta['errors'] = []
self.meta['errors'].append(error)
def log_dropped_files(self, run, files):
for f in files:
data = f.get_data()
if data is None:
continue
_hash = f.get_hash()
entry = {'path': f.get_path(), 'size': len(data), 'sha256': _hash}
run.dropped_files.append(entry)
def log_api(self, run, pc, name, ret, argv, ctx=[]):
"""
Log a call to an OS API. This includes arguments, return address, and return value
"""
run.num_apis += 1
if name not in run.unique_apis:
run.api_hash.update(name.lower().encode('utf-8'))
run.unique_apis.append(name)
if not run.apis:
run.apis = []
pc = hex(pc)
if ret is not None:
ret = hex(ret)
args = argv.copy()
for i, arg in enumerate(args):
if isinstance(arg, int):
args[i] = hex(arg)
entry = {'pc': pc, 'api_name': name, 'args': args, 'ret_val': ret}
if entry not in run.apis[-3:]:
run.apis.append(entry)
def log_file_access(self, run, path, event_type, data=None,
handle=0, disposition=[], access=[], buffer=0,
size=None):
"""
Log file access events. This will include things like handles being opened,
data reads, and data writes.
"""
enc = None
if data:
enc = self.handle_binary_data(data[:1024])
for et in ('write', 'read'):
if event_type == et:
for fa in run.file_access:
if path == fa.get('path') and fa['event'] == et:
if size:
fa['size'] += size
if enc:
fa["data"] += enc
return
event = {'event': event_type, 'path': path}
if enc:
event.update({'data': enc})
if handle:
event.update({'handle': handle})
if size is not None:
event.update({'size': size})
if buffer:
event.update({'buffer': hex(buffer)})
if disposition:
event.update({'open_flags': disposition})
if access:
event.update({'access_flags': access})
if event not in run.file_access:
run.file_access.append(event)
def log_registry_access(self, run, path, event_type, value_name=None, data=None,
handle=0, disposition=[], access=[], buffer=0,
size=None):
"""
Log registry access events. This includes values and keys being accessed and
being read/written
"""
enc = None
if data:
enc = self.handle_binary_data(data[:1024])
event = {'event': event_type, 'path': path}
if enc:
event.update({'data': enc})
if handle:
event.update({'handle': hex(handle)})
if value_name:
event.update({'value_name': value_name})
if size is not None:
event.update({'size': size})
if buffer:
event.update({'buffer': hex(buffer)})
if disposition:
event.update({'open_flags': disposition})
if access:
event.update({'access_flags': access})
if event not in run.registry_access:
run.registry_access.append(event)
def log_process_event(self, run, proc, event_type, kwargs):
"""
Log events related to a process accessing another process. This includes:
creating a child process, reading/writing to a process, or creating a thread
within another process.
"""
event = {}
if event_type == PROC_CREATE:
event.update({'event': event_type})
event.update({'pid': proc.get_id()})
event.update({'path': proc.get_process_path()})
event.update({'cmdline': proc.get_command_line()})
elif event_type == MEM_ALLOC:
event.update({'event': event_type})
event.update({'pid': proc.get_id()})
event.update({'path': proc.get_process_path()})
event.update(kwargs)
elif event_type == MEM_PROTECT:
event.update({'event': event_type})
event.update({'pid': proc.get_id()})
event.update({'path': proc.get_process_path()})
event.update(kwargs)
elif event_type == MEM_WRITE:
base = kwargs['base']
size = kwargs['size']
data = kwargs['data']
last_base, last_size = self.last_data
last_evt_type = self.last_event.get('event')
if event_type == last_evt_type and (last_base + last_size) == base:
self.last_event['data'] += data
self.last_event['size'] += len(data)
self.last_data = [base, size]
return
event.update({'event': event_type})
event.update({'pid': proc.get_id()})
event.update({'path': proc.get_process_path()})
data = kwargs['data']
event.update({'data': data})
event.update({'base': base})
event.update({'size': size})
self.last_data = [base, size]
elif event_type == MEM_READ:
base = kwargs['base']
size = kwargs['size']
data = kwargs['data']
last_base, last_size = self.last_data
last_evt_type = self.last_event.get('event')
if event_type == last_evt_type and (last_base + last_size) == base:
self.last_event['data'] += data
self.last_event['size'] += len(data)
self.last_data = [base, size]
return
event.update({'event': event_type})
event.update({'pid': proc.get_id()})
event.update({'path': proc.get_process_path()})
data = kwargs['data']
event.update({'data': data})
event.update({'size': size})
event.update({'base': base})
self.last_data = [base, size]
elif event_type == THREAD_INJECT:
event.update({'event': event_type})
event.update({'pid': proc.get_id()})
event.update({'path': proc.get_process_path()})
event.update({'start_addr': hex(kwargs['start_addr'])})
event.update({'param': hex(kwargs['param'])})
run.process_events.append(event)
self.last_event = event
def log_dns(self, run, domain, ip=''):
"""
Log DNS name lookups for the emulation report
"""
query = {"query": domain, "response": ip}
if query not in run.network['dns']:
run.network['dns'].append(query)
def log_http(self, run, server, port, proto='http',
headers='', body=b'', secure=False):
"""
        Log HTTP traffic that occurs during emulation
"""
conns = run.network['traffic']
proto = 'http'
if secure:
proto = 'https'
http_conn = {'server': server, 'proto': 'tcp.%s' % (proto), 'port': port,
'headers': headers}
if body:
data = self.handle_binary_data(body[:0x3000])
http_conn.update({'body': data})
if http_conn not in conns:
conns.append(http_conn)
def log_dyn_code(self, run, tag, base, size):
"""
Log code that is generated at runtime and then executed
"""
if base not in run.dyn_code['base_addrs']:
entry = {'tag': tag, 'base': hex(base), 'size': hex(size)}
run.dyn_code['mmap'].append(entry)
run.dyn_code['base_addrs'].add(base)
def log_network(self, run, server, port, typ='unknown', proto='unknown', data=b'', method=''):
"""
Log network activity for an emulation run
"""
conns = run.network['traffic']
conn = {'server': server, 'proto': proto, 'port': port}
if data:
data = self.handle_binary_data(data[:0x3000])
conn.update({'data': data})
if method:
conn.update({'method': method})
conn.update({'type': typ})
conns.append(conn)
def get_json_report(self):
"""
Retrieve the execution profile for the emulator as a json string
"""
profile = self.get_report()
return json.dumps(profile, indent=4, sort_keys=False)
def get_report(self):
"""
Retrieve the execution profile for the emulator
"""
profile = {}
meta = self.meta
meta.update({'report_version': __report_version__})
meta.update({'emulation_total_runtime': round(self.runtime, 3)})
meta.update({'timestamp': int(self.start_time)})
# For now, we only support single file emulation
exec_paths = []
for r in self.runs:
if r.ret_val is not None:
ret = hex(r.ret_val)
else:
ret = None
args = []
for a in r.args:
if isinstance(a, int):
args.append(hex(a))
else:
args.append(a)
ep = {'ep_type': r.type,
'start_addr': hex(r.start_addr),
'ep_args': args,
}
if r.instr_cnt:
ep.update({'instr_count': r.instr_cnt})
ep.update(
{
'apihash': r.api_hash.hexdigest(),
'apis': r.apis,
'ret_val': ret,
'error': r.error
}
)
if r.handled_exceptions:
ep.update({"handled_exceptions": r.handled_exceptions})
if r.network and (r.network.get('dns', []) or
r.network.get('traffic', {})):
ep.update({'network_events': r.network})
if r.file_access:
ep.update({'file_access': r.file_access})
if r.registry_access:
ep.update({'registry_access': r.registry_access})
if r.process_events:
for evt in r.process_events:
if evt.get('event') in (MEM_WRITE, MEM_READ):
evt['data'] = self.handle_binary_data(evt['data'][:1024])
if evt.get('base'):
evt['base'] = hex(evt['base'])
ep.update({'process_events': r.process_events})
if r.mem_access:
mem_accesses = []
for mmap, maccess in r.mem_access.items():
mem_accesses.append({'tag': mmap.get_tag(),
'base': hex(mmap.get_base()),
'reads': maccess.reads,
'writes': maccess.writes,
'execs': maccess.execs})
ep.update({'mem_access': mem_accesses})
sym_accesses = []
for address, maccess in r.sym_access.items():
sym_accesses.append({'symbol': maccess.sym,
'reads': maccess.reads,
'writes': maccess.writes,
'execs': maccess.execs})
if sym_accesses:
ep.update({'sym_accesses': sym_accesses})
if r.dyn_code:
ep.update({'dynamic_code_segments': r.dyn_code['mmap']})
exec_paths.append(ep)
if r.dropped_files:
ep.update({'dropped_files': r.dropped_files})
if (self.strings['ansi'] or self.strings['unicode'] or
self.decoded_strings['ansi'] or self.decoded_strings['unicode']):
meta.update({'strings': {'static':self.strings, 'in_memory': self.decoded_strings}}) # noqa
profile = {**profile, **meta}
profile.update({'entry_points': exec_paths})
return profile
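# --- Illustrative usage (editor's sketch) -----------------------------------
# A minimal, hypothetical example of driving the Run/Profiler pair defined in
# this module. The module metadata, addresses, and the API call below are made
# up for illustration only; the sketch assumes the module-level names used
# elsewhere in this file (time, json, hashlib, b64encode, deque,
# __report_version__) are already imported/defined near the top of the file.
if __name__ == '__main__':
    profiler = Profiler()
    profiler.add_input_metadata({'module': 'sample.exe', 'arch': 'x86'})
    run = Run()
    run.type = 'entry_point'
    run.start_addr = 0x401000
    run.args = []
    # Record a single fabricated API call, then close out the run
    profiler.log_api(run, 0x401010, 'kernel32.GetTickCount', 0x1234, [])
    profiler.add_run(run)
    profiler.stop_run_clock()
    print(profiler.get_json_report())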
|
example/pyctp/test/dac_test.py | mmmaaaggg/pyctp_lovelylain | 358 | 11075992 | # -*-coding:utf-8 -*-
import unittest
from ..dac import *
class ModuleTest(unittest.TestCase):
def test_xdiff(self):
self.assertEquals([],xdiff([]))
self.assertEquals([0,0,1,0,0,-1],xdiff([0,0,1,0,0,-1]))
self.assertEquals([0,0,1,0,0,-1],xdiff([0,0,1,1,0,-1]))
self.assertEquals([0,0,1,0,0,-1],xdiff([0,0,1,0,1,-1]))
self.assertEquals([0,0,1,0,-1,0],xdiff([0,0,1,1,-1,-1]))
def test_cexpma(self):
self.assertEquals([],cexpma([],6))
source = [25000,24875,24781,24594,24500,24625,25219,27250]
        self.assertEquals([25000,24958,24899,24797,24698,24674,24856,25654],cexpma(source,5)) # equivalent to a 5-day period
def test_cexpma1(self):
#self.assertEquals(0,cexpma1([],6,[]))
#self.assertEquals(0,cexpma1([100],6,[0]))
source = [25000,24875,24781,24594,24500,24625,25219,27250]
target = [25000,24958,24899,24797,24698,24674,24856,0]
        #self.assertEquals(25654,cexpma1(source,5,target)) # equivalent to a 5-day period
#self.assertEquals(25654,target[-1])
        self.assertEquals(25654,cexpma1(source[-1],5,target[-2])) # equivalent to a 5-day period
def test_tr(self):
self.assertEquals([],tr([],[],[]))
shigh = [200,250,200,400]
slow = [100,200,100,200]
sclose = [150,220,150,300]
self.assertEquals([100*XBASE,100*XBASE,120*XBASE,250*XBASE],tr(sclose,shigh,slow))
def test_tr1(self):
self.assertEquals(0,tr1([],[],[],[]))
self.assertEquals(0,tr1([200],[100],[150],[0]))
shigh = [200,250,200,400]
slow = [100,200,100,200]
sclose = [150,220,150,300]
self.assertEquals([100*XBASE,100*XBASE,120*XBASE,250*XBASE],tr(sclose,shigh,slow))
def test_atr(self):
shigh = [200,250,200,400]
slow = [100,200,100,200]
sclose = [150,220,150,300]
ltr = tr(sclose,shigh,slow)
self.assertEquals([100*XBASE,100*XBASE,120*XBASE,250*XBASE],atr(ltr,1))
def test_atr1(self):
self.assertEquals(0,atr1([],[],1))
self.assertEquals(0,atr1([100],[0],1))
shigh = [200,250,200,400]
slow = [100,200,100,200]
sclose = [150,220,150,300]
ltr = tr(sclose,shigh,slow)
latr = [100*XBASE,100*XBASE,120*XBASE,0]
self.assertEquals(250*XBASE,atr1(ltr,latr,1))
self.assertEquals(250*XBASE,latr[-1])
def test_xatr(self):
self.assertEquals([],xatr([],[]))
self.assertEquals([10*CBASE,20*CBASE/3],xatr([1000,2000],[100,300]))
def test_xatr1(self):
self.assertEquals(0,xatr1([],[],[]))
self.assertEquals(2*CBASE,xatr1([200],[100],[100]))
self.assertEquals(20*CBASE/3,xatr1([1000,2000],[100,300],[10000000,0]))
def test_sdiff(self):
self.assertEquals([],sdiff([],[]))
self.assertEquals([101,2],sdiff([111,12],[10,10]))
def test_rsdiff(self):
self.assertEquals([],rsdiff([],[],10))
self.assertEquals([106,2],rsdiff([111,12],[5,10],0))
self.assertEquals([106,7],rsdiff([111,12],[5,10],1))
self.assertEquals([106,7],rsdiff([111,12],[5,10],2))
def test_accumulate(self):
self.assertEquals([],accumulate([]))
self.assertEquals([1,11,111],accumulate([1,10,100]))
def test_accumulate1(self):
self.assertEquals(0,accumulate1([],[]))
self.assertEquals(100,accumulate1([100],[0]))
self.assertEquals(300,accumulate1([100,200],[100,0]))
def test_ma(self):
self.assertEquals([],ma([],3))
a= [1,2,3,4,5,6,7,8,9,0]
self.assertEquals([0,0,2,3,4,5,6,7,8,6],ma(a,3))
def test_ma1(self):
self.assertEquals(0,ma1([],3,[]))
a= [1,2,3,4,5,6,7,8,9,0]
t = [0,0,2,3,4,5,6,7,8,0]
self.assertEquals(6,ma1(a,3,t))
self.assertEquals(6,t[-1])
def test_strend2(self):
self.assertEquals([],strend2([]))
self.assertEquals([0],strend2([1]))
source = [10,20,30,30,40,50,40,30,20,20,10,20]
self.assertEquals([0,1,2,3,4,5,-1,-2,-3,-4,-5,1],strend2(source))
def test_strend2_1(self):
self.assertEquals(0,strend2_1([],[]))
self.assertEquals(0,strend2_1([1],[0]))
source = [10,20,30,30,40,50,40,30,20,20,10,20]
target = [0,1,2,3,4,5,-1,-2,-3,-4,-5,0]
self.assertEquals(1,strend2_1(source,target))
self.assertEquals(1,target[-1])
if __name__ == "__main__":
import logging
logging.basicConfig(filename="test.log",level=logging.DEBUG,format='%(name)s:%(funcName)s:%(lineno)d:%(asctime)s %(levelname)s %(message)s')
unittest.main()
|
Tests/ttLib/tables/_m_e_t_a_test.py | twardoch/fonttools-py27 | 240 | 11076003 | <reponame>twardoch/fonttools-py27
from __future__ import print_function, division, absolute_import, unicode_literals
from fontTools.misc.py23 import *
from fontTools.misc.testTools import parseXML
from fontTools.misc.textTools import deHexStr
from fontTools.misc.xmlWriter import XMLWriter
from fontTools.ttLib import TTLibError
from fontTools.ttLib.tables._m_e_t_a import table__m_e_t_a
import unittest
# From a real font on MacOS X, but substituted 'bild' tag by 'TEST',
# and shortened the payload.
META_DATA = deHexStr(
"00 00 00 01 00 00 00 00 00 00 00 1C 00 00 00 01 "
"54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF")
# The 'dlng' and 'slng' tag with text data containing "augmented" BCP 47
# comma-separated or comma-space-separated tags. These should be UTF-8 encoded
# text.
META_DATA_TEXT = deHexStr(
"00 00 00 01 00 00 00 00 00 00 00 28 00 00 00 02 "
"64 6C 6E 67 00 00 00 28 00 00 00 0E 73 6C 6E 67 "
"00 00 00 36 00 00 00 0E 4C 61 74 6E 2C 47 72 65 "
"6B 2C 43 79 72 6C 4C 61 74 6E 2C 47 72 65 6B 2C "
"43 79 72 6C")
class MetaTableTest(unittest.TestCase):
def test_decompile(self):
table = table__m_e_t_a()
table.decompile(META_DATA, ttFont={"meta": table})
self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data)
def test_compile(self):
table = table__m_e_t_a()
table.data["TEST"] = b"\xCA\xFE\xBE\xEF"
self.assertEqual(META_DATA, table.compile(ttFont={"meta": table}))
def test_decompile_text(self):
table = table__m_e_t_a()
table.decompile(META_DATA_TEXT, ttFont={"meta": table})
self.assertEqual({"dlng": u"Latn,Grek,Cyrl",
"slng": u"Latn,Grek,Cyrl"}, table.data)
def test_compile_text(self):
table = table__m_e_t_a()
table.data["dlng"] = u"Latn,Grek,Cyrl"
table.data["slng"] = u"Latn,Grek,Cyrl"
self.assertEqual(META_DATA_TEXT, table.compile(ttFont={"meta": table}))
def test_toXML(self):
table = table__m_e_t_a()
table.data["TEST"] = b"\xCA\xFE\xBE\xEF"
writer = XMLWriter(BytesIO())
table.toXML(writer, {"meta": table})
xml = writer.file.getvalue().decode("utf-8")
self.assertEqual([
'<hexdata tag="TEST">',
'cafebeef',
'</hexdata>'
], [line.strip() for line in xml.splitlines()][1:])
def test_fromXML(self):
table = table__m_e_t_a()
for name, attrs, content in parseXML(
'<hexdata tag="TEST">'
' cafebeef'
'</hexdata>'):
table.fromXML(name, attrs, content, ttFont=None)
self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data)
def test_toXML_text(self):
table = table__m_e_t_a()
table.data["dlng"] = u"Latn,Grek,Cyrl"
writer = XMLWriter(BytesIO())
table.toXML(writer, {"meta": table})
xml = writer.file.getvalue().decode("utf-8")
self.assertEqual([
'<text tag="dlng">',
'Latn,Grek,Cyrl',
'</text>'
], [line.strip() for line in xml.splitlines()][1:])
def test_fromXML_text(self):
table = table__m_e_t_a()
for name, attrs, content in parseXML(
'<text tag="dlng">'
' Latn,Grek,Cyrl'
'</text>'):
table.fromXML(name, attrs, content, ttFont=None)
self.assertEqual({"dlng": u"Latn,Grek,Cyrl"}, table.data)
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
|
pgoapi/protos/pogoprotos/data/gym/gym_status_and_defenders_pb2.py | aroo135/pgoapi | 842 | 11076004 | <filename>pgoapi/protos/pogoprotos/data/gym/gym_status_and_defenders_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/data/gym/gym_status_and_defenders.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.map.fort import fort_data_pb2 as pogoprotos_dot_map_dot_fort_dot_fort__data__pb2
from pogoprotos.data.gym import gym_defender_pb2 as pogoprotos_dot_data_dot_gym_dot_gym__defender__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/gym/gym_status_and_defenders.proto',
package='pogoprotos.data.gym',
syntax='proto3',
serialized_pb=_b('\n2pogoprotos/data/gym/gym_status_and_defenders.proto\x12\x13pogoprotos.data.gym\x1a#pogoprotos/map/fort/fort_data.proto\x1a&pogoprotos/data/gym/gym_defender.proto\"\x8a\x01\n\x15GymStatusAndDefenders\x12\x39\n\x12pokemon_fort_proto\x18\x01 \x01(\x0b\x32\x1d.pogoprotos.map.fort.FortData\x12\x36\n\x0cgym_defender\x18\x02 \x03(\x0b\x32 .pogoprotos.data.gym.GymDefenderb\x06proto3')
,
dependencies=[pogoprotos_dot_map_dot_fort_dot_fort__data__pb2.DESCRIPTOR,pogoprotos_dot_data_dot_gym_dot_gym__defender__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GYMSTATUSANDDEFENDERS = _descriptor.Descriptor(
name='GymStatusAndDefenders',
full_name='pogoprotos.data.gym.GymStatusAndDefenders',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pokemon_fort_proto', full_name='pogoprotos.data.gym.GymStatusAndDefenders.pokemon_fort_proto', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gym_defender', full_name='pogoprotos.data.gym.GymStatusAndDefenders.gym_defender', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=153,
serialized_end=291,
)
_GYMSTATUSANDDEFENDERS.fields_by_name['pokemon_fort_proto'].message_type = pogoprotos_dot_map_dot_fort_dot_fort__data__pb2._FORTDATA
_GYMSTATUSANDDEFENDERS.fields_by_name['gym_defender'].message_type = pogoprotos_dot_data_dot_gym_dot_gym__defender__pb2._GYMDEFENDER
DESCRIPTOR.message_types_by_name['GymStatusAndDefenders'] = _GYMSTATUSANDDEFENDERS
GymStatusAndDefenders = _reflection.GeneratedProtocolMessageType('GymStatusAndDefenders', (_message.Message,), dict(
DESCRIPTOR = _GYMSTATUSANDDEFENDERS,
__module__ = 'pogoprotos.data.gym.gym_status_and_defenders_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.gym.GymStatusAndDefenders)
))
_sym_db.RegisterMessage(GymStatusAndDefenders)
# @@protoc_insertion_point(module_scope)
|
saleor/product/migrations/0080_collection_published_date.py | elwoodxblues/saleor | 15,337 | 11076005 | <reponame>elwoodxblues/saleor
# Generated by Django 2.1.3 on 2018-12-03 20:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("product", "0079_default_tax_rate_instead_of_empty_field")]
operations = [
migrations.AddField(
model_name="collection",
name="published_date",
field=models.DateField(blank=True, null=True),
)
]
|
tests/detection/test_errorsloaderdetector.py | rcap107/holoclean | 468 | 11076038 | import csv
import pytest
from tempfile import NamedTemporaryFile
from detect.errorloaderdetector import ErrorsLoaderDetector
def test_errors_loader_valid_csv_file():
tmp_file = NamedTemporaryFile(delete=False)
with open(tmp_file.name, 'w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['_tid_', 'attribute']) # Header.
csv_writer.writerow([1, 'attr1'])
csv_writer.writerow([1, 'attr2'])
csv_writer.writerow([2, 'attr1'])
csv_writer.writerow([3, 'attr2'])
errors_loader_detector = ErrorsLoaderDetector(fpath=tmp_file.name)
errors_df = errors_loader_detector.detect_noisy_cells()
assert errors_df is not None
assert errors_df.columns.tolist() == ['_tid_', 'attribute']
assert len(errors_df) == 4
def test_errors_loader_invalid_csv_file():
tmp_file = NamedTemporaryFile(delete=False)
with open(tmp_file.name, 'w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['_tid_', 'invalid_column']) # Header.
csv_writer.writerow([1, 'val1'])
with pytest.raises(Exception) as invalid_file_error:
errors_loader_detector = ErrorsLoaderDetector(fpath=tmp_file.name)
assert 'The loaded errors table does not match the expected schema' in str(invalid_file_error.value)
|
kafka/tools/assigner/models/replica_election.py | akashvacher/kafka-tools | 578 | 11076057 | <reponame>akashvacher/kafka-tools
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import subprocess
from tempfile import NamedTemporaryFile
from kafka.tools.models import BaseModel
class ReplicaElection(BaseModel):
equality_attrs = ['partitions']
def __init__(self, partitions, pause_time=300):
self.partitions = partitions
self.pause_time = pause_time
def __repr__(self):
return json.dumps(self.dict_for_replica_election())
def dict_for_replica_election(self):
ple = {'partitions': []}
for partition in self.partitions:
ple['partitions'].append(partition.dict_for_replica_election())
return ple
def execute(self, num, total, zookeeper, tools_path, plugins=[], dry_run=True):
if not dry_run:
with NamedTemporaryFile(mode='w') as assignfile:
json.dump(self.dict_for_replica_election(), assignfile)
assignfile.flush()
FNULL = open(os.devnull, 'w')
subprocess.call(['{0}/kafka-preferred-replica-election.sh'.format(tools_path),
'--zookeeper', zookeeper,
'--path-to-json-file', assignfile.name],
stdout=FNULL, stderr=FNULL)
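# --- Illustrative usage (editor's sketch) ------------------------------------
# ReplicaElection only needs partition objects exposing
# dict_for_replica_election(); the stub class below is hypothetical and stands
# in for kafka-tools' real Partition model, purely to show the JSON payload
# that would be handed to kafka-preferred-replica-election.sh.
if __name__ == '__main__':
    class _StubPartition(object):
        def __init__(self, topic, num):
            self.topic = topic
            self.num = num
        def dict_for_replica_election(self):
            return {'topic': self.topic, 'partition': self.num}
    election = ReplicaElection([_StubPartition('example-topic', 0),
                                _StubPartition('example-topic', 1)])
    print(repr(election))  # JSON for the preferred replica election tool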
|
src/main/bin/python/python2.7/site-packages/PIL/PsdImagePlugin.py | otrack/serverless-shell | 132 | 11076065 | <reponame>otrack/serverless-shell<gh_stars>100-1000
#
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by <NAME>
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.4"
from PIL import Image, ImageFile, ImagePalette, _binary
MODES = {
# (photoshop mode, bits) -> (pil mode, required channels)
(0, 1): ("1", 1),
(0, 8): ("L", 1),
(1, 8): ("L", 1),
(2, 8): ("P", 1),
(3, 8): ("RGB", 3),
(4, 8): ("CMYK", 4),
(7, 8): ("L", 1), # FIXME: multilayer
(8, 8): ("L", 1), # duotone
(9, 8): ("LAB", 3)
}
#
# helpers
i8 = _binary.i8
i16 = _binary.i16be
i32 = _binary.i32be
# --------------------------------------------------------------------
# read PSD images
def _accept(prefix):
return prefix[:4] == b"8BPS"
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
format = "PSD"
format_description = "Adobe Photoshop"
def _open(self):
read = self.fp.read
#
# header
s = read(26)
if s[:4] != b"8BPS" or i16(s[4:]) != 1:
raise SyntaxError("not a PSD file")
psd_bits = i16(s[22:])
psd_channels = i16(s[12:])
psd_mode = i16(s[24:])
mode, channels = MODES[(psd_mode, psd_bits)]
if channels > psd_channels:
raise IOError("not enough channels")
self.mode = mode
self.size = i32(s[18:]), i32(s[14:])
#
# color mode data
size = i32(read(4))
if size:
data = read(size)
if mode == "P" and size == 768:
self.palette = ImagePalette.raw("RGB;L", data)
#
# image resources
self.resources = []
size = i32(read(4))
if size:
# load resources
end = self.fp.tell() + size
while self.fp.tell() < end:
signature = read(4)
id = i16(read(2))
name = read(i8(read(1)))
if not (len(name) & 1):
read(1) # padding
data = read(i32(read(4)))
if (len(data) & 1):
read(1) # padding
self.resources.append((id, name, data))
if id == 1039: # ICC profile
self.info["icc_profile"] = data
#
# layer and mask information
self.layers = []
size = i32(read(4))
if size:
end = self.fp.tell() + size
size = i32(read(4))
if size:
self.layers = _layerinfo(self.fp)
self.fp.seek(end)
#
# image descriptor
self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
# keep the file open
self._fp = self.fp
self.frame = 0
def seek(self, layer):
# seek to given layer (1..max)
if layer == self.frame:
return
try:
if layer <= 0:
raise IndexError
name, mode, bbox, tile = self.layers[layer-1]
self.mode = mode
self.tile = tile
self.frame = layer
self.fp = self._fp
return name, bbox
except IndexError:
raise EOFError("no such layer")
def tell(self):
# return layer number (0=image, 1..max=layers)
return self.frame
def load_prepare(self):
# create image memory if necessary
if not self.im or\
self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.fill(self.mode, self.size, 0)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def _layerinfo(file):
# read layerinfo block
layers = []
read = file.read
for i in range(abs(i16(read(2)))):
# bounding box
y0 = i32(read(4)); x0 = i32(read(4))
y1 = i32(read(4)); x1 = i32(read(4))
# image info
info = []
mode = []
types = list(range(i16(read(2))))
if len(types) > 4:
continue
for i in types:
type = i16(read(2))
if type == 65535:
m = "A"
else:
m = "RGBA"[type]
mode.append(m)
size = i32(read(4))
info.append((m, size))
# figure out the image mode
mode.sort()
if mode == ["R"]:
mode = "L"
elif mode == ["B", "G", "R"]:
mode = "RGB"
elif mode == ["A", "B", "G", "R"]:
mode = "RGBA"
else:
mode = None # unknown
# skip over blend flags and extra information
filler = read(12)
name = ""
size = i32(read(4)) # length of the extra data field
combined = 0
if size:
data_end = file.tell() + size
length = i32(read(4))
if length:
mask_y = i32(read(4)); mask_x = i32(read(4))
mask_h = i32(read(4)) - mask_y; mask_w = i32(read(4)) - mask_x
file.seek(length - 16, 1)
combined += length + 4
length = i32(read(4))
if length:
file.seek(length, 1)
combined += length + 4
length = i8(read(1))
if length:
# Don't know the proper encoding, Latin-1 should be a good guess
name = read(length).decode('latin-1', 'replace')
combined += length + 1
file.seek(data_end)
layers.append((name, mode, (x0, y0, x1, y1)))
# get tiles
i = 0
for name, mode, bbox in layers:
tile = []
for m in mode:
t = _maketile(file, m, bbox, 1)
if t:
tile.extend(t)
layers[i] = name, mode, bbox, tile
i = i + 1
return layers
def _maketile(file, mode, bbox, channels):
tile = None
read = file.read
compression = i16(read(2))
xsize = bbox[2] - bbox[0]
ysize = bbox[3] - bbox[1]
offset = file.tell()
if compression == 0:
#
# raw compression
tile = []
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer = layer + ";I"
tile.append(("raw", bbox, offset, layer))
offset = offset + xsize*ysize
elif compression == 1:
#
# packbits compression
i = 0
tile = []
bytecount = read(channels * ysize * 2)
offset = file.tell()
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer = layer + ";I"
tile.append(
("packbits", bbox, offset, layer)
)
for y in range(ysize):
offset = offset + i16(bytecount[i:i+2])
i = i + 2
file.seek(offset)
if offset & 1:
read(1) # padding
return tile
# --------------------------------------------------------------------
# registry
Image.register_open("PSD", PsdImageFile, _accept)
Image.register_extension("PSD", ".psd")
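# --- Illustrative usage (editor's note) --------------------------------------
# A commented sketch of how this plugin is typically exercised through PIL; the
# filename "layers.psd" is a placeholder. seek(n) switches to layer n
# (1..len(im.layers)) and returns its (name, bbox), while tell() reports the
# current layer, as implemented above.
#
#     from PIL import Image
#     im = Image.open("layers.psd")          # dispatched to PsdImageFile
#     print(im.size, len(im.layers))
#     for n in range(1, len(im.layers) + 1):
#         name, bbox = im.seek(n)
#         print(n, name, bbox)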
|
paddlespeech/t2s/training/optimizer.py | JiehangXie/PaddleSpeech | 1,540 | 11076070 | <filename>paddlespeech/t2s/training/optimizer.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
optim_classes = dict(
adadelta=paddle.optimizer.Adadelta,
adagrad=paddle.optimizer.Adagrad,
adam=paddle.optimizer.Adam,
adamax=paddle.optimizer.Adamax,
adamw=paddle.optimizer.AdamW,
lamb=paddle.optimizer.Lamb,
momentum=paddle.optimizer.Momentum,
rmsprop=paddle.optimizer.RMSProp,
sgd=paddle.optimizer.SGD, )
def build_optimizers(model: nn.Layer,
optim='adadelta',
max_grad_norm=None,
learning_rate=0.01) -> paddle.optimizer:
optim_class = optim_classes.get(optim)
if optim_class is None:
raise ValueError(f"must be one of {list(optim_classes)}: {optim}")
else:
grad_clip = None
if max_grad_norm:
grad_clip = paddle.nn.ClipGradByGlobalNorm(max_grad_norm)
optim = optim_class(
parameters=model.parameters(),
learning_rate=learning_rate,
grad_clip=grad_clip)
optimizers = optim
return optimizers
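# --- Illustrative usage (editor's sketch) -------------------------------------
# A minimal, hypothetical example of wiring build_optimizers() to a toy layer.
# The layer shape and hyper-parameters are made up; only the paddle APIs already
# imported above are assumed.
if __name__ == '__main__':
    toy_model = nn.Linear(8, 2)
    optimizer = build_optimizers(
        toy_model, optim='adam', max_grad_norm=1.0, learning_rate=1e-3)
    print(type(optimizer).__name__)  # expected: Adam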
|
Imaging/Core/Testing/Python/TestOpenClose3D.py | txwhhny/vtk | 1,755 | 11076081 | <reponame>txwhhny/vtk
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Test the OpenClose3D filter.
# Image pipeline
reader = vtk.vtkPNGReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
thresh = vtk.vtkImageThreshold()
thresh.SetInputConnection(reader.GetOutputPort())
thresh.SetOutputScalarTypeToUnsignedChar()
thresh.ThresholdByUpper(2000.0)
thresh.SetInValue(255)
thresh.SetOutValue(0)
thresh.ReleaseDataFlagOff()
my_close = vtk.vtkImageOpenClose3D()
my_close.SetInputConnection(thresh.GetOutputPort())
my_close.SetOpenValue(0)
my_close.SetCloseValue(255)
my_close.SetKernelSize(5,5,3)
my_close.ReleaseDataFlagOff()
# for coverage (we could compare results to see if they are correct).
my_close.DebugOn()
my_close.DebugOff()
my_close.GetOutput()
my_close.GetCloseValue()
my_close.GetOpenValue()
#my_close AddObserver ProgressEvent {set pro [my_close GetProgress]; puts "Completed $pro"; flush stdout}
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(my_close.GetOutputPort())
viewer.SetColorWindow(255)
viewer.SetColorLevel(127.5)
viewer.Render()
# --- end of script --
|
src/visitpy/visit_utils/src/builtin/convert2to3.py | visit-dav/vis | 226 | 11076084 | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
###############################################################################
# file: convert2to3.py
# Purpose: Python helper function for converting a python 2 script string
# to be python 3 compatible using lib2to3
#
#
# Programmer: <NAME>
# Creation: Tue Jul 21 16:53:00 PDT 2020
#
#
# Modifications:
#
#
###############################################################################
from lib2to3 import refactor
# this class holds static state
class AutoPy2to3Flag:
enabled = False
def SetAutoPy2to3(val):
AutoPy2to3Flag.enabled = val
def GetAutoPy2to3():
return AutoPy2to3Flag.enabled
def ConvertPy2to3(py_script_text):
"""
Converts contents of input string py_script_text to python 3 using lib2to3
and returns the result as a string.
"""
# As far as I can tell, lib2to3 isn't documented that well but once
# you find the right recipe it's very easy to use for this case.
# ref:
# https://stackoverflow.com/questions/30340151/using-2to3-on-in-memory-scripts
fixes = refactor.get_fixers_from_package('lib2to3.fixes')
converter = refactor.RefactoringTool(fixes)
ast = converter.refactor_string(py_script_text, '<script>')
return str(ast)
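# --- Illustrative usage (editor's sketch) -------------------------------------
# A small self-contained example of the helper above: lib2to3's fixers rewrite
# the Python 2 print statement into its Python 3 call form. The input string is
# arbitrary example code, not part of VisIt itself.
if __name__ == "__main__":
    py2_src = "print 'hello, visit'\n"
    print(ConvertPy2to3(py2_src))  # expected: print('hello, visit')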
|
housekeeping/upsource-webhook.py | akshatkarani/iroha | 1,467 | 11076109 | <filename>housekeeping/upsource-webhook.py
#!/usr/bin/python3
import argparse
import json
import os
import requests
from flask import Flask, request
app = Flask(__name__)
class State:
in_progress = "in_progress"
failed = "failed"
success = "success"
def submit_ci_status(key = "IROHA",
state = State.in_progress,
url = "null",
name = "null",
description = "null",
revision = "null"):
upsource_url = "http://upsource.soramitsu.co.jp/~buildStatus"
project = "iroha"
post_body = {
"key": key,
"state": state,
"url": url,
"name": name,
"description": description,
"project": project,
"revision": revision
}
# fails if token is not present
TOKEN = os.environ["UPSOURCE_TOKEN"]
post_headers = {
"Content-Type": "application/json; charset=UTF-8",
"Authorization": "Basic {}".format(TOKEN)
}
r = requests.post(
upsource_url,
headers=post_headers,
data=json.dumps(post_body)
)
print("status code: {}".format(r.status_code))
def process_json(parsed_json):
options = {}
try:
pl = parsed_json["payload"]
options["committer_login"] = pl["all_commit_details"][0]["committer_login"]
options["commit"] = pl["all_commit_details"][0]["commit"]
options["build_num"] = pl["build_num"]
options["build_url"] = pl["build_url"]
options["outcome"] = pl["outcome"]
steps = pl["steps"]
for step in steps:
actions = step["actions"][0]
if actions["failed"]: # not None
options["failed_step"] = step["name"]
return options
except:
return None
def prepare_key(s):
return "IROHA-{}".format(s)
def prepare_state(s):
return State.success if s == "success" else State.failed
def prepare_name(s):
return str(s)
def prepare_description(s):
return "By {}".format(s)
def in_progress_update():
print('in progress update')
try:
# try to get these environment variables
# throw, if at least one is missing
build_num = str(os.environ["CIRCLE_BUILD_NUM"])
build_url = str(os.environ["CIRCLE_BUILD_URL"])
commit = os.environ["CIRCLE_SHA1"]
username = os.environ["CIRCLE_USERNAME"]
submit_ci_status(
key=prepare_key(build_num),
state=State.in_progress,
url=build_url,
name=build_num,
description=prepare_name(username),
revision=commit
)
except Exception as e:
# just print exception and quit with no errcode
print("exception occurred: {}".format(e))
@app.route("/", methods=['POST'])
def recv_json():
try:
if len(request.data) > 10 * 1024**2: # 10 MB
return "request is too big"
options = process_json(request.get_json())
if not options:
return "can not parse json body"
submit_ci_status(
key = prepare_key(options["build_num"]),
state = prepare_state(options["outcome"]),
url = options["build_url"],
name = prepare_name(options["build_num"]),
description = prepare_description(options["committer_login"]),
revision = options["commit"]
)
return "ok"
except Exception as e:
return "error occurred: {}".format(e)
def main():
parser = argparse.ArgumentParser(description='Update upsource CI status')
parser.add_argument('--in-progress', action='store_true',
help='run script once in circle ci, notify upsource about "in progress" status of current commit')
parser.add_argument('--server', dest='port',
help='run script as a server on specified interface and port. it processes failed/succeeded commits')
args = parser.parse_args()
if not args.port and not args.in_progress:
print("use -h for help")
exit(0)
elif args.port:
try:
port = int(args.port)
except:
print("can not parse port")
exit(1)
app.run(host='0.0.0.0', port=port)
elif args.in_progress:
in_progress_update()
if __name__ == '__main__':
main()
|
Interview Preparation Kit/03 - Dictionaries and Hashmaps/05 - Frequency Queries.py | srgeyK87/Hacker-Rank-30-days-challlenge | 275 | 11076125 | <filename>Interview Preparation Kit/03 - Dictionaries and Hashmaps/05 - Frequency Queries.py
# ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/frequency-queries/problem
# Difficulty: Medium
# Max Score: 40
# Language: Python
# ========================
# Solution
# ========================
import os
# Complete the freqQuery function below.
def freqQuery(queries):
count = dict()
result = list()
for q in queries:
if q[0] == 1:
try:
count[q[1]] += 1
except:
count[q[1]] = 1
elif q[0] == 2:
try:
count[q[1]] -= 1
if count[q[1]] == 0:
del count[q[1]]
except:
continue
else:
if q[1] in set(count.values()):
result.append('1')
else:
result.append('0')
return result
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input().strip())
queries = []
for _ in range(q):
queries.append(list(map(int, input().rstrip().split())))
ans = freqQuery(queries)
fptr.write('\n'.join(map(str, ans)))
fptr.write('\n')
fptr.close()
|
matchzoo/preprocessors/__init__.py | MiaoBao/MatchZoo-py | 468 | 11076140 | from . import units
from .naive_preprocessor import NaivePreprocessor
from .basic_preprocessor import BasicPreprocessor
from .bert_preprocessor import BertPreprocessor
def list_available() -> list:
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo.utils import list_recursive_concrete_subclasses
return list_recursive_concrete_subclasses(BasePreprocessor)
|
openspeech/datasets/librispeech/preprocess/preprocess.py | CanYouImagine/openspeech | 207 | 11076150 | <gh_stars>100-1000
# MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
LIBRI_SPEECH_DATASETS = [
'train-960',
'dev-clean',
'dev-other',
'test-clean',
'test-other',
]
def collect_transcripts(dataset_path):
""" Collect librispeech transcripts """
transcripts_collection = list()
for dataset in LIBRI_SPEECH_DATASETS:
dataset_transcripts = list()
for subfolder1 in os.listdir(os.path.join(dataset_path, dataset)):
for subfolder2 in os.listdir(os.path.join(dataset_path, dataset, subfolder1)):
for file in os.listdir(os.path.join(dataset_path, dataset, subfolder1, subfolder2)):
if file.endswith('txt'):
with open(os.path.join(dataset_path, dataset, subfolder1, subfolder2, file)) as f:
for line in f.readlines():
tokens = line.split()
audio_path = os.path.join(dataset, subfolder1, subfolder2, tokens[0])
audio_path = f"{audio_path}.flac"
transcript = " ".join(tokens[1:])
dataset_transcripts.append('%s|%s' % (audio_path, transcript))
else:
continue
transcripts_collection.append(dataset_transcripts)
return transcripts_collection
|
Algo and DSA/LeetCode-Solutions-master/Python/ransom-note.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11076159 | # Time: O(n)
# Space: O(1)
class Solution(object):
def canConstruct(self, ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
counts = [0] * 26
letters = 0
for c in ransomNote:
if counts[ord(c) - ord('a')] == 0:
letters += 1
counts[ord(c) - ord('a')] += 1
for c in magazine:
counts[ord(c) - ord('a')] -= 1
if counts[ord(c) - ord('a')] == 0:
letters -= 1
if letters == 0:
break
return letters == 0
# Time: O(n)
# Space: O(1)
import collections
class Solution2(object):
def canConstruct(self, ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
return not collections.Counter(ransomNote) - collections.Counter(magazine)
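# Quick self-check (editor's addition): exercises both solutions above on a
# couple of made-up inputs.
if __name__ == "__main__":
    for note, mag, expected in [("aa", "aab", True), ("aa", "ab", False)]:
        assert Solution().canConstruct(note, mag) == expected
        assert Solution2().canConstruct(note, mag) == expected
    print("ok")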
|
sciwx/text/__init__.py | dada1437903138/imagepy | 1,178 | 11076168 | from .mdutil import md2html
from .mdpad import MDPad, MDFrame, MDNoteBook, MDNoteFrame
from .textpad import TextPad, TextFrame, TextNoteBook, TextNoteFrame |
lib/spack/spack/test/cmd/extensions.py | LiamBindle/spack | 2,360 | 11076208 | <reponame>LiamBindle/spack
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
from spack.main import SpackCommand, SpackCommandError
from spack.spec import Spec
extensions = SpackCommand('extensions')
@pytest.fixture
def python_database(mock_packages, mutable_database):
specs = [Spec(s).concretized() for s in [
'python',
'py-extension1',
'py-extension2',
]]
for spec in specs:
spec.package.do_install(fake=True, explicit=True)
yield
@pytest.mark.db
def test_extensions(mock_packages, python_database, config, capsys):
ext2 = Spec("py-extension2").concretized()
def check_output(ni, na):
with capsys.disabled():
output = extensions("python")
packages = extensions("-s", "packages", "python")
installed = extensions("-s", "installed", "python")
activated = extensions("-s", "activated", "python")
assert "==> [email protected]" in output
assert "==> 3 extensions" in output
assert "flake8" in output
assert "py-extension1" in output
assert "py-extension2" in output
assert "==> 3 extensions" in packages
assert "flake8" in packages
assert "py-extension1" in packages
assert "py-extension2" in packages
assert "installed" not in packages
assert "activated" not in packages
assert ("%s installed" % (ni if ni else "None")) in output
assert ("%s activated" % (na if na else "None")) in output
assert ("%s installed" % (ni if ni else "None")) in installed
assert ("%s activated" % (na if na else "None")) in activated
check_output(2, 0)
ext2.package.do_activate()
check_output(2, 2)
ext2.package.do_deactivate(force=True)
check_output(2, 1)
ext2.package.do_activate()
check_output(2, 2)
ext2.package.do_uninstall(force=True)
check_output(1, 1)
def test_extensions_no_arguments(mock_packages):
out = extensions()
assert 'python' in out
def test_extensions_raises_if_not_extendable(mock_packages):
with pytest.raises(SpackCommandError):
extensions("flake8")
def test_extensions_raises_if_multiple_specs(mock_packages):
with pytest.raises(SpackCommandError):
extensions("python", "flake8")
|
venv/Lib/site-packages/zmq/green/eventloop/zmqstream.py | mirosa25/ITI-202-Final-Project | 652 | 11076217 | from zmq.eventloop.zmqstream import *
from zmq.green.eventloop.ioloop import IOLoop
RealZMQStream = ZMQStream
class ZMQStream(RealZMQStream):
def __init__(self, socket, io_loop=None):
io_loop = io_loop or IOLoop.instance()
super(ZMQStream, self).__init__(socket, io_loop=io_loop)
|
tasks/ansible_2420/test_runner.py | 745184532/cmdb | 251 | 11076230 | # -*- coding: utf-8 -*-
from runner import AdHocRunner, CommandRunner
from inventory import BaseInventory
def TestAdHocRunner():
"""
    Execute multiple commands in yml form
:return:
"""
host_data = [
{
"hostname": "server",
"ip": "192.168.5.224",
"port": 22,
"username": "root",
"password": "",
"private_key": '/root/.ssh/id_rsa',
"become": {
"method": "sudo",
"user": "root",
"pass": None,
}
},
]
inventory = BaseInventory(host_data)
runner = AdHocRunner(inventory)
tasks = [
{"action": {"module": "shell", "args": "hostname"}, "name": "run_whoami"},
]
ret = runner.run(tasks, "all")
print(ret.results_summary)
print(ret.results_raw)
def TestCommandRunner():
"""
    Execute a single command and return the result
:return:
"""
host_data = [
{
"hostname": "testserver",
"ip": "192.168.10.100",
"port": 22,
"username": "root",
"password": "<PASSWORD>",
},
]
inventory = BaseInventory(host_data)
runner = CommandRunner(inventory)
res = runner.execute('pwd', 'all')
print(res.results_command)
print(res.results_raw)
print(res.results_command['testserver']['stdout'])
if __name__ == "__main__":
TestAdHocRunner()
# TestCommandRunner()
|
misc/scripts/frontend/utils/normalize_lab_for_merlin.py | G-Thor/merlin | 1,305 | 11076263 | <reponame>G-Thor/merlin
import sys,os
import numpy as np
def divide_into_states(st_dur, fn_dur, num_states):
state_dur = np.zeros((2, num_states), np.int64)
state_dur[0][0] = st_dur
state_dur[1][num_states-1] = fn_dur
num_of_frames = (fn_dur-st_dur)/50000
nof_each_state = num_of_frames/num_states
#if nof_each_state<1:
# print 'warning: some states are with zero duration'
for k in range(num_states-1):
state_dur[1][k] = state_dur[0][k]+(nof_each_state*50000)
state_dur[0][k+1] = state_dur[1][k]
return state_dur
def normalize_dur(dur):
rem_t = dur%50000
if rem_t<=25000:
dur = dur - rem_t
else:
dur = dur + (50000-rem_t)
return dur
def normalize_label_files(in_lab_file, out_lab_file, label_style, write_time_stamps):
out_f = open(out_lab_file,'w')
in_f = open(in_lab_file,'r')
data = in_f.readlines()
in_f.close()
ph_arr=[]
for i in data:
fstr = i.strip().split()
ftag = fstr[2]
ph = ftag[ftag.index('-')+1:ftag.index('+')]
if(ph=='pau'):
continue;
ph_arr.append(ph)
count=0;prev_ph=''
merged_data = [[],[],[]]
for i in data:
fstr = i.strip().split()
start_time = fstr[0]
end_time = fstr[1]
ftag = fstr[2]
mid_indx = ftag.index(':')
p1 = ftag[0:mid_indx]
p2 = ftag[mid_indx:]
ph = ftag[ftag.index('-')+1:ftag.index('+')]
#print ph
if(ph!='pau'):
count=count+1
if(prev_ph=='pau' and ph=='pau'):
continue;
if(count<=2 and 'pau' in p1) or (count>len(ph_arr)-2 and 'pau' in p1):
p1 = p1.replace('pau','sil')
ftag = p1+p2
if(count>=1 and count<len(ph_arr)):
if '-sil+' in ftag:
ftag = ftag.replace('-sil+','-pau+')
merged_data[0].append(start_time)
merged_data[1].append(end_time)
merged_data[2].append(ftag)
prev_ph=ph
num_states = 5
tot_num_ph = len(merged_data[0])
for j in range(tot_num_ph):
if j<tot_num_ph-1:
ph_end = normalize_dur(int(merged_data[0][j+1]))
merged_data[0][j+1] = str(ph_end)
merged_data[1][j] = merged_data[0][j+1]
else:
end_time = normalize_dur(int(end_time))
merged_data[1][j]=str(end_time)
if (int(merged_data[1][j])-int(merged_data[0][j]))==0:
            print('Error: zero duration for this phone')
            raise ValueError('zero duration for this phone')
if label_style == "phone_align":
if write_time_stamps:
out_f.write(merged_data[0][j]+' '+merged_data[1][j]+' '+merged_data[2][j]+'\n')
else:
out_f.write(merged_data[2][j]+'\n')
elif label_style == "state_align":
if write_time_stamps:
for k in range(num_states):
state_dur = divide_into_states(int(merged_data[0][j]), int(merged_data[1][j]), num_states)
out_f.write(str(state_dur[0][k])+' '+str(state_dur[1][k])+' '+merged_data[2][j]+'['+str(k+2)+']\n')
else:
out_f.write(merged_data[2][j]+'\n')
out_f.close()
if __name__ == "__main__":
if len(sys.argv)<5:
print('Usage: python normalize_lab_for_merlin.py <input_lab_dir> <output_lab_dir> <label_style> <file_id_list_scp> <optional: write_time_stamps (1/0)>\n')
sys.exit(0)
in_lab_dir = sys.argv[1]
out_lab_dir = sys.argv[2]
label_style = sys.argv[3]
file_id_list = sys.argv[4]
write_time_stamps = True
if len(sys.argv)==6:
if int(sys.argv[5])==0:
write_time_stamps = False
if label_style!="phone_align" and label_style!="state_align":
print("These labels %s are not supported as of now...please use state_align or phone_align!!" % (label_style))
sys.exit(0)
if not os.path.exists(out_lab_dir):
os.makedirs(out_lab_dir)
in_f = open(file_id_list,'r')
for i in in_f.readlines():
filename = i.strip()+'.lab'
print(filename)
in_lab_file = os.path.join(in_lab_dir, filename)
out_lab_file = os.path.join(out_lab_dir, filename)
normalize_label_files(in_lab_file, out_lab_file, label_style, write_time_stamps)
#break;
in_f.close()
|
tests/basics/bytes_compare.py | learnforpractice/micropython-cpp | 13,648 | 11076272 | <reponame>learnforpractice/micropython-cpp
print(b"" == b"")
print(b"" > b"")
print(b"" < b"")
print(b"" == b"1")
print(b"1" == b"")
print("==")
print(b"" > b"1")
print(b"1" > b"")
print(b"" < b"1")
print(b"1" < b"")
print(b"" >= b"1")
print(b"1" >= b"")
print(b"" <= b"1")
print(b"1" <= b"")
print(b"1" == b"1")
print(b"1" != b"1")
print(b"1" == b"2")
print(b"1" == b"10")
print(b"1" > b"1")
print(b"1" > b"2")
print(b"2" > b"1")
print(b"10" > b"1")
print(b"1/" > b"1")
print(b"1" > b"10")
print(b"1" > b"1/")
print(b"1" < b"1")
print(b"2" < b"1")
print(b"1" < b"2")
print(b"1" < b"10")
print(b"1" < b"1/")
print(b"10" < b"1")
print(b"1/" < b"1")
print(b"1" >= b"1")
print(b"1" >= b"2")
print(b"2" >= b"1")
print(b"10" >= b"1")
print(b"1/" >= b"1")
print(b"1" >= b"10")
print(b"1" >= b"1/")
print(b"1" <= b"1")
print(b"2" <= b"1")
print(b"1" <= b"2")
print(b"1" <= b"10")
print(b"1" <= b"1/")
print(b"10" <= b"1")
print(b"1/" <= b"1")
print(b'o' == b'\n')
|
FaceDetection/cascade.py | onkarsiyag/face | 165 | 11076283 | <gh_stars>100-1000
"""
Programmer : EOF
File : cascade.py
Date : 2016.01.17
E-mail : <EMAIL>
License : MIT License
"""
from config import POSITIVE_SAMPLE
from config import NEGATIVE_SAMPLE
from config import TRAINING_IMG_HEIGHT
from config import TRAINING_IMG_WIDTH
from config import FEATURE_FILE_TRAINING
from config import FEATURE_NUM
from config import ADABOOST_LIMIT
from config import ADABOOST_CACHE_FILE
from config import DEBUG_MODEL
from haarFeature import Feature
from image import ImageSet
from adaboost import AdaBoost
from adaboost import getCachedAdaBoost
import os
import numpy
class Cascade:
def __init__(self, face_dir = "", nonface_dir = "", train = True, limit = 30):
#tot_samples = 0
self.Face = ImageSet(face_dir, sampleNum = POSITIVE_SAMPLE)
self.nonFace = ImageSet(nonface_dir, sampleNum = NEGATIVE_SAMPLE)
tot_samples = self.Face.sampleNum + self.nonFace.sampleNum
self.classifier = AdaBoost
self.haar = Feature(TRAINING_IMG_WIDTH, TRAINING_IMG_HEIGHT)
if os.path.isfile(FEATURE_FILE_TRAINING + ".npy"):
self._mat = numpy.load(FEATURE_FILE_TRAINING + ".npy")
else:
if DEBUG_MODEL is True:
self._mat = numpy.zeros((self.haar.featuresNum, tot_samples))
for i in xrange(self.Face.sampleNum):
featureVec = self.haar.calFeatureForImg(self.Face.images[i])
for j in xrange(self.haar.featuresNum):
self._mat[j][i ] = featureVec[j]
for i in xrange(self.nonFace.sampleNum):
featureVec = self.haar.calFeatureForImg(self.nonFace.images[i])
for j in xrange(self.haar.featuresNum):
self._mat[j][i + self.Face.sampleNum] = featureVec[j]
numpy.save(FEATURE_FILE_TRAINING, self._mat)
else:
from mapReduce import map
from mapReduce import reduce
map(self.Face, self.nonFace)
self._mat = reduce()
featureNum, sampleNum = self._mat.shape
assert sampleNum == (POSITIVE_SAMPLE + NEGATIVE_SAMPLE)
assert featureNum == FEATURE_NUM
Label_Face = [+1 for i in xrange(POSITIVE_SAMPLE)]
Label_NonFace = [-1 for i in xrange(NEGATIVE_SAMPLE)]
self._label = numpy.array(Label_Face + Label_NonFace)
self.limit = limit
self.classifierNum = 0
self.strong_classifier = [None for i in xrange(limit)]
def train(self):
        raise NotImplementedError("Unfinished")
detection_rate = 0
from config import EXPECTED_FPR_PRE_LAYYER
from config import EXPECTED_FPR
from config import LABEL_NEGATIVE
cur_fpr = 1.0
mat = self._mat
label = self._label
for i in xrange(self.limit):
if cur_fpr < EXPECTED_FPR:
break
else:
cache_filename = ADABOOST_CACHE_FILE + str(i)
if os.path.isfile(cache_filename):
self.strong_classifier[i] = getCachedAdaBoost(mat = self._mat,
label = self._label,
filename= cache_filename,
limit = ADABOOST_LIMIT)
else:
self.strong_classifier[i] = AdaBoost(mat, label, limit = ADABOOST_LIMIT)
output, fpr = self.strong_classifier[i].train()
cur_fpr *= fpr
fp_num = fpr * numpy.count_nonzero(label == LABEL_NEGATIVE)
self.strong_classifier[i].saveModel(cache_filename)
mat, label = self.updateTrainingDate(mat, output, fp_num)
self.classifierNum += 1
def updateTrainingDate(self, mat, output, fp_num):
fp_num = int(fp_num)
assert len(output) == self._label.size
_mat = numpy.zeros((FEATURE_NUM, POSITIVE_SAMPLE + fp_num), dtype=numpy.float16)
_mat[:, :POSITIVE_SAMPLE] = mat[:, :POSITIVE_SAMPLE]
"""
for i in xrange(POSITIVE_SAMPLE):
for j in xrange(FEATURE_NUM):
mat[j][i] = self._mat[j][i]
"""
counter = 0
# only reserve negative samples which are classified wrong
for i in xrange(POSITIVE_SAMPLE, self._label.size):
if output[i] != self._label[i]:
for j in xrange(FEATURE_NUM):
_mat[j][POSITIVE_SAMPLE + counter] = mat[j][i]
counter += 1
assert counter == fp_num
Label_Face = [+1 for i in xrange(POSITIVE_SAMPLE)]
Label_NonFace = [-1 for i in xrange(fp_num)]
_label = numpy.array(Label_Face + Label_NonFace)
return _mat, _label
def predict(self):
output = numpy.zeros(POSITIVE_SAMPLE + NEGATIVE_SAMPLE, dtype= numpy.float16)
for i in xrange(self.classifierNum):
self.strong_classifier[i].prediction(mat, th = 0)
"""unfinished"""
def save(self):
pass
def is_goodenough(self):
pass
|
src/etc/gdb_lookup.py | mbc-git/rust | 66,762 | 11076286 | <filename>src/etc/gdb_lookup.py
import gdb
import re
from gdb_providers import *
from rust_types import *
_gdb_version_matched = re.search('([0-9]+)\\.([0-9]+)', gdb.VERSION)
gdb_version = [int(num) for num in _gdb_version_matched.groups()] if _gdb_version_matched else []
def register_printers(objfile):
objfile.pretty_printers.append(lookup)
# BACKCOMPAT: rust 1.35
def is_hashbrown_hashmap(hash_map):
return len(hash_map.type.fields()) == 1
def classify_rust_type(type):
type_class = type.code
if type_class == gdb.TYPE_CODE_STRUCT:
return classify_struct(type.tag, type.fields())
if type_class == gdb.TYPE_CODE_UNION:
return classify_union(type.fields())
return RustType.OTHER
def check_enum_discriminant(valobj):
content = valobj[valobj.type.fields()[0]]
fields = content.type.fields()
if len(fields) > 1:
discriminant = int(content[fields[0]]) + 1
if discriminant > len(fields):
# invalid discriminant
return False
return True
def lookup(valobj):
rust_type = classify_rust_type(valobj.type)
if rust_type == RustType.ENUM:
# use enum provider only for GDB <7.12
if gdb_version[0] < 7 or (gdb_version[0] == 7 and gdb_version[1] < 12):
if check_enum_discriminant(valobj):
return EnumProvider(valobj)
if rust_type == RustType.STD_STRING:
return StdStringProvider(valobj)
if rust_type == RustType.STD_OS_STRING:
return StdOsStringProvider(valobj)
if rust_type == RustType.STD_STR:
return StdStrProvider(valobj)
if rust_type == RustType.STD_SLICE:
return StdSliceProvider(valobj)
if rust_type == RustType.STD_VEC:
return StdVecProvider(valobj)
if rust_type == RustType.STD_VEC_DEQUE:
return StdVecDequeProvider(valobj)
if rust_type == RustType.STD_BTREE_SET:
return StdBTreeSetProvider(valobj)
if rust_type == RustType.STD_BTREE_MAP:
return StdBTreeMapProvider(valobj)
if rust_type == RustType.STD_HASH_MAP:
if is_hashbrown_hashmap(valobj):
return StdHashMapProvider(valobj)
else:
return StdOldHashMapProvider(valobj)
if rust_type == RustType.STD_HASH_SET:
hash_map = valobj[valobj.type.fields()[0]]
if is_hashbrown_hashmap(hash_map):
return StdHashMapProvider(valobj, show_values=False)
else:
return StdOldHashMapProvider(hash_map, show_values=False)
if rust_type == RustType.STD_RC:
return StdRcProvider(valobj)
if rust_type == RustType.STD_ARC:
return StdRcProvider(valobj, is_atomic=True)
if rust_type == RustType.STD_CELL:
return StdCellProvider(valobj)
if rust_type == RustType.STD_REF:
return StdRefProvider(valobj)
if rust_type == RustType.STD_REF_MUT:
return StdRefProvider(valobj)
if rust_type == RustType.STD_REF_CELL:
return StdRefCellProvider(valobj)
return None
|
tools/Vitis-AI-Runtime/VART/vart/trace/vaitrace/python/vaitrace_py/setup.py | hito0512/Vitis-AI | 848 | 11076300 |
# Copyright 2019 Xilinx Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from shutil import copy
setup(
name='vaitrace_py',
version='1.3',
description="",
py_modules=["__init__", "vaitrace_py", "tracepoint"],
python_requires='>=3.5',
zip_safe=False,
)
|
jltc/templatetags/tags.py | yangjourney/JMeter-Control-Center | 166 | 11076305 | <gh_stars>100-1000
import datetime
from django import template
register = template.Library()
@register.simple_tag()
def get_percentage(a1, a2, *args, **kwargs):
try:
return round(100 * a1 / a2, 1)
except (TypeError, ZeroDivisionError) as e:
return 0
@register.simple_tag()
def get_percentage_abs(a1, a2, *args, **kwargs):
try:
p = 0
if a2 > a1:
p = (a2 - a1) / a1 * 100
elif a2 < a1:
p = (a1 - a2) / a2 * 100
return abs(round(p, 1))
except (TypeError, ZeroDivisionError) as e:
return 0
@register.simple_tag()
def get_percentage_rel(a1, a2, *args, **kwargs):
try:
return round(100 - 100 * a1 / a2, 1)
except (TypeError, ZeroDivisionError) as e:
return 0
@register.simple_tag()
def subtract(a1, a2, *args, **kwargs):
return a1 - a2
@register.simple_tag()
def print_timestamp(timestamp, *args, **kwargs):
return datetime.datetime.fromtimestamp(timestamp / 1000)
@register.simple_tag()
def seconds_to_time(seconds, *args, **kwargs):
return str(datetime.timedelta(seconds=int(seconds)))
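# --- Illustrative usage sketch (not part of the original file) ---
# In a Django template these helpers would be used after `{% load tags %}`,
# e.g. `{% get_percentage part total %}`. The quick check below calls them
# directly and only assumes that Django itself is importable.
if __name__ == '__main__':
    print(get_percentage(25, 200))       # 12.5
    print(get_percentage_abs(100, 150))  # 50.0
    print(seconds_to_time(3661))         # 1:01:01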
|
examples/StatisticalLearningMethod/adaboost.py | wwwy-binary/NP_ML | 237 | 11076306 | import numpy as np
from np_ml import AdaBoost, TrivialClassification
if __name__ == '__main__':
print("--------------------------------------------------------")
print("AdaBoost simple example!")
print("example in Statistical Learning Method(《统计学习方法》)")
print("--------------------------------------------------------")
x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
y = np.array([1, 1, 1, -1, -1, -1, 1, 1, 1, -1])
print("x: {}".format(x))
print("y: {}".format(y))
print("")
adb = AdaBoost(TrivialClassification)
adb.fit(x, y, detailed=True)
print("y_pred: {}".format(adb.predict(x)))
|
recogym/envs/observation.py | philomenec/reco-gym | 413 | 11076351 | <reponame>philomenec/reco-gym
class Observation:
def __init__(self, context, sessions):
self.current_context = context
self.current_sessions = sessions
def context(self):
return self.current_context
def sessions(self):
return self.current_sessions
|
wren/data_ingestion/news_articles_crawler.py | tzano/wren | 245 | 11076377 | <reponame>tzano/wren<filename>wren/data_ingestion/news_articles_crawler.py
from core.scheduler import Scheduler
import logging
import time
from resources.constants import WSJ, VICE, USTODAY, GUARDIAN, THEGLOBAEANDMAIL, TELEGRAPH, REUTERS, NYTIMES, NYPOST, \
CNN, ALJAZEERA, BBC, MEDIA_TYPE_ARTICLES, EMPTY_STR, EMPTY_DICT, EMPTY_LIST, EN_LANG
from core.media_org import NewsMediaOrg
import json
import binascii
class NewsArticlesCrawler(Scheduler):
"""
News Articles class
"""
def get_news_articles(self):
"""
get news articles.
:return: generator
"""
news_sources = [
NewsMediaOrg(news_org=ALJAZEERA, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=BBC, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=CNN, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=NYPOST, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=NYTIMES, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=REUTERS, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=TELEGRAPH, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=THEGLOBAEANDMAIL, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=GUARDIAN, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=USTODAY, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=VICE, media_types=MEDIA_TYPE_ARTICLES),
NewsMediaOrg(news_org=WSJ, media_types=MEDIA_TYPE_ARTICLES)
]
for news_ingestor in news_sources:
logging.info("Getting articles from {}".format(news_ingestor))
for article in news_ingestor.parse_articles():
yield article
def run(self):
"""
run the thread
"""
while True:
try:
for article in self.get_news_articles():
if article:
self.kafka_producer.send(self.kafka_topic, str(article.json()))
time.sleep(self.seconds)
except Exception as e:
raise e
|
lightner/__main__.py | LiyuanLucasLiu/LightNER | 115 | 11076440 | <reponame>LiyuanLucasLiu/LightNER
#!/usr/bin/env python
import logging
import os
import sys
import codecs
import argparse
from lightner.commands.main import main
if __name__ == "__main__":
main()
|
src/pybel/io/gpickle.py | rpatil524/pybel | 103 | 11076506 | <reponame>rpatil524/pybel
# -*- coding: utf-8 -*-
"""Conversion functions for BEL graphs with bytes and Python pickles."""
import gzip
from io import BytesIO
from typing import BinaryIO, Union
from networkx.utils import open_file
from .utils import raise_for_not_bel, raise_for_old_graph
from ..struct.graph import BELGraph
try:
import pickle5 as pickle
except ImportError:
import pickle
__all__ = [
'to_bytes',
'from_bytes',
'to_bytes_gz',
'from_bytes_gz',
'to_pickle',
'to_pickle_gz',
'from_pickle',
'from_pickle_gz',
]
def to_bytes(graph: BELGraph, protocol: int = pickle.HIGHEST_PROTOCOL) -> bytes:
"""Convert a graph to bytes with pickle.
Note that the pickle module has some incompatibilities between Python 2 and 3. To export a universally importable
pickle, choose 0, 1, or 2.
:param graph: A BEL graph
:param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``.
.. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format
"""
raise_for_not_bel(graph)
return pickle.dumps(graph, protocol=protocol)
def from_bytes(bytes_graph: bytes, check_version: bool = True) -> BELGraph:
"""Read a graph from bytes (the result of pickling the graph).
    :param bytes_graph: The bytes to read (the result of pickling a BEL graph)
:param check_version: Checks if the graph was produced by this version of PyBEL
"""
graph = pickle.loads(bytes_graph)
raise_for_not_bel(graph)
if check_version:
raise_for_old_graph(graph)
return graph
def to_bytes_gz(graph: BELGraph, protocol: int = pickle.HIGHEST_PROTOCOL) -> bytes:
"""Convert a graph to gzipped bytes with pickle.
:param graph: A BEL graph
:param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``.
"""
io = BytesIO()
with gzip.open(io, mode='wb') as file:
pickle.dump(graph, file, protocol=protocol)
return io.getvalue()
def from_bytes_gz(bytes_graph: bytes) -> BELGraph:
"""Read a graph from gzipped bytes (the result of pickling the graph).
    :param bytes_graph: The gzipped bytes to read (the result of pickling a BEL graph)
"""
with gzip.GzipFile(fileobj=BytesIO(bytes_graph), mode='rb') as file:
return pickle.load(file)
@open_file(1, mode='wb')
def to_pickle(graph: BELGraph, path: Union[str, BinaryIO], protocol: int = pickle.HIGHEST_PROTOCOL) -> None:
"""Write this graph to a pickle file.
Note that the pickle module has some incompatibilities between Python 2 and 3. To export a universally importable
pickle, choose 0, 1, or 2.
:param graph: A BEL graph
:param path: A path or file-like
:param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``.
.. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format
"""
raise_for_not_bel(graph)
pickle.dump(graph, path, protocol)
def to_pickle_gz(graph: BELGraph, path: str, protocol: int = pickle.HIGHEST_PROTOCOL) -> None:
"""Write this graph to a gzipped pickle file."""
with gzip.open(path, 'wb') as file:
to_pickle(graph, file, protocol=protocol)
@open_file(0, mode='rb')
def from_pickle(path: Union[str, BinaryIO], check_version: bool = True) -> BELGraph:
"""Read a graph from a pickle file.
:param path: File or filename to read. Filenames ending in .gz or .bz2 will be uncompressed.
:param bool check_version: Checks if the graph was produced by this version of PyBEL
"""
graph = pickle.load(path)
raise_for_not_bel(graph)
if check_version:
raise_for_old_graph(graph)
return graph
def from_pickle_gz(path: str) -> BELGraph:
"""Read a graph from a gzipped pickle file."""
with gzip.open(path, 'rb') as file:
return from_pickle(file)
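if __name__ == '__main__':
    # Hedged round-trip sketch, not part of the original module; it assumes
    # BELGraph() can be built without arguments and is meant to be run as
    # `python -m pybel.io.gpickle` because of the relative imports above.
    _graph = BELGraph()
    _payload = to_bytes_gz(_graph)
    _restored = from_bytes_gz(_payload)
    print('round-trip ok:', _restored.number_of_nodes() == _graph.number_of_nodes())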
|
python/max_matching.py | EazyReal/codelibrary | 1,727 | 11076516 | def max_matching(graph, n2):
n1 = len(graph)
matching = [-1] * n2
matches = 0
for u in range(n1):
if find_path(graph, u, matching, [False] * n1):
matches += 1
return matches
def find_path(graph, u1, matching, vis):
vis[u1] = True
for v in range(len(matching)):
u2 = matching[v]
if graph[u1][v] and (u2 == -1 or not vis[u2] and find_path(graph, u2, matching, vis)):
matching[v] = u1
return True
return False
def test():
graph = [[False] * 2 for _ in range(2)]
graph[0][1] = True
graph[1][0] = True
graph[1][1] = True
assert 2 == max_matching(graph, 2)
test()
|
python3/tests/SMBConnectionTests/test_listpath.py | frafra/pysmb | 280 | 11076529 | # -*- coding: utf-8 -*-
from nose2.tools.decorators import with_setup, with_teardown
from smb.SMBConnection import SMBConnection
from smb.smb_constants import *
from smb import smb_structs
from .util import getConnectionInfo
conn = None
def setup_func_SMB1():
global conn
smb_structs.SUPPORT_SMB2 = False
info = getConnectionInfo()
conn = SMBConnection(info['user'], info['password'], info['client_name'], info['server_name'], use_ntlm_v2 = True)
assert conn.connect(info['server_ip'], info['server_port'])
def setup_func_SMB2():
global conn
smb_structs.SUPPORT_SMB2 = True
info = getConnectionInfo()
conn = SMBConnection(info['user'], info['password'], info['client_name'], info['server_name'], use_ntlm_v2 = True)
assert conn.connect(info['server_ip'], info['server_port'])
def teardown_func():
global conn
conn.close()
@with_setup(setup_func_SMB1)
@with_teardown(teardown_func)
def test_listPath_SMB1():
global conn
results = conn.listPath('smbtest', '/')
filenames = [( r.filename, r.isDirectory ) for r in results]
assert ( '\u6d4b\u8bd5\u6587\u4ef6\u5939', True ) in filenames # Test non-English folder names
assert ( 'Test Folder with Long Name', True ) in filenames # Test long English folder names
assert ( 'TestDir1', True ) in filenames # Test short English folder names
assert ( 'Implementing CIFS - SMB.html', False ) in filenames # Test long English file names
assert ( 'rfc1001.txt', False ) in filenames # Test short English file names
@with_setup(setup_func_SMB1)
@with_teardown(teardown_func)
def test_listSubPath_SMB1():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name/')
filenames = [( r.filename, r.isDirectory ) for r in results]
assert ( 'Test File.txt', False ) in filenames
assert ( 'Test Folder', True ) in filenames
assert ( '子文件夹', True ) in filenames
@with_setup(setup_func_SMB1)
@with_teardown(teardown_func)
def test_listPathWithManyFiles_SMB1():
global conn
results = conn.listPath('smbtest', '/RFC Archive/')
filenames = map(lambda r: ( r.filename, r.isDirectory ), results)
assert len(list(filenames))==999
@with_setup(setup_func_SMB2)
@with_teardown(teardown_func)
def test_listPath_SMB2():
global conn
results = conn.listPath('smbtest', '/')
filenames = [( r.filename, r.isDirectory ) for r in results]
assert ( '\u6d4b\u8bd5\u6587\u4ef6\u5939', True ) in filenames # Test non-English folder names
assert ( 'Test Folder with Long Name', True ) in filenames # Test long English folder names
assert ( 'TestDir1', True ) in filenames # Test short English folder names
assert ( 'Implementing CIFS - SMB.html', False ) in filenames # Test long English file names
assert ( 'rfc1001.txt', False ) in filenames # Test short English file names
@with_setup(setup_func_SMB2)
@with_teardown(teardown_func)
def test_listSubPath_SMB2():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name/')
filenames = [( r.filename, r.isDirectory ) for r in results]
assert ( 'Test File.txt', False ) in filenames
assert ( 'Test Folder', True ) in filenames
assert ( '子文件夹', True ) in filenames
@with_setup(setup_func_SMB2)
@with_teardown(teardown_func)
def test_listPathWithManyFiles_SMB2():
global conn
results = conn.listPath('smbtest', '/RFC Archive/')
filenames = map(lambda r: ( r.filename, r.isDirectory ), results)
assert len(list(filenames))==999
@with_setup(setup_func_SMB1)
@with_teardown(teardown_func)
def test_listPathFilterForDirectory_SMB1():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name', search = SMB_FILE_ATTRIBUTE_DIRECTORY)
filenames = map(lambda r: ( r.filename, r.isDirectory ), results)
assert len(list(filenames)) > 0
for f, isDirectory in filenames:
assert isDirectory
@with_setup(setup_func_SMB2)
@with_teardown(teardown_func)
def test_listPathFilterForDirectory_SMB2():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name', search = SMB_FILE_ATTRIBUTE_DIRECTORY)
filenames = map(lambda r: ( r.filename, r.isDirectory ), results)
assert len(list(filenames)) > 0
for f, isDirectory in filenames:
assert isDirectory
@with_setup(setup_func_SMB1)
@with_teardown(teardown_func)
def test_listPathFilterForFiles_SMB1():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name', search = SMB_FILE_ATTRIBUTE_READONLY | SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM | SMB_FILE_ATTRIBUTE_ARCHIVE | SMB_FILE_ATTRIBUTE_INCL_NORMAL)
filenames = map(lambda r: ( r.filename, r.isDirectory ), results)
assert len(list(filenames)) > 0
for f, isDirectory in filenames:
assert not isDirectory
@with_setup(setup_func_SMB2)
@with_teardown(teardown_func)
def test_listPathFilterForFiles_SMB2():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name', search = SMB_FILE_ATTRIBUTE_READONLY | SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM | SMB_FILE_ATTRIBUTE_ARCHIVE | SMB_FILE_ATTRIBUTE_INCL_NORMAL)
filenames = map(lambda r: ( r.filename, r.isDirectory ), results)
assert len(list(filenames)) > 0
for f, isDirectory in filenames:
assert not isDirectory
@with_setup(setup_func_SMB1)
@with_teardown(teardown_func)
def test_listPathFilterPattern_SMB1():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name', pattern = 'Test*')
filenames = list(map(lambda r: ( r.filename, r.isDirectory ), results))
assert len(filenames) == 2
assert ( u'Test File.txt', False ) in filenames
assert ( u'Test Folder', True ) in filenames
assert ( u'子文件夹', True ) not in filenames
@with_setup(setup_func_SMB2)
@with_teardown(teardown_func)
def test_listPathFilterPattern_SMB2():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name', pattern = 'Test*')
filenames = list(map(lambda r: ( r.filename, r.isDirectory ), results))
assert len(filenames) == 2
assert ( u'Test File.txt', False ) in filenames
assert ( u'Test Folder', True ) in filenames
assert ( u'子文件夹', True ) not in filenames
@with_setup(setup_func_SMB1)
@with_teardown(teardown_func)
def test_listPathFilterUnicodePattern_SMB1():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name', pattern = u'*件夹')
filenames = list(map(lambda r: ( r.filename, r.isDirectory ), results))
assert len(filenames) == 1
assert ( u'Test File.txt', False ) not in filenames
assert ( u'Test Folder', True ) not in filenames
assert ( u'子文件夹', True ) in filenames
@with_setup(setup_func_SMB2)
@with_teardown(teardown_func)
def test_listPathFilterUnicodePattern_SMB2():
global conn
results = conn.listPath('smbtest', '/Test Folder with Long Name', pattern = u'*件夹')
filenames = list(map(lambda r: ( r.filename, r.isDirectory ), results))
assert len(filenames) == 1
assert ( u'Test File.txt', False ) not in filenames
assert ( u'Test Folder', True ) not in filenames
assert ( u'子文件夹', True ) in filenames
@with_setup(setup_func_SMB1)
@with_teardown(teardown_func)
def test_listPathFilterEmptyList_SMB1():
global conn
results = conn.listPath('smbtest', '/RFC Archive', pattern = '*.abc')
filenames = list(map(lambda r: ( r.filename, r.isDirectory ), results))
@with_setup(setup_func_SMB2)
@with_teardown(teardown_func)
def test_listPathFilterEmptyList_SMB2():
global conn
results = conn.listPath('smbtest', '/RFC Archive', pattern = '*.abc')
filenames = list(map(lambda r: ( r.filename, r.isDirectory ), results))
|
laspy/lasreader.py | CCInc/laspy | 240 | 11076541 | import abc
import io
import logging
from typing import Optional, BinaryIO, Iterable, Union
from . import errors
from .compression import LazBackend
from .header import LasHeader
from .lasdata import LasData
from .point import record
from .vlrs.known import LasZipVlr
from .vlrs.vlrlist import VLRList
try:
import lazrs
except ModuleNotFoundError:
pass
try:
import laszip
except ModuleNotFoundError:
pass
logger = logging.getLogger(__name__)
class LasReader:
"""The reader class handles LAS and LAZ via one of the supported backend"""
def __init__(
self,
source: BinaryIO,
closefd: bool = True,
laz_backend: Optional[Union[LazBackend, Iterable[LazBackend]]] = None,
):
"""
Initialize the LasReader
Parameters
----------
source: file_object
closefd: bool, default True
laz_backend: LazBackend or list of LazBackend, optional
"""
self.closefd = closefd
if laz_backend is None:
laz_backend = LazBackend.detect_available()
self.laz_backend = laz_backend
self.header = LasHeader.read_from(source)
if self.header.point_count > 0:
if self.header.are_points_compressed:
if not laz_backend:
raise errors.LaspyException(
"No LazBackend selected, cannot decompress data"
)
self.point_source = self._create_laz_backend(source)
if self.point_source is None:
raise errors.LaspyException(
"Data is compressed, but no LazBacked could be initialized"
)
else:
self.point_source = UncompressedPointReader(source, self.header)
else:
self.point_source = EmptyPointReader()
self.points_read = 0
def read_points(self, n: int) -> record.ScaleAwarePointRecord:
"""Read n points from the file
Will only read as many points as the header advertise.
That is, if you ask to read 50 points and there are only 45 points left
this function will only read 45 points.
If there are no points left to read, returns an empty point record.
Parameters
----------
n: The number of points to read
if n is less than 0, this function will read the remaining points
"""
points_left = self.header.point_count - self.points_read
if points_left <= 0:
return record.ScaleAwarePointRecord.empty(
self.header.point_format,
self.header.scales,
self.header.offsets,
)
if n < 0:
n = points_left
else:
n = min(n, points_left)
r = record.PackedPointRecord.from_buffer(
self.point_source.read_n_points(n), self.header.point_format
)
if len(r) < n:
logger.error(f"Could only read {len(r)} of the requested {n} points")
points = record.ScaleAwarePointRecord(
r.array, r.point_format, self.header.scales, self.header.offsets
)
self.points_read += n
return points
def read(self) -> LasData:
"""Reads all the points not read and returns a LasData object"""
points = self.read_points(-1)
las_data = LasData(header=self.header, points=points)
if self.header.version.minor >= 4:
if (
self.header.are_points_compressed
and not self.point_source.source.seekable()
):
# We explicitly require seekable stream because we have to seek
# past the chunk table of LAZ file
raise errors.LaspyException(
"source must be seekable, to read evlrs form LAZ file"
)
self.point_source.source.seek(self.header.start_of_first_evlr, io.SEEK_SET)
las_data.evlrs = self._read_evlrs(self.point_source.source, seekable=True)
return las_data
def seek(self, pos: int, whence: int = io.SEEK_SET) -> int:
"""Seeks to the start of the point at the given pos
Parameters
----------
pos: index of the point to seek to
whence: optional, controls how the pos parameter is interpreted:
io.SEEK_SET: (default) pos is the index of the point from the beginning
io.SEEK_CUR: pos is the point_index relative to the point_index of the last point read
io.SEEK_END: pos is the point_index relative to last point
Returns
-------
The index of the point the reader seeked to, relative to the first point
"""
if whence == io.SEEK_SET:
allowed_range = range(0, self.header.point_count)
point_index = pos
elif whence == io.SEEK_CUR:
allowed_range = range(
-self.points_read, self.header.point_count - self.points_read
)
point_index = self.points_read + pos
elif whence == io.SEEK_END:
allowed_range = range(-self.header.point_count, 0)
point_index = self.header.point_count + pos
else:
raise ValueError(f"Invalid value for whence: {whence}")
if pos not in allowed_range:
whence_str = ["start", "current point", "end"]
raise IndexError(
f"When seeking from the {whence_str[whence]}, pos must be in {allowed_range}"
)
self.point_source.seek(point_index)
self.points_read = point_index
return point_index
def chunk_iterator(self, points_per_iteration: int) -> "PointChunkIterator":
"""Returns an iterator, that will read points by chunks
of the requested size
:param points_per_iteration: number of points to be read with each iteration
:return:
"""
return PointChunkIterator(self, points_per_iteration)
def close(self) -> None:
"""closes the file object used by the reader"""
if self.closefd:
self.point_source.close()
def _create_laz_backend(self, source) -> Optional["IPointReader"]:
try:
backends = iter(self.laz_backend)
except TypeError:
backends = (self.laz_backend,)
laszip_vlr = self.header.vlrs.pop(self.header.vlrs.index("LasZipVlr"))
for backend in backends:
try:
if not backend.is_available():
raise errors.LaspyException(f"The '{backend}' is not available")
if backend == LazBackend.LazrsParallel:
return LazrsPointReader(source, laszip_vlr, parallel=True)
elif backend == LazBackend.Lazrs:
return LazrsPointReader(source, laszip_vlr, parallel=False)
elif backend == LazBackend.Laszip:
return LaszipPointReader(source, self.header)
else:
raise errors.LaspyException(
"Unknown LazBackend: {}".format(backend)
)
except errors.LazError as e:
logger.error(e)
def _read_evlrs(self, source, seekable=False) -> Optional[VLRList]:
"""Reads the EVLRs of the file, will fail if the file version
does not support evlrs
"""
if (
self.header.version.minor >= 4
and self.points_read == self.header.point_count
):
if seekable:
source.seek(self.header.start_of_first_evlr)
return VLRList.read_from(source, self.header.number_of_evlrs, extended=True)
else:
return None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class PointChunkIterator:
def __init__(self, reader: LasReader, points_per_iteration: int) -> None:
self.reader = reader
self.points_per_iteration = points_per_iteration
def __next__(self) -> record.ScaleAwarePointRecord:
points = self.reader.read_points(self.points_per_iteration)
if not points:
raise StopIteration
return points
def __iter__(self) -> "PointChunkIterator":
return self
class IPointReader(abc.ABC):
"""The interface to be implemented by the class that actually reads
points from as LAS/LAZ file so that the LasReader can use it.
It is used to manipulate LAS/LAZ (with different LAZ backends) in the
reader
"""
@abc.abstractmethod
def read_n_points(self, n: int) -> bytearray:
...
@abc.abstractmethod
def seek(self, point_index: int) -> None:
...
@abc.abstractmethod
def close(self) -> None:
...
class UncompressedPointReader(IPointReader):
"""Implementation of IPointReader for the simple uncompressed case"""
def __init__(self, source, header: LasHeader) -> None:
self.source = source
self.header = header
def read_n_points(self, n: int) -> bytearray:
try:
readinto = self.source.readinto
except AttributeError:
data = bytearray(self.source.read(n * self.header.point_format.size))
else:
data = bytearray(n * self.header.point_format.size)
num_read = readinto(data)
if num_read < len(data):
data = data[:num_read]
return data
def seek(self, point_index: int) -> None:
self.source.seek(
self.header.offset_to_point_data
+ (point_index * self.header.point_format.size)
)
def close(self):
self.source.close()
class LaszipPointReader(IPointReader):
"""Implementation for the laszip backend"""
def __init__(self, source: BinaryIO, header: LasHeader) -> None:
self.source = source
self.source.seek(0)
self.unzipper = laszip.LasUnZipper(source)
unzipper_header = self.unzipper.header
assert unzipper_header.point_data_format == header.point_format.id
assert unzipper_header.point_data_record_length == header.point_format.size
self.point_size = header.point_format.size
def read_n_points(self, n: int) -> bytearray:
points_data = bytearray(n * self.point_size)
self.unzipper.decompress_into(points_data)
return points_data
def seek(self, point_index: int) -> None:
self.unzipper.seek(point_index)
def close(self) -> None:
self.source.close()
class LazrsPointReader(IPointReader):
"""Implementation for the laz-rs backend, supports single-threaded decompression
as well as multi-threaded decompression
"""
def __init__(self, source, laszip_vlr: LasZipVlr, parallel: bool) -> None:
self.source = source
self.vlr = lazrs.LazVlr(laszip_vlr.record_data)
if parallel:
self.decompressor = lazrs.ParLasZipDecompressor(
source, laszip_vlr.record_data
)
else:
self.decompressor = lazrs.LasZipDecompressor(source, laszip_vlr.record_data)
def read_n_points(self, n: int) -> bytearray:
point_bytes = bytearray(n * self.vlr.item_size())
self.decompressor.decompress_many(point_bytes)
return point_bytes
def seek(self, point_index: int) -> None:
self.decompressor.seek(point_index)
def close(self) -> None:
self.source.close()
class EmptyPointReader(IPointReader):
"""Does nothing but returning empty bytes.
Used to make sure we handle empty LAS files in a robust way.
"""
def read_n_points(self, n: int) -> bytearray:
return bytearray()
def close(self) -> None:
pass
def seek(self, point_index: int) -> None:
pass
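if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module. 'example.las' is a
    # placeholder path; because of the relative imports above, run it as
    # `python -m laspy.lasreader` with a real LAS/LAZ file in place.
    with open('example.las', 'rb') as f, LasReader(f) as reader:
        for chunk in reader.chunk_iterator(1_000_000):
            print(f'read a chunk of {len(chunk)} points')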
|
alipay/aop/api/domain/AlipayTradeFastpayEteDidiPayModel.py | snowxmas/alipay-sdk-python-all | 213 | 11076576 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayTradeFastpayEteDidiPayModel(object):
def __init__(self):
self._body = None
self._extend_params = None
self._login_id = None
self._login_passwd = None
self._mc_notify_url = None
self._out_trade_no = None
self._partner_id = None
self._pay_passwd = None
self._product_code = None
self._seller_id = None
self._subject = None
self._total_fee = None
self._user_id = None
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = value
@property
def extend_params(self):
return self._extend_params
@extend_params.setter
def extend_params(self, value):
self._extend_params = value
@property
def login_id(self):
return self._login_id
@login_id.setter
def login_id(self, value):
self._login_id = value
@property
def login_passwd(self):
return self._login_passwd
@login_passwd.setter
def login_passwd(self, value):
self._login_passwd = value
@property
def mc_notify_url(self):
return self._mc_notify_url
@mc_notify_url.setter
def mc_notify_url(self, value):
self._mc_notify_url = value
@property
def out_trade_no(self):
return self._out_trade_no
@out_trade_no.setter
def out_trade_no(self, value):
self._out_trade_no = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def pay_passwd(self):
return self._pay_passwd
@pay_passwd.setter
def pay_passwd(self, value):
self._pay_passwd = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def seller_id(self):
return self._seller_id
@seller_id.setter
def seller_id(self, value):
self._seller_id = value
@property
def subject(self):
return self._subject
@subject.setter
def subject(self, value):
self._subject = value
@property
def total_fee(self):
return self._total_fee
@total_fee.setter
def total_fee(self, value):
self._total_fee = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.body:
if hasattr(self.body, 'to_alipay_dict'):
params['body'] = self.body.to_alipay_dict()
else:
params['body'] = self.body
if self.extend_params:
if hasattr(self.extend_params, 'to_alipay_dict'):
params['extend_params'] = self.extend_params.to_alipay_dict()
else:
params['extend_params'] = self.extend_params
if self.login_id:
if hasattr(self.login_id, 'to_alipay_dict'):
params['login_id'] = self.login_id.to_alipay_dict()
else:
params['login_id'] = self.login_id
if self.login_passwd:
if hasattr(self.login_passwd, 'to_alipay_dict'):
params['login_passwd'] = self.login_passwd.to_alipay_dict()
else:
                params['login_passwd'] = self.login_passwd
if self.mc_notify_url:
if hasattr(self.mc_notify_url, 'to_alipay_dict'):
params['mc_notify_url'] = self.mc_notify_url.to_alipay_dict()
else:
params['mc_notify_url'] = self.mc_notify_url
if self.out_trade_no:
if hasattr(self.out_trade_no, 'to_alipay_dict'):
params['out_trade_no'] = self.out_trade_no.to_alipay_dict()
else:
params['out_trade_no'] = self.out_trade_no
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.pay_passwd:
if hasattr(self.pay_passwd, 'to_alipay_dict'):
params['pay_passwd'] = self.pay_passwd.to_alipay_dict()
else:
                params['pay_passwd'] = self.pay_passwd
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.seller_id:
if hasattr(self.seller_id, 'to_alipay_dict'):
params['seller_id'] = self.seller_id.to_alipay_dict()
else:
params['seller_id'] = self.seller_id
if self.subject:
if hasattr(self.subject, 'to_alipay_dict'):
params['subject'] = self.subject.to_alipay_dict()
else:
params['subject'] = self.subject
if self.total_fee:
if hasattr(self.total_fee, 'to_alipay_dict'):
params['total_fee'] = self.total_fee.to_alipay_dict()
else:
params['total_fee'] = self.total_fee
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayTradeFastpayEteDidiPayModel()
if 'body' in d:
o.body = d['body']
if 'extend_params' in d:
o.extend_params = d['extend_params']
if 'login_id' in d:
o.login_id = d['login_id']
if 'login_passwd' in d:
o.login_passwd = d['login_passwd']
if 'mc_notify_url' in d:
o.mc_notify_url = d['mc_notify_url']
if 'out_trade_no' in d:
o.out_trade_no = d['out_trade_no']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'pay_passwd' in d:
            o.pay_passwd = d['pay_passwd']
if 'product_code' in d:
o.product_code = d['product_code']
if 'seller_id' in d:
o.seller_id = d['seller_id']
if 'subject' in d:
o.subject = d['subject']
if 'total_fee' in d:
o.total_fee = d['total_fee']
if 'user_id' in d:
o.user_id = d['user_id']
return o
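if __name__ == '__main__':
    # Hedged usage sketch, not part of the original SDK file; all field values
    # below are placeholders chosen purely for illustration.
    _model = AlipayTradeFastpayEteDidiPayModel()
    _model.out_trade_no = '20190101000001'
    _model.subject = 'test order'
    _model.total_fee = '0.01'
    print(json.dumps(_model.to_alipay_dict(), ensure_ascii=False))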
|
vcx/wrappers/python3/aries-test-server/utils.py | sklump/indy-sdk | 636 | 11076619 | import asyncio
import json
import random
import string
from ctypes import cdll
import platform
from vcx.api.utils import vcx_agent_provision
from vcx.api.vcx_init import vcx_init_with_config
EXTENSION = {"darwin": ".dylib", "linux": ".so", "win32": ".dll", 'windows': '.dll'}
def file_ext():
your_platform = platform.system().lower()
return EXTENSION[your_platform] if (your_platform in EXTENSION) else '.so'
def load_payment_plugin():
payment_plugin = cdll.LoadLibrary('libnullpay' + file_ext())
payment_plugin.nullpay_init()
def rand_string() -> str:
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(20))
async def init_vcx():
load_payment_plugin()
config = {
'agency_url': 'http://localhost:8080',
'agency_did': 'VsKV7grR1BUE29mG2Fm2kX',
'agency_verkey': '<KEY>',
'wallet_name': rand_string(),
'wallet_key': '123',
'payment_method': 'null',
'enterprise_seed': '000000000000000000000000Trustee1',
'protocol_type': '2.0',
'communication_method': 'aries'
}
print("Provision an agent and wallet, get back configuration details")
config = await vcx_agent_provision(json.dumps(config))
config = json.loads(config)
# Set some additional configuration options specific to faber
config['pool_name'] = rand_string()
config['institution_name'] = rand_string()
config['institution_logo_url'] = 'http://robohash.org/234'
config['genesis_path'] = 'docker.txn'
print("Initialize libvcx with new configuration")
await vcx_init_with_config(json.dumps(config))
def run_coroutine(coroutine, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
return loop.run_until_complete(coroutine())
def run_coroutine_in_new_loop(coroutine):
loop = asyncio.new_event_loop()
return loop.run_until_complete(coroutine())
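if __name__ == '__main__':
    # Hedged sketch, not part of the original file: provisioning assumes a local
    # agency listening on http://localhost:8080 and the libvcx/libnullpay
    # native libraries being installed.
    run_coroutine(init_vcx)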
|
envi/tests/msp430/irrc.py | rnui2k/vivisect | 716 | 11076647 | <reponame>rnui2k/vivisect
from envi.archs.msp430.regs import *
checks = [
# RRC
(
'RRC r15 (destination carry + negative)',
{ 'regs': [(REG_R15, 0x5555)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "0f10", 'data': "" },
{ 'regs': [(REG_R15, 0xaaaa)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "0f10", 'data': "" }
),
(
'RRC r15 (destination zero + carry)',
{ 'regs': [(REG_R15, 0x1)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f10", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 1), (SR_C, 1), (SR_V, 0)], 'code': "0f10", 'data': "" }
),
(
'RRC r15 (destination negative)',
{ 'regs': [(REG_R15, 0x8000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f10", 'data': "" },
{ 'regs': [(REG_R15, 0x4000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f10", 'data': "" }
),
# RRC.b
(
'RRC.b r15 (destination carry)',
{ 'regs': [(REG_R15, 0x1155)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "4f10", 'data': "" },
{ 'regs': [(REG_R15, 0xaa)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "4f10", 'data': "" }
),
(
'RRC.b r15 (destination zero + carry)',
{ 'regs': [(REG_R15, 0x1)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f10", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 1), (SR_C, 1), (SR_V, 0)], 'code': "4f10", 'data': "" }
),
(
'RRC.b r15 (destination negative)',
{ 'regs': [(REG_R15, 0x1180)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f10", 'data': "" },
{ 'regs': [(REG_R15, 0x40)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f10", 'data': "" }
),
]
|
env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_ohlc.py | acrucetta/Chicago_COVI_WebApp | 11,750 | 11076672 | from plotly.graph_objs import Ohlc
|
cyverse_allocation/spoof_instance.py | simpsonw/atmosphere | 197 | 11076674 | <filename>cyverse_allocation/spoof_instance.py<gh_stars>100-1000
from core.models.instance_source import InstanceSource
from django.utils import timezone
from core.models.instance import Instance
from core.models.instance_history import InstanceStatusHistory, InstanceStatus
from core.models.allocation_source import AllocationSource
from core.models.size import Size
from core.models.event_table import EventTable
from core.models.identity import Identity
from core.models.provider import Provider, ProviderType, PlatformType
from api.tests.factories import UserFactory
import uuid
class UserWorkflow:
def __init__(self):
self.user = UserFactory.create()
provider_type = ProviderType(name='Test_%s' % self.user.username)
provider_type.save()
platform_type = PlatformType(
name='test-platform-%s' % self.user.username
)
platform_type.save()
provider = Provider(
location='BIO5', type=provider_type, virtualization=platform_type
)
provider.save()
active_status = InstanceStatus.objects.filter(name='active').last()
if not active_status:
active_status = InstanceStatus(name='active')
active_status.save()
suspended_status = InstanceStatus.objects.filter(name='suspended'
).last()
if not suspended_status:
suspended_status = InstanceStatus(name='suspended')
suspended_status.save()
self.provider = provider
self.active_status = active_status
self.suspended_status = suspended_status
def create_instance(self, start_date=None):
if not start_date:
start_date = timezone.now()
provider_alias = str(uuid.uuid4())
identity = Identity.objects.filter(created_by=self.user).last()
if not identity:
identity = Identity(created_by=self.user, provider=self.provider)
identity.save()
instance_source = InstanceSource(
provider=self.provider,
identifier=str(uuid.uuid4()),
created_by=self.user,
created_by_identity=identity
)
instance_source.save()
instance = Instance(
source=instance_source,
provider_alias=provider_alias,
created_by=self.user,
start_date=start_date
)
instance.save()
self.create_instance_status_history(
instance, start_date=start_date, status='active'
)
return instance
def assign_allocation_source_to_user(
self, allocation_source, timestamp=None
):
if not timestamp:
timestamp = timezone.now()
# Spoof UserAllocationSource
new_user_allocation_source = {
'source_id': allocation_source.source_id,
'username': self.user.username
}
EventTable.objects.create(
name='user_allocation_source_assigned',
payload=new_user_allocation_source,
entity_id=new_user_allocation_source['username'],
timestamp=timestamp
)
def assign_allocation_source_to_instance(
self, allocation_source, instance, timestamp=None
):
if not timestamp:
timestamp = timezone.now()
# Associate Instance with Allocation Source
if self.user != instance.created_by:
raise Exception(
'instance %s does not belong to user %s' %
(instance, self.user.username)
)
payload = {
"allocation_source_id": allocation_source.source_id,
"instance_id": instance.provider_alias
}
EventTable.objects.create(
name='instance_allocation_source_changed',
payload=payload,
entity_id=self.user.username,
timestamp=timestamp
)
def create_instance_status_history(
self, instance, start_date=None, status=None, cpu=None, end_date=None
):
# Spoof InstanceStatusHistory
if self.user != instance.created_by:
raise Exception(
'instance %s does not belong to user %s' %
(instance, self.user.username)
)
if not start_date:
start_date = timezone.now()
if not cpu:
cpu = 1
if status == 'active' or not status:
current_status = self.active_status
else:
current_status = self.suspended_status
size = Size(
alias=uuid.uuid4(),
name='small',
provider=self.provider,
cpu=cpu,
disk=1,
root=1,
mem=1
)
size.save()
# find last instance history and end date it
last_instance_history = InstanceStatusHistory.objects.filter(
instance=instance
).order_by('start_date').last()
if last_instance_history:
last_instance_history.end_date = start_date
last_instance_history.save()
instance_history1 = InstanceStatusHistory(
instance=instance,
size=size,
status=current_status,
start_date=start_date,
end_date=end_date
)
instance_history1.save()
def is_allocation_source_assigned_to_user(self):
query = EventTable.objects.filter(
name='user_allocation_source_assigned',
entity_id=self.user.username
)
return True if query else False
def create_allocation_source(
name, compute_allowed, renewal_strategy=None, timestamp=None
):
if not timestamp:
timestamp = timezone.now()
# Spoof Allocation Source creation
if not renewal_strategy:
renewal_strategy = 'default'
new_allocation_source = {
'source_id': str(uuid.uuid4()),
'name': name,
'compute_allowed': compute_allowed,
'renewal_strategy': renewal_strategy
}
EventTable.objects.create(
name='allocation_source_created',
payload=new_allocation_source,
entity_id=new_allocation_source['source_id'],
timestamp=timestamp
)
return AllocationSource.objects.filter(name=name).last()
def change_renewal_strategy(
allocation_source, renewal_strategy, timestamp=None
):
if not timestamp:
timestamp = timezone.now()
# Spoof Renewal Strategy change
if not renewal_strategy:
        raise ValueError('Please provide a renewal strategy to change to')
renewal_strategy_change_payload = {
"source_id": str(allocation_source.source_id),
"renewal_strategy": renewal_strategy
}
EventTable.objects.create(
name='allocation_source_renewal_strategy_changed',
payload=renewal_strategy_change_payload,
entity_id=renewal_strategy_change_payload['source_id'],
timestamp=timestamp
)
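if __name__ == '__main__':
    # Hedged end-to-end sketch, not part of the original module. It assumes a
    # configured Django environment with the atmosphere models migrated and the
    # event listeners that materialize AllocationSource rows being active.
    workflow = UserWorkflow()
    source = create_allocation_source('spoofed-source', compute_allowed=1000)
    workflow.assign_allocation_source_to_user(source)
    instance = workflow.create_instance()
    workflow.assign_allocation_source_to_instance(source, instance)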
|
synth/components/oscillators/oscillators.py | Hanz-Tech/synth | 104 | 11076679 | <filename>synth/components/oscillators/oscillators.py
import math
from .base_oscillator import Oscillator
class SineOscillator(Oscillator):
def _post_freq_set(self):
self._step = (2 * math.pi * self._f) / self._sample_rate
def _post_phase_set(self):
self._p = (self._p / 360) * 2 * math.pi
def _initialize_osc(self):
self._i = 0
def __next__(self):
val = math.sin(self._i + self._p)
self._i = self._i + self._step
        if self._wave_range != (-1, 1):
val = self.squish_val(val, *self._wave_range)
return val * self._a
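# Illustrative usage sketch (an assumption, not from the original file): the base
# Oscillator class is expected to implement __iter__ so that iter() runs the
# *_set/_initialize_osc hooks before next() is called, e.g.
#   osc = iter(SineOscillator(freq=440, amp=0.8))
#   samples = [next(osc) for _ in range(4)]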
class SquareOscillator(SineOscillator):
def __init__(self, freq=440, phase=0, amp=1, \
sample_rate=44_100, wave_range=(-1, 1), threshold=0):
super().__init__(freq, phase, amp, sample_rate, wave_range)
self.threshold = threshold
def __next__(self):
val = math.sin(self._i + self._p)
self._i = self._i + self._step
if val < self.threshold:
val = self._wave_range[0]
else:
val = self._wave_range[1]
return val * self._a
class SawtoothOscillator(Oscillator):
def _post_freq_set(self):
self._period = self._sample_rate / self._f
        self._post_phase_set()
def _post_phase_set(self):
self._p = ((self._p + 90)/ 360) * self._period
def _initialize_osc(self):
self._i = 0
def __next__(self):
div = (self._i + self._p )/self._period
val = 2 * (div - math.floor(0.5 + div))
self._i = self._i + 1
        if self._wave_range != (-1, 1):
val = self.squish_val(val, *self._wave_range)
return val * self._a
class TriangleOscillator(SawtoothOscillator):
def __next__(self):
div = (self._i + self._p)/self._period
val = 2 * (div - math.floor(0.5 + div))
val = (abs(val) - 0.5) * 2
self._i = self._i + 1
        if self._wave_range != (-1, 1):
            val = self.squish_val(val, *self._wave_range)
        return val * self._a
|
src/python/docs/sphinx/ci_script/gen_toc_yml.py | michaelgsharp/NimbusML | 134 | 11076700 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 13 13:54:06 2018
In this file, we update the reference/api section in the toc.yml to be
synced with the latest package.
We use the index.md generated by "build md" to create the reference/api
section in yaml format and update it in the toc.yml.
The "- href" will be pointing to the yml files in the docs-ref-autogen
folder.
"""
import argparse
import os
import re
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-input', help='path to index.md')
parser.add_argument('-temp', help='path to a temp file')
parser.add_argument('-output', help='path to final output toc.yml')
args = vars(parser.parse_args())
file_r = open(args['input'], "r")
file_w = open(args['temp'], "w")
ref_dict = {'API/Reference': ('modules.md', [])}
previous_level = 0
processing_start = False
for line in file_r:
if '[API/Reference]' in line:
processing_start = True
if '[Tutorials]' in line:
break
if processing_start:
current_level = line.find('*') / 2
if current_level >= 0:
# print(line)
# print(current_level)
if 'nimbusml.' in line:
name = re.search('(?<=\[\*)(.*)(?=\*\])', line)
if not name:
name = re.search(
'(?<=\[\*)(.*)(?=\*\:)', line).group(0)
else:
name = name.group(0)
path = 'docs-ref-autogen\\' + name + '.yml'
else:
name = re.search('(?<=\[)(.*)(?=\])', line).group(0)
path = re.search('(?<=\()(.*)(?=\))', line).group(0)
if current_level > previous_level:
file_w.write(
' ' * int(previous_level) + ' items: ' + '\n')
file_w.write(' ' * int(current_level) +
'- name: ' + name.split('.')[-1] + '\n')
file_w.write(
' ' *
int(current_level) +
' href: ' +
path +
'\n')
previous_level = current_level
file_r.close()
file_w.close()
import yaml
file_w = args['output']
stream = open(file_w, 'r')
data = yaml.load(stream)
data[-1]
file_r = args['temp']
stream_r = open(file_r, 'r')
data_r = yaml.load(stream_r)
data[-1] = data_r[-1]
with open(file_w, 'w') as yaml_file:
yaml_file.write(yaml.dump(data, default_flow_style=False))
stream_r.close()
stream.close()
os.remove(file_r)
|
src/models/backbones_3d/cfe/pillar_dsa.py | reinforcementdriving/SA-Det3D | 134 | 11076717 | <filename>src/models/backbones_3d/cfe/pillar_dsa.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from pcdet.models.backbones_3d.sa_block import SA_block, SA_block_def
class PillarContext3D_dsa(nn.Module):
def __init__(self, model_cfg, grid_size, voxel_size, point_cloud_range, dropout=0.3):
super().__init__()
self.model_cfg = model_cfg
self.nx, self.ny, self.nz = grid_size
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
self.voxel_x = voxel_size[0]
self.voxel_y = voxel_size[1]
self.voxel_z = voxel_size[2]
self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
# layers to deform + aggregate local features
mlps = self.model_cfg.LOCAL_CONTEXT.MLPS
for k in range(len(mlps)):
mlps[k] = [self.model_cfg.NUM_BEV_FEATURES] + mlps[k]
self.adapt_context = pointnet2_stack_modules.StackSAModuleMSGAdapt(
radii=self.model_cfg.LOCAL_CONTEXT.POOL_RADIUS,
deform_radii=self.model_cfg.LOCAL_CONTEXT.DEFORM_RADIUS,
nsamples=self.model_cfg.LOCAL_CONTEXT.NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method=self.model_cfg.LOCAL_CONTEXT.POOL_METHOD,
pc_range=self.point_cloud_range,
)
# UnPool layers
mlps_decode = self.model_cfg.DECODE.MLPS
for k in range(len(mlps_decode)):
mlps_decode[k] = [self.model_cfg.IN_DIM] + mlps_decode[k]
self.decode = pointnet2_stack_modules.StackSAModuleMSGDecode(
radii=self.model_cfg.DECODE.POOL_RADIUS,
nsamples=self.model_cfg.DECODE.NSAMPLE,
mlps=mlps_decode,
use_xyz=True,
pool_method=self.model_cfg.DECODE.POOL_METHOD,
)
# self-attention layers to operate on deformed pillars
self.self_full_fast_attn = SA_block(inplanes=self.model_cfg.IN_DIM, planes=self.model_cfg.IN_DIM)
self.reduce_dim = nn.Sequential(nn.Conv1d(2*self.model_cfg.IN_DIM, self.model_cfg.IN_DIM, kernel_size=1),
nn.BatchNorm1d(self.model_cfg.IN_DIM),
nn.ReLU(inplace=True),
nn.Conv1d(self.model_cfg.IN_DIM, self.model_cfg.IN_DIM, kernel_size=1),
nn.BatchNorm1d(self.model_cfg.IN_DIM),
nn.ReLU(inplace=True)
)
self.self_attn_ms1 = SA_block(inplanes=2*self.model_cfg.IN_DIM, planes=2*self.model_cfg.IN_DIM)
self.self_attn_ms2 = SA_block(inplanes=2*self.model_cfg.IN_DIM, planes=2*self.model_cfg.IN_DIM)
def get_keypoints(self, batch_size, coords, src_points):
"""
Select keypoints, i.e. a subset of pillar coords to deform, aggregate local features and then attend to.
:param batch_size:
:param coords:
:param src_points:
:return: B x num_keypoints x 3
"""
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (coords[:, 0] == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(sampled_points[:, :, 0:3],
self.model_cfg.NUM_KEYPOINTS).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
empty_num = self.model_cfg.NUM_KEYPOINTS - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoints
def get_local_keypoint_features(self, keypoints, pillar_center, pillar_features, coords):
"""
Get local features of deformed pillar-subset/keypoints.
:param keypoints:
:param pillar_center:
:param pillar_features:
:param coords:
:return: B x num_keypoints X C
"""
batch_size, num_keypoints, _ = keypoints.shape
new_xyz = keypoints.view(-1, 3)
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int().fill_(num_keypoints)
xyz_batch_cnt = torch.zeros([batch_size]).int().cuda()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (coords[:, 0] == bs_idx).sum()
def_xyz, local_features = self.adapt_context(
xyz=pillar_center,
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=pillar_features
)
def_xyz = def_xyz.view(batch_size, num_keypoints, -1)
local_features = local_features.view(batch_size, num_keypoints, -1)
return def_xyz, local_features
def get_context_features(self, batch_size, local_features):
batch_global_features = []
for batch_idx in range(batch_size):
init_idx = batch_idx * self.model_cfg.NUM_KEYPOINTS
local_feat = local_features[init_idx:init_idx + self.model_cfg.NUM_KEYPOINTS, :].unsqueeze(0)
local_feat = local_feat.permute(0, 2, 1).contiguous()
global_feat = self.self_full_fast_attn(local_feat)
# SA-1
ms_feat1 = torch.cat([local_feat, global_feat], dim=1)
attn_feat1 = self.self_attn_ms1(ms_feat1)
attn_feat1 = self.reduce_dim(attn_feat1)
# SA-2
ms_feat2 = torch.cat([local_feat, attn_feat1], dim=1)
attn_feat2 = self.self_attn_ms2(ms_feat2)
attn_feat2 = self.reduce_dim(attn_feat2)
context_feat = attn_feat2.permute(0, 2, 1).contiguous().squeeze(0)
batch_global_features.append(context_feat)
batch_global_features = torch.cat(batch_global_features, 0)
return batch_global_features
def get_context_image(self, batch_size, keypoints, pillar_center, global_features, coords):
# pillar coordinates
new_xyz = pillar_center
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()
batch_idx = coords[:, 0]
for k in range(batch_size):
new_xyz_batch_cnt[k] = (batch_idx == k).sum()
# keypoint coordinates and features
xyz = keypoints.view(-1, 3)
xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(keypoints.shape[1])
# UnPool to get global context enhanced pillar features for every pillar
pillar_features = self.decode(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz.contiguous(),
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=global_features,
) # (M1 + M2 ..., C)
# Create pseudo-image for self-attention pillar features
batch_context_features = []
for batch_idx in range(batch_size):
batch_mask = coords[:, 0] == batch_idx
pillars = pillar_features[batch_mask, :]
this_coords = coords[batch_mask, :]
indices = this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3]
indices = indices.type(torch.long)
spatial_feature = torch.zeros(
self.model_cfg.NUM_BEV_FEATURES,
self.nz * self.nx * self.ny,
dtype=pillars.dtype,
device=pillars.device)
spatial_feature[:, indices] = pillars.t()
batch_context_features.append(spatial_feature)
context_pillar_features = torch.cat(batch_context_features, 0)
context_pillar_features = context_pillar_features.view(batch_size,
self.model_cfg.NUM_BEV_FEATURES * self.nz, self.ny,
self.nx)
return context_pillar_features
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
context_pillar_features: (N, C)
"""
batch_size = batch_dict['batch_size']
pillars = batch_dict['pillar_features']
coords = batch_dict['voxel_coords']
# Preprocessing for pillar locations
pillar_center = torch.zeros_like(coords[:, :3])
# front-back (X); left-right (Y); up-down (Z)
pillar_center[:, 0] = coords[:, 3] * self.voxel_x + self.x_offset
pillar_center[:, 1] = coords[:, 2] * self.voxel_y + self.y_offset
pillar_center[:, 2] = coords[:, 1] * self.voxel_z + self.z_offset
# Get keypoints
keypoints = self.get_keypoints(batch_size, coords, pillar_center)
# Get deformed and aggregated keypoint feature from pillars
def_xyz, local_keypoint_feats = self.get_local_keypoint_features(keypoints, pillar_center, pillars, coords)
local_keypoint_feats = local_keypoint_feats.view(batch_size*self.model_cfg.NUM_KEYPOINTS, -1).contiguous()
# Get context for a subset of selected and deformed pillars
context_features = self.get_context_features(batch_size, local_keypoint_feats)
# Get context enhanced pseudo image - UnPool step here
context_pillar_features = self.get_context_image(batch_size, def_xyz, pillar_center, context_features, coords)
# generate down-sampled SA-features to concatenate with Conv in decoder2d
pillar_context = [F.interpolate(context_pillar_features, scale_factor=0.5, mode='bilinear'),
F.interpolate(context_pillar_features, scale_factor=0.25, mode='bilinear'),
F.interpolate(context_pillar_features, scale_factor=0.125, mode='bilinear')]
batch_dict['pillar_context'] = pillar_context
return batch_dict
class PillarContext3D_def(nn.Module):
"""Up-sampling method based on Set-transformer (ICML 2019)"""
def __init__(self, model_cfg, grid_size, voxel_size, point_cloud_range, dropout=0.3):
super().__init__()
self.model_cfg = model_cfg
self.nx, self.ny, self.nz = grid_size
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
self.voxel_x = voxel_size[0]
self.voxel_y = voxel_size[1]
self.voxel_z = voxel_size[2]
self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
# layers to deform + aggregate local features
mlps = self.model_cfg.LOCAL_CONTEXT.MLPS
for k in range(len(mlps)):
mlps[k] = [self.model_cfg.NUM_BEV_FEATURES] + mlps[k]
self.adapt_context = pointnet2_stack_modules.StackSAModuleMSGAdapt(
radii=self.model_cfg.LOCAL_CONTEXT.POOL_RADIUS,
deform_radii=self.model_cfg.LOCAL_CONTEXT.DEFORM_RADIUS,
nsamples=self.model_cfg.LOCAL_CONTEXT.NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method=self.model_cfg.LOCAL_CONTEXT.POOL_METHOD,
pc_range=self.point_cloud_range,
)
self.self_full_fast_attn = SA_block(inplanes=self.model_cfg.IN_DIM, planes=self.model_cfg.IN_DIM)
self.self_attn1 = SA_block_def(inplanes=self.model_cfg.IN_DIM, planes=self.model_cfg.IN_DIM)
self.self_attn2 = SA_block_def(inplanes=self.model_cfg.IN_DIM, planes=self.model_cfg.IN_DIM)
def get_keypoints(self, batch_size, coords, src_points):
"""
Select keypoints, i.e. a subset of pillar coords to deform, aggregate local features and then attend to.
:param batch_size:
:param coords:
:param src_points:
:return: B x num_keypoints x 3
"""
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (coords[:, 0] == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(sampled_points[:, :, 0:3],
self.model_cfg.NUM_KEYPOINTS).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
empty_num = self.model_cfg.NUM_KEYPOINTS - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoints
def get_local_keypoint_features(self, keypoints, pillar_center, pillar_features, coords):
"""
Get local features of deformed pillar-subset/keypoints.
:param keypoints:
:param pillar_center:
:param pillar_features:
:param coords:
:return: B x num_keypoints X C
"""
batch_size, num_keypoints, _ = keypoints.shape
new_xyz = keypoints.view(-1, 3)
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int().fill_(num_keypoints)
xyz_batch_cnt = torch.zeros([batch_size]).int().cuda()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (coords[:, 0] == bs_idx).sum()
def_xyz, local_features = self.adapt_context(
xyz=pillar_center,
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=pillar_features
)
def_xyz = def_xyz.view(batch_size, num_keypoints, -1)
local_features = local_features.view(batch_size, num_keypoints, -1)
return def_xyz, local_features
def get_context_features(self, batch_size, pillars, local_features, coords):
batch_global_features = []
for batch_idx in range(batch_size):
init_idx = batch_idx * self.model_cfg.NUM_KEYPOINTS
local_feat = local_features[init_idx:init_idx + self.model_cfg.NUM_KEYPOINTS, :].unsqueeze(0)
local_feat = local_feat.permute(0, 2, 1).contiguous()
local_sa_feat = self.self_full_fast_attn(local_feat)
batch_mask = coords[:, 0] == batch_idx
pillar_feat = pillars[batch_mask, :].unsqueeze(0).permute(0, 2, 1).contiguous()
attn_feat1 = self.self_attn1(pillar_feat, local_sa_feat)
attn_feat2 = self.self_attn2(attn_feat1, local_sa_feat)
context_pillar = attn_feat2.permute(0, 2, 1).contiguous().squeeze(0)
this_coords = coords[batch_mask, :]
indices = this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3]
indices = indices.type(torch.long)
spatial_feature = torch.zeros(
self.model_cfg.NUM_BEV_FEATURES,
self.nz * self.nx * self.ny,
dtype=context_pillar.dtype,
device=context_pillar.device)
spatial_feature[:, indices] = context_pillar.t()
batch_global_features.append(spatial_feature)
context_pillar_features = torch.cat(batch_global_features, 0)
context_pillar_features = context_pillar_features.view(batch_size, self.model_cfg.NUM_BEV_FEATURES * self.nz, self.ny, self.nx)
return context_pillar_features
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
context_pillar_features: (N, C)
"""
batch_size = batch_dict['batch_size']
pillars = batch_dict['pillar_features']
coords = batch_dict['voxel_coords']
# Preprocessing for pillar locations
pillar_center = torch.zeros_like(coords[:, :3])
# front-back (X); left-right (Y); up-down (Z)
pillar_center[:, 0] = coords[:, 3] * self.voxel_x + self.x_offset
pillar_center[:, 1] = coords[:, 2] * self.voxel_y + self.y_offset
pillar_center[:, 2] = coords[:, 1] * self.voxel_z + self.z_offset
# Get keypoints
keypoints = self.get_keypoints(batch_size, coords, pillar_center)
# Get deformed and aggregated keypoint feature from pillars
def_xyz, local_keypoint_feats = self.get_local_keypoint_features(keypoints, pillar_center, pillars, coords)
local_keypoint_feats = local_keypoint_feats.view(batch_size * self.model_cfg.NUM_KEYPOINTS, -1).contiguous()
# Get context for a subset of selected and deformed pillars
context_pillar_features = self.get_context_features(batch_size, pillars, local_keypoint_feats, coords)
# generate down-sampled SA-features to concatenate with Conv in decoder2d
pillar_context = [F.interpolate(context_pillar_features, scale_factor=0.5, mode='bilinear'),
F.interpolate(context_pillar_features, scale_factor=0.25, mode='bilinear'),
F.interpolate(context_pillar_features, scale_factor=0.125, mode='bilinear')]
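        # Note (an illustrative assumption, not taken from this file): the three
        # interpolated maps are sized to line up with the stride-2/4/8 feature maps
        # of the 2D decoder mentioned in the comment above, so they can be
        # concatenated channel-wise at matching resolutions.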
batch_dict['pillar_context'] = pillar_context
return batch_dict
|
trader/binance_future_trader.py | xmrio/binance_grid_trader | 351 | 11076721 | <reponame>xmrio/binance_grid_trader
"""
Binance referral code (10% rebate):
https://www.binancezh.pro/cn/register?ref=AIR1GC70
Binance futures referral code (10% rebate):
https://www.binancezh.com/cn/futures/ref/51bitquant
if you don't have a binance account, you can use the invitation link to register one:
https://www.binancezh.com/cn/futures/ref/51bitquant
or use the invitation code: 51bitquant
Grid trading: suited to the high volatility of crypto assets and best used on spot markets; if you trade futures with it, guard against liquidation in extreme market moves.
Server purchase link: https://www.ucloud.cn/site/global.html?invitation_code=C1x2EA81CD79B8C#dongjing
"""
from gateway import BinanceFutureHttp, OrderStatus, OrderType, OrderSide
from utils import config
from utils import utility, round_to
from enum import Enum
import logging
from datetime import datetime
class BinanceFutureTrader(object):
def __init__(self):
"""
        The Binance futures trader: grid trading on Binance futures contracts.
        Grid trading on futures carries substantial risk; use it only after you understand the risk and the grid strategy.
"""
self.http_client = BinanceFutureHttp(api_key=config.api_key, secret=config.api_secret,
proxy_host=config.proxy_host, proxy_port=config.proxy_port)
        self.buy_orders = []  # open buy orders
        self.sell_orders = []  # open sell orders
self.symbols_dict = {} # all the symbols exchange info and filters.
def get_exchange_info(self):
data = self.http_client.exchangeInfo()
if isinstance(data, dict):
items = data.get('symbols', [])
for item in items:
if item.get('status') == "TRADING":
symbol = item['symbol']
symbol_data = {"symbol": symbol}
for filters in item['filters']:
if filters['filterType'] == 'PRICE_FILTER':
symbol_data['min_price'] = float(filters['tickSize'])
elif filters['filterType'] == 'LOT_SIZE':
symbol_data['min_qty'] = float(filters['stepSize'])
elif filters['filterType'] == 'MIN_NOTIONAL':
symbol_data['min_notional'] = float(filters['notional'])
self.symbols_dict[symbol] = symbol_data
        # print(len(self.symbols),self.symbols) # 129 trading pairs.
def get_bid_ask_price(self):
ticker = self.http_client.get_ticker(config.symbol)
bid_price = 0
ask_price = 0
if ticker:
bid_price = float(ticker.get('bidPrice', 0))
ask_price = float(ticker.get('askPrice', 0))
return bid_price, ask_price
def start(self):
"""
        Execute the core logic of the grid trading strategy.
:return:
"""
symbol_data = self.symbols_dict.get(config.symbol, None)
if symbol_data is None:
self.get_exchange_info()
symbol_data = self.symbols_dict.get(config.symbol, None)
if symbol_data is None:
return None
min_price = symbol_data.get('min_price', 0)
min_qty = symbol_data.get('min_qty', 0)
if min_price <= 0 and min_qty <= 0:
return None
bid_price, ask_price = self.get_bid_ask_price()
print(f"bid_price: {bid_price}, ask_price: {ask_price}, time: {datetime.now()}")
quantity = round_to(float(config.quantity), float(min_qty))
        self.buy_orders.sort(key=lambda x: float(x['price']), reverse=True)  # highest price first.
        self.sell_orders.sort(key=lambda x: float(x['price']), reverse=True)  # highest price first.
        buy_delete_orders = []  # buy orders to be removed from tracking
        sell_delete_orders = []  # sell orders to be removed from tracking
        # Buy-order logic: check whether any buy order has been filled.
for buy_order in self.buy_orders:
check_order = self.http_client.get_order(buy_order.get('symbol', config.symbol),
client_order_id=buy_order.get('clientOrderId'))
if check_order:
if check_order.get('status') == OrderStatus.CANCELED.value:
buy_delete_orders.append(buy_order)
print(f"buy order was canceled: {check_order.get('status')}, time: {datetime.now()}")
elif check_order.get('status') == OrderStatus.FILLED.value:
                    # Buy order filled: place the matching sell order.
print(f"buy order was filled, time: {datetime.now()}")
logging.info(
f"buy order was filled, price: {check_order.get('price')}, qty: {check_order.get('origQty')}, time: {datetime.now()}")
sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)), min_price)
if 0 < sell_price < ask_price:
                        # keep the sell price from dropping below the current best ask
sell_price = round_to(ask_price, float(min_price))
new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL,
order_type=OrderType.LIMIT, quantity=quantity,
price=sell_price)
if new_sell_order:
print(
f"buy order was filled and place the sell order: {new_sell_order}, time: {datetime.now()}")
buy_delete_orders.append(buy_order)
self.sell_orders.append(new_sell_order)
buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)), min_price)
if buy_price > bid_price > 0:
buy_price = round_to(buy_price, min_price)
new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY,
order_type=OrderType.LIMIT, quantity=quantity,
price=buy_price)
if new_buy_order:
print(f"买单成交,下了更低价的买单: {new_buy_order}, 时间: {datetime.now()}")
self.buy_orders.append(new_buy_order)
elif check_order.get('status') == OrderStatus.NEW.value:
print(f"buy order status is: New , time: {datetime.now()}")
else:
print(f"buy order status is not above options: {check_order.get('status')}, time: {datetime.now()}")
        # Remove the expired or rejected orders from tracking.
for delete_order in buy_delete_orders:
self.buy_orders.remove(delete_order)
        # Sell-order logic: check whether any sell order has been filled.
for sell_order in self.sell_orders:
check_order = self.http_client.get_order(sell_order.get('symbol', config.symbol),
client_order_id=sell_order.get('clientOrderId'))
if check_order:
if check_order.get('status') == OrderStatus.CANCELED.value:
sell_delete_orders.append(sell_order)
print(f"sell order was canceled: {check_order.get('status')}, time: {datetime.now()}")
elif check_order.get('status') == OrderStatus.FILLED.value:
logging.info(
f"sell order was filled, price: {check_order.get('price')}, qty: {check_order.get('origQty')}, time:: {datetime.now()}")
                    # Sell order filled: place the matching buy order first.
buy_price = round_to(float(check_order.get("price")) * (1 - float(config.gap_percent)),
min_price)
if buy_price > bid_price > 0:
buy_price = round_to(buy_price, min_price)
new_buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY,
order_type=OrderType.LIMIT, quantity=quantity,
price=buy_price)
if new_buy_order:
print(f"sell order was filled, place buy order: {new_buy_order}, time: {datetime.now()}")
sell_delete_orders.append(sell_order)
self.buy_orders.append(new_buy_order)
sell_price = round_to(float(check_order.get("price")) * (1 + float(config.gap_percent)),
min_price)
if 0 < sell_price < ask_price:
                        # keep the sell price from dropping below the current best ask
sell_price = round_to(ask_price, min_price)
new_sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL,
order_type=OrderType.LIMIT, quantity=quantity,
price=sell_price)
if new_sell_order:
print(f"卖单成交,下了更高价的卖单: {new_sell_order},time: {datetime.now()}")
self.sell_orders.append(new_sell_order)
elif check_order.get('status') == OrderStatus.NEW.value:
print(f"sell order is: New, time: {datetime.now()}")
else:
print(
f"sell order status is not in above options: {check_order.get('status')}, 时间: {datetime.now()}")
        # Remove the expired or rejected orders from tracking.
for delete_order in sell_delete_orders:
self.sell_orders.remove(delete_order)
        # When there is no open buy order.
if len(self.buy_orders) <= 0:
if bid_price > 0:
price = round_to(bid_price * (1 - float(config.gap_percent)), min_price)
buy_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.BUY,
order_type=OrderType.LIMIT, quantity=quantity, price=price)
                print(f'no open buy orders, placing a buy order from the order book: {buy_order}, time: {datetime.now()}')
if buy_order:
self.buy_orders.append(buy_order)
else:
            self.buy_orders.sort(key=lambda x: float(x['price']), reverse=False)  # lowest price first
delete_orders = []
for i in range(len(self.buy_orders) - 1):
order = self.buy_orders[i]
next_order = self.buy_orders[i + 1]
if float(next_order['price']) / float(order['price']) - 1 < 0.001:
print(f"买单之间价差太小,撤销订单:{next_order}, time: {datetime.now()}")
cancel_order = self.http_client.cancel_order(next_order.get('symbol'),
client_order_id=next_order.get('clientOrderId'))
if cancel_order:
delete_orders.append(next_order)
for order in delete_orders:
self.buy_orders.remove(order)
        if len(self.buy_orders) > int(config.max_orders):  # maximum number of open orders allowed.
            # Too many open buy orders: cancel the lowest-priced one.
            self.buy_orders.sort(key=lambda x: float(x['price']), reverse=False)  # lowest price first
delete_order = self.buy_orders[0]
print(f"订单太多了,撤销最低价的买单:{delete_order}, time: {datetime.now()}")
order = self.http_client.cancel_order(delete_order.get('symbol'),
client_order_id=delete_order.get('clientOrderId'))
if order:
self.buy_orders.remove(delete_order)
        # When there is no open sell order.
if len(self.sell_orders) <= 0:
if ask_price > 0:
price = round_to(ask_price * (1 + float(config.gap_percent)), float(min_price))
sell_order = self.http_client.place_order(symbol=config.symbol, order_side=OrderSide.SELL,
order_type=OrderType.LIMIT, quantity=quantity, price=price)
                print(f'no open sell orders, placing a sell order from the order book: {sell_order}, time: {datetime.now()}')
if sell_order:
self.sell_orders.append(sell_order)
else:
            self.sell_orders.sort(key=lambda x: float(x['price']), reverse=False)  # lowest price first
delete_orders = []
for i in range(len(self.sell_orders) - 1):
order = self.sell_orders[i]
next_order = self.sell_orders[i + 1]
if float(next_order['price']) / float(order['price']) - 1 < 0.001:
print(f"卖单之间价差太小,撤销订单:{next_order}, time: {datetime.now()}")
cancel_order = self.http_client.cancel_order(next_order.get('symbol'),
client_order_id=next_order.get('clientOrderId'))
if cancel_order:
delete_orders.append(next_order)
for order in delete_orders:
self.sell_orders.remove(order)
        if len(self.sell_orders) > int(config.max_orders):  # maximum number of open orders allowed.
            # Too many open sell orders: cancel the highest-priced one.
            self.sell_orders.sort(key=lambda x: float(x['price']), reverse=True)  # highest price first; cast to float so prices sort numerically
delete_order = self.sell_orders[0]
print(f"订单太多了,撤销最高价的卖单:{delete_order}, time:{datetime.now()}")
order = self.http_client.cancel_order(delete_order.get('symbol'),
client_order_id=delete_order.get('clientOrderId'))
if order:
self.sell_orders.remove(delete_order)
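

# Minimal usage sketch (an editorial addition, not part of the original module):
# it assumes the utils.config fields referenced by the class above (api_key,
# api_secret, symbol, quantity, gap_percent, max_orders, proxy_host, proxy_port)
# are already filled in, and the 20-second polling interval is an assumption.
if __name__ == '__main__':
    import time

    trader = BinanceFutureTrader()
    trader.get_exchange_info()
    while True:
        try:
            trader.start()
        except Exception as error:  # keep the loop alive on transient API errors
            print(f"caught error: {error}, time: {datetime.now()}")
        time.sleep(20)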
|
test/programytest/storage/stores/sql/store/test_learnf.py | cdoebler1/AIML2 | 345 | 11076722 | <reponame>cdoebler1/AIML2
import unittest
import programytest.storage.engines as Engines
from programy.storage.stores.sql.config import SQLStorageConfiguration
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.store.learnf import SQLLearnfStore
from programytest.storage.asserts.store.assert_learnf import LearnfStoreAsserts
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.learn import LearnCategory
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.client import TestClient
class SQLLearnfStoreTests(LearnfStoreAsserts):
@unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
def test_initialise(self):
config = SQLStorageConfiguration()
engine = SQLStorageEngine(config)
engine.initialise()
store = SQLLearnfStore(engine)
self.assertEqual(store.storage_engine, engine)
@unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
def test_save_learnf(self):
config = SQLStorageConfiguration()
engine = SQLStorageEngine(config)
engine.initialise()
store = SQLLearnfStore(engine)
self.assert_save_learnf(store)
    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_get_all(self):
config = SQLStorageConfiguration()
engine = SQLStorageEngine(config)
engine.initialise()
store = SQLLearnfStore(engine)
with self.assertRaises(Exception):
store._get_all()
|
pypy/module/micronumpy/nditer.py | nanjekyejoannah/pypy | 381 | 11076726 | from rpython.rlib import jit
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.error import OperationError, oefmt
from pypy.module.micronumpy import support, concrete
from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, W_NumpyObject
from pypy.module.micronumpy.descriptor import decode_w_dtype
from pypy.module.micronumpy.iterators import ArrayIter
from pypy.module.micronumpy.strides import (calculate_broadcast_strides,
shape_agreement, shape_agreement_multiple)
from pypy.module.micronumpy.casting import (find_binop_result_dtype,
can_cast_array, can_cast_type)
import pypy.module.micronumpy.constants as NPY
from pypy.module.micronumpy.converters import order_converter
def parse_op_arg(space, name, w_op_flags, n, parse_one_arg):
if space.is_w(w_op_flags, space.w_None):
w_op_flags = space.newtuple([space.newtext('readonly')])
if not space.isinstance_w(w_op_flags, space.w_tuple) and not \
space.isinstance_w(w_op_flags, space.w_list):
raise oefmt(space.w_ValueError,
'%s must be a tuple or array of per-op flag-tuples',
name)
ret = []
w_lst = space.listview(w_op_flags)
if space.isinstance_w(w_lst[0], space.w_tuple) or \
space.isinstance_w(w_lst[0], space.w_list):
if len(w_lst) != n:
raise oefmt(space.w_ValueError,
'%s must be a tuple or array of per-op flag-tuples',
name)
for item in w_lst:
ret.append(parse_one_arg(space, space.listview(item)))
else:
op_flag = parse_one_arg(space, w_lst)
for i in range(n):
ret.append(op_flag)
return ret
class OpFlag(object):
def __init__(self):
self.rw = ''
self.broadcast = True
self.force_contig = False
self.force_align = False
self.native_byte_order = False
self.tmp_copy = ''
self.allocate = False
def parse_op_flag(space, lst):
op_flag = OpFlag()
for w_item in lst:
item = space.text_w(w_item)
if item == 'readonly':
op_flag.rw = 'r'
elif item == 'readwrite':
op_flag.rw = 'rw'
elif item == 'writeonly':
op_flag.rw = 'w'
elif item == 'no_broadcast':
op_flag.broadcast = False
elif item == 'contig':
op_flag.force_contig = True
elif item == 'aligned':
op_flag.force_align = True
elif item == 'nbo':
op_flag.native_byte_order = True
elif item == 'copy':
op_flag.tmp_copy = 'r'
elif item == 'updateifcopy':
op_flag.tmp_copy = 'rw'
elif item == 'allocate':
op_flag.allocate = True
elif item == 'no_subtype':
raise oefmt(space.w_NotImplementedError,
'"no_subtype" op_flag not implemented yet')
elif item == 'arraymask':
raise oefmt(space.w_NotImplementedError,
'"arraymask" op_flag not implemented yet')
elif item == 'writemask':
raise oefmt(space.w_NotImplementedError,
'"writemask" op_flag not implemented yet')
else:
raise oefmt(space.w_ValueError,
'op_flags must be a tuple or array of per-op flag-tuples')
if op_flag.rw == '':
raise oefmt(space.w_ValueError,
"None of the iterator flags READWRITE, READONLY, or "
"WRITEONLY were specified for an operand")
return op_flag
def parse_func_flags(space, nditer, w_flags):
if space.is_w(w_flags, space.w_None):
return
elif not space.isinstance_w(w_flags, space.w_tuple) and not \
space.isinstance_w(w_flags, space.w_list):
raise oefmt(space.w_ValueError,
'Iter global flags must be a list or tuple of strings')
lst = space.listview(w_flags)
for w_item in lst:
if not space.isinstance_w(w_item, space.w_bytes) and not \
space.isinstance_w(w_item, space.w_unicode):
raise oefmt(space.w_TypeError,
"expected string or Unicode object, %T found",
w_item)
item = space.text_w(w_item)
if item == 'external_loop':
nditer.external_loop = True
elif item == 'buffered':
# Each iterator should be 1d
nditer.buffered = True
elif item == 'c_index':
nditer.tracked_index = 'C'
elif item == 'f_index':
nditer.tracked_index = 'F'
elif item == 'multi_index':
nditer.tracked_index = 'multi'
elif item == 'common_dtype':
nditer.common_dtype = True
elif item == 'delay_bufalloc':
nditer.delay_bufalloc = True
elif item == 'grow_inner':
nditer.grow_inner = True
elif item == 'ranged':
nditer.ranged = True
elif item == 'refs_ok':
nditer.refs_ok = True
elif item == 'reduce_ok':
raise oefmt(space.w_NotImplementedError,
'nditer reduce_ok not implemented yet')
nditer.reduce_ok = True
elif item == 'zerosize_ok':
nditer.zerosize_ok = True
else:
raise oefmt(space.w_ValueError,
'Unexpected iterator global flag "%s"',
item)
if nditer.tracked_index and nditer.external_loop:
raise oefmt(space.w_ValueError,
'Iterator flag EXTERNAL_LOOP cannot be used if an index or '
'multi-index is being tracked')
def is_backward(imp_order, order):
if imp_order == order:
return False
if order == NPY.KEEPORDER:
return False
else:
return True
class OperandIter(ArrayIter):
_immutable_fields_ = ['slice_shape', 'slice_stride', 'slice_backstride',
'operand_type', 'base']
def getitem(self, state):
# cannot be called - must return a boxed value
assert False
def getitem_bool(self, state):
# cannot be called - must return a boxed value
assert False
def setitem(self, state, elem):
# cannot be called - must return a boxed value
assert False
class ConcreteIter(OperandIter):
def __init__(self, array, size, shape, strides, backstrides,
op_flags, base):
OperandIter.__init__(self, array, size, shape, strides, backstrides)
        self.slice_shape = []
self.slice_stride = []
self.slice_backstride = []
if op_flags.rw == 'r':
self.operand_type = concrete.ConcreteNonWritableArrayWithBase
else:
self.operand_type = concrete.ConcreteArrayWithBase
self.base = base
def getoperand(self, state):
assert state.iterator is self
impl = self.operand_type
res = impl([], self.array.dtype, self.array.order, [], [],
self.array.storage, self.base)
res.start = state.offset
return res
class SliceIter(OperandIter):
def __init__(self, array, size, shape, strides, backstrides, slice_shape,
slice_stride, slice_backstride, op_flags, base):
OperandIter.__init__(self, array, size, shape, strides, backstrides)
self.slice_shape = slice_shape
self.slice_stride = slice_stride
self.slice_backstride = slice_backstride
if op_flags.rw == 'r':
self.operand_type = concrete.NonWritableSliceArray
else:
self.operand_type = concrete.SliceArray
self.base = base
def getoperand(self, state):
assert state.iterator is self
impl = self.operand_type
arr = impl(state.offset, self.slice_stride, self.slice_backstride,
self.slice_shape, self.array, self.base)
return arr
def calculate_ndim(op_in, oa_ndim):
    if oa_ndim >= 0:
return oa_ndim
else:
ndim = 0
for op in op_in:
if op is None:
continue
assert isinstance(op, W_NDimArray)
ndim = max(ndim, op.ndims())
return ndim
def coalesce_axes(it, space):
# Copy logic from npyiter_coalesce_axes, used in ufunc iterators
# and in nditer's with 'external_loop' flag
can_coalesce = True
for idim in range(it.ndim - 1):
for op_it, _ in it.iters:
if op_it is None:
continue
assert isinstance(op_it, ArrayIter)
indx = len(op_it.strides)
if it.order == NPY.FORTRANORDER:
indx = len(op_it.array.strides) - indx
                assert indx >= 0
astrides = op_it.array.strides[indx:]
else:
astrides = op_it.array.strides[:indx]
            # does op_it iterate over the array "naturally"?
if astrides != op_it.strides:
can_coalesce = False
break
if can_coalesce:
for i in range(len(it.iters)):
new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it,
it.order)
it.iters[i] = (new_iter, new_iter.reset())
if len(it.shape) > 1:
if it.order == NPY.FORTRANORDER:
it.shape = it.shape[1:]
else:
it.shape = it.shape[:-1]
else:
it.shape = [1]
else:
break
# Always coalesce at least one
for i in range(len(it.iters)):
new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it, NPY.CORDER)
it.iters[i] = (new_iter, new_iter.reset())
if len(it.shape) > 1:
if it.order == NPY.FORTRANORDER:
it.shape = it.shape[1:]
else:
it.shape = it.shape[:-1]
else:
it.shape = [1]
def coalesce_iter(old_iter, op_flags, it, order, flat=True):
'''
We usually iterate through an array one value at a time.
But after coalesce(), getoperand() will return a slice by removing
the fastest varying dimension(s) from the beginning or end of the shape.
If flat is true, then the slice will be 1d, otherwise stack up the shape of
the fastest varying dimension in the slice, so an iterator of a 'C' array
of shape (2,4,3) after two calls to coalesce will iterate 2 times over a slice
of shape (4,3) by setting the offset to the beginning of the data at each iteration
'''
shape = [s+1 for s in old_iter.shape_m1]
if len(shape) < 1:
return old_iter
strides = old_iter.strides
backstrides = old_iter.backstrides
if order == NPY.FORTRANORDER:
new_shape = shape[1:]
new_strides = strides[1:]
new_backstrides = backstrides[1:]
_stride = old_iter.slice_stride + [strides[0]]
_shape = old_iter.slice_shape + [shape[0]]
_backstride = old_iter.slice_backstride + [strides[0] * (shape[0] - 1)]
fastest = shape[0]
else:
new_shape = shape[:-1]
new_strides = strides[:-1]
new_backstrides = backstrides[:-1]
# use the operand's iterator's rightmost stride,
# even if it is not the fastest (for 'F' or swapped axis)
_stride = [strides[-1]] + old_iter.slice_stride
_shape = [shape[-1]] + old_iter.slice_shape
_backstride = [(shape[-1] - 1) * strides[-1]] + old_iter.slice_backstride
fastest = shape[-1]
if fastest == 0:
return old_iter
if flat:
_shape = [support.product(_shape)]
if len(_stride) > 1:
_stride = [min(_stride[0], _stride[1])]
_backstride = [(shape[0] - 1) * _stride[0]]
return SliceIter(old_iter.array, old_iter.size / fastest,
new_shape, new_strides, new_backstrides,
_shape, _stride, _backstride, op_flags, it)
class IndexIterator(object):
def __init__(self, shape, backward=False):
self.shape = shape
self.index = [0] * len(shape)
self.backward = backward
@jit.unroll_safe
def next(self):
for i in range(len(self.shape) - 1, -1, -1):
if self.index[i] < self.shape[i] - 1:
self.index[i] += 1
break
else:
self.index[i] = 0
def getvalue(self):
if not self.backward:
ret = self.index[-1]
for i in range(len(self.shape) - 2, -1, -1):
ret += self.index[i] * self.shape[i - 1]
else:
ret = self.index[0]
for i in range(1, len(self.shape)):
ret += self.index[i] * self.shape[i - 1]
return ret
class W_NDIter(W_NumpyObject):
_immutable_fields_ = ['ndim', ]
def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes,
w_casting, w_op_axes, w_itershape, buffersize=0,
order=NPY.KEEPORDER, allow_backward=True):
self.external_loop = False
self.buffered = False
self.tracked_index = ''
self.common_dtype = False
self.delay_bufalloc = False
self.grow_inner = False
self.ranged = False
self.refs_ok = False
self.reduce_ok = False
self.zerosize_ok = False
self.index_iter = None
self.done = False
self.first_next = True
self.op_axes = []
self.allow_backward = allow_backward
if not space.is_w(w_casting, space.w_None):
self.casting = space.text_w(w_casting)
else:
self.casting = 'safe'
# convert w_seq operands to a list of W_NDimArray
if space.isinstance_w(w_seq, space.w_tuple) or \
space.isinstance_w(w_seq, space.w_list):
w_seq_as_list = space.listview(w_seq)
self.seq = [convert_to_array(space, w_elem)
if not space.is_none(w_elem) else None
for w_elem in w_seq_as_list]
else:
self.seq = [convert_to_array(space, w_seq)]
if order == NPY.ANYORDER:
# 'A' means "'F' order if all the arrays are Fortran contiguous,
# 'C' order otherwise"
order = NPY.CORDER
for s in self.seq:
if s and not(s.get_flags() & NPY.ARRAY_F_CONTIGUOUS):
break
else:
order = NPY.FORTRANORDER
elif order == NPY.KEEPORDER:
# 'K' means "as close to the order the array elements appear in
# memory as possible", so match self.order to seq.order
order = NPY.CORDER
for s in self.seq:
if s and not(s.get_order() == NPY.FORTRANORDER):
break
else:
order = NPY.FORTRANORDER
self.order = order
parse_func_flags(space, self, w_flags)
self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags,
len(self.seq), parse_op_flag)
# handle w_op_axes
oa_ndim = -1
if not space.is_none(w_op_axes):
oa_ndim = self.set_op_axes(space, w_op_axes)
self.ndim = calculate_ndim(self.seq, oa_ndim)
# handle w_op_dtypes part 1: creating self.dtypes list from input
if not space.is_none(w_op_dtypes):
w_seq_as_list = space.listview(w_op_dtypes)
self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list]
if len(self.dtypes) != len(self.seq):
raise oefmt(space.w_ValueError,
"op_dtypes must be a tuple/list matching the number of ops")
else:
self.dtypes = []
# handle None or writable operands, calculate my shape
outargs = [i for i in range(len(self.seq))
if self.seq[i] is None or self.op_flags[i].rw == 'w']
if len(outargs) > 0:
out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs])
else:
out_shape = None
if space.isinstance_w(w_itershape, space.w_tuple) or \
space.isinstance_w(w_itershape, space.w_list):
self.shape = [space.int_w(i) for i in space.listview(w_itershape)]
else:
self.shape = shape_agreement_multiple(space, self.seq,
shape=out_shape)
if len(outargs) > 0:
# Make None operands writeonly and flagged for allocation
if len(self.dtypes) > 0:
out_dtype = self.dtypes[outargs[0]]
else:
out_dtype = None
for i in range(len(self.seq)):
if self.seq[i] is None:
self.op_flags[i].allocate = True
continue
if self.op_flags[i].rw == 'w':
continue
out_dtype = find_binop_result_dtype(
space, self.seq[i].get_dtype(), out_dtype)
for i in outargs:
if self.seq[i] is None:
# XXX can we postpone allocation to later?
self.seq[i] = W_NDimArray.from_shape(space, self.shape, out_dtype)
else:
if not self.op_flags[i].broadcast:
# Raises if output cannot be broadcast
try:
shape_agreement(space, self.shape, self.seq[i], False)
except OperationError as e:
raise oefmt(space.w_ValueError, "non-broadcastable"
" output operand with shape %s doesn't match "
"the broadcast shape %s",
str(self.seq[i].get_shape()),
str(self.shape))
if self.tracked_index != "":
order = self.order
if order == NPY.KEEPORDER:
order = self.seq[0].implementation.order
if self.tracked_index == "multi":
backward = False
else:
backward = ((
order == NPY.CORDER and self.tracked_index != 'C') or (
order == NPY.FORTRANORDER and self.tracked_index != 'F'))
self.index_iter = IndexIterator(self.shape, backward=backward)
# handle w_op_dtypes part 2: copy where needed if possible
if len(self.dtypes) > 0:
for i in range(len(self.seq)):
self_d = self.dtypes[i]
seq_d = self.seq[i].get_dtype()
if not self_d:
self.dtypes[i] = seq_d
elif self_d != seq_d:
impl = self.seq[i].implementation
if self.buffered or 'r' in self.op_flags[i].tmp_copy:
if not can_cast_array(
space, self.seq[i], self_d, self.casting):
raise oefmt(space.w_TypeError, "Iterator operand %d"
" dtype could not be cast from %R to %R"
" according to the rule '%s'",
i, seq_d, self_d, self.casting)
order = support.get_order_as_CF(impl.order, self.order)
new_impl = impl.astype(space, self_d, order).copy(space)
self.seq[i] = W_NDimArray(new_impl)
else:
raise oefmt(space.w_TypeError, "Iterator "
"operand required copying or buffering, "
"but neither copying nor buffering was "
"enabled")
if 'w' in self.op_flags[i].rw:
if not can_cast_type(
space, self_d, seq_d, self.casting):
raise oefmt(space.w_TypeError, "Iterator"
" requested dtype could not be cast from "
" %R to %R, the operand %d dtype, accord"
"ing to the rule '%s'",
self_d, seq_d, i, self.casting)
elif self.buffered and not (self.external_loop and len(self.seq)<2):
for i in range(len(self.seq)):
if i not in outargs:
self.seq[i] = self.seq[i].descr_copy(space,
w_order=space.newint(self.order))
self.dtypes = [s.get_dtype() for s in self.seq]
else:
            # copy them from seq
self.dtypes = [s.get_dtype() for s in self.seq]
# create an iterator for each operand
self.iters = []
for i in range(len(self.seq)):
it = self.get_iter(space, i)
it.contiguous = False
self.iters.append((it, it.reset()))
if self.external_loop:
coalesce_axes(self, space)
def get_iter(self, space, i):
arr = self.seq[i]
imp = arr.implementation
if arr.is_scalar():
return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self)
shape = self.shape
if (self.external_loop and len(self.seq)<2 and self.buffered):
# Special case, always return a memory-ordered iterator
stride = imp.dtype.elsize
backstride = imp.size * stride - stride
return ConcreteIter(imp, imp.get_size(),
[support.product(shape)], [stride], [backstride],
self.op_flags[i], self)
backward = imp.order != self.order
# XXX cleanup needed
strides = imp.strides
backstrides = imp.backstrides
if self.allow_backward:
if ((abs(imp.strides[0]) < abs(imp.strides[-1]) and not backward) or \
(abs(imp.strides[0]) > abs(imp.strides[-1]) and backward)):
# flip the strides. Is this always true for multidimension?
strides = imp.strides[:]
backstrides = imp.backstrides[:]
shape = imp.shape[:]
strides.reverse()
backstrides.reverse()
shape.reverse()
r = calculate_broadcast_strides(strides, backstrides, imp.shape,
shape, backward)
iter_shape = shape
if len(shape) != len(r[0]):
# shape can be shorter when using an external loop, just return a view
iter_shape = imp.shape
return ConcreteIter(imp, imp.get_size(), iter_shape, r[0], r[1],
self.op_flags[i], self)
def set_op_axes(self, space, w_op_axes):
if space.len_w(w_op_axes) != len(self.seq):
raise oefmt(space.w_ValueError,
"op_axes must be a tuple/list matching the number of ops")
op_axes = space.listview(w_op_axes)
oa_ndim = -1
for w_axis in op_axes:
if not space.is_none(w_axis):
axis_len = space.len_w(w_axis)
if oa_ndim == -1:
oa_ndim = axis_len
elif axis_len != oa_ndim:
raise oefmt(space.w_ValueError,
"Each entry of op_axes must have the same size")
self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1
for x in space.listview(w_axis)])
if oa_ndim == -1:
raise oefmt(space.w_ValueError,
"If op_axes is provided, at least one list of axes "
"must be contained within it")
raise oefmt(space.w_NotImplementedError, "op_axis not finished yet")
# Check that values make sense:
# - in bounds for each operand
# ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions
# - no repeat axis
# ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0
return oa_ndim
def descr_iter(self, space):
return self
def getitem(self, it, st):
w_res = W_NDimArray(it.getoperand(st))
return w_res
def descr_getitem(self, space, w_idx):
idx = space.int_w(w_idx)
try:
it, st = self.iters[idx]
except IndexError:
raise oefmt(space.w_IndexError,
"Iterator operand index %d is out of bounds", idx)
return self.getitem(it, st)
def descr_setitem(self, space, w_idx, w_value):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_len(self, space):
        return space.newint(len(self.iters))
@jit.unroll_safe
def descr_next(self, space):
for it, st in self.iters:
if not it.done(st):
break
else:
self.done = True
raise OperationError(space.w_StopIteration, space.w_None)
res = []
if self.index_iter:
if not self.first_next:
self.index_iter.next()
else:
self.first_next = False
for i, (it, st) in enumerate(self.iters):
res.append(self.getitem(it, st))
self.iters[i] = (it, it.next(st))
if len(res) < 2:
return res[0]
return space.newtuple(res)
def iternext(self):
if self.index_iter:
self.index_iter.next()
for i, (it, st) in enumerate(self.iters):
self.iters[i] = (it, it.next(st))
for it, st in self.iters:
if not it.done(st):
break
else:
self.done = True
return self.done
return self.done
def descr_iternext(self, space):
return space.newbool(self.iternext())
def descr_copy(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_debug_print(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_enable_external_loop(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
@unwrap_spec(axis=int)
def descr_remove_axis(self, space, axis):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_remove_multi_index(self, space, w_multi_index):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_reset(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_get_operands(self, space):
l_w = []
for op in self.seq:
l_w.append(op.descr_view(space))
return space.newlist(l_w)
def descr_get_dtypes(self, space):
res = [None] * len(self.seq)
for i in range(len(self.seq)):
res[i] = self.seq[i].descr_get_dtype(space)
return space.newtuple(res)
def descr_get_finished(self, space):
return space.newbool(self.done)
def descr_get_has_delayed_bufalloc(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_get_has_index(self, space):
return space.newbool(self.tracked_index in ["C", "F"])
def descr_get_index(self, space):
if not self.tracked_index in ["C", "F"]:
raise oefmt(space.w_ValueError, "Iterator does not have an index")
if self.done:
raise oefmt(space.w_ValueError, "Iterator is past the end")
return space.newint(self.index_iter.getvalue())
def descr_get_has_multi_index(self, space):
return space.newbool(self.tracked_index == "multi")
def descr_get_multi_index(self, space):
if not self.tracked_index == "multi":
raise oefmt(space.w_ValueError, "Iterator is not tracking a multi-index")
if self.done:
raise oefmt(space.w_ValueError, "Iterator is past the end")
return space.newtuple([space.newint(x) for x in self.index_iter.index])
def descr_get_iterationneedsapi(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_get_iterindex(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_get_itersize(self, space):
return space.newint(support.product(self.shape))
def descr_get_itviews(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_get_ndim(self, space):
return space.newint(self.ndim)
def descr_get_nop(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_get_shape(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
def descr_get_value(self, space):
raise oefmt(space.w_NotImplementedError, "not implemented yet")
@unwrap_spec(w_flags=WrappedDefault(None), w_op_flags=WrappedDefault(None),
w_op_dtypes=WrappedDefault(None), w_order=WrappedDefault(None),
w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None),
w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(0))
def descr_new_nditer(space, w_subtype, w_seq, w_flags, w_op_flags, w_op_dtypes,
w_casting, w_op_axes, w_itershape, w_buffersize, w_order):
npy_order = order_converter(space, w_order, NPY.KEEPORDER)
buffersize = space.int_w(w_buffersize)
return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes,
w_itershape, buffersize, npy_order)
W_NDIter.typedef = TypeDef('numpy.nditer',
__new__ = interp2app(descr_new_nditer),
__iter__ = interp2app(W_NDIter.descr_iter),
__getitem__ = interp2app(W_NDIter.descr_getitem),
__setitem__ = interp2app(W_NDIter.descr_setitem),
__len__ = interp2app(W_NDIter.descr_len),
next = interp2app(W_NDIter.descr_next),
iternext = interp2app(W_NDIter.descr_iternext),
copy = interp2app(W_NDIter.descr_copy),
debug_print = interp2app(W_NDIter.descr_debug_print),
enable_external_loop = interp2app(W_NDIter.descr_enable_external_loop),
remove_axis = interp2app(W_NDIter.descr_remove_axis),
remove_multi_index = interp2app(W_NDIter.descr_remove_multi_index),
reset = interp2app(W_NDIter.descr_reset),
operands = GetSetProperty(W_NDIter.descr_get_operands),
dtypes = GetSetProperty(W_NDIter.descr_get_dtypes),
finished = GetSetProperty(W_NDIter.descr_get_finished),
has_delayed_bufalloc = GetSetProperty(W_NDIter.descr_get_has_delayed_bufalloc),
has_index = GetSetProperty(W_NDIter.descr_get_has_index),
index = GetSetProperty(W_NDIter.descr_get_index),
has_multi_index = GetSetProperty(W_NDIter.descr_get_has_multi_index),
multi_index = GetSetProperty(W_NDIter.descr_get_multi_index),
iterationneedsapi = GetSetProperty(W_NDIter.descr_get_iterationneedsapi),
iterindex = GetSetProperty(W_NDIter.descr_get_iterindex),
itersize = GetSetProperty(W_NDIter.descr_get_itersize),
itviews = GetSetProperty(W_NDIter.descr_get_itviews),
ndim = GetSetProperty(W_NDIter.descr_get_ndim),
nop = GetSetProperty(W_NDIter.descr_get_nop),
shape = GetSetProperty(W_NDIter.descr_get_shape),
value = GetSetProperty(W_NDIter.descr_get_value),
)
W_NDIter.typedef.acceptable_as_base_class = False
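
# ---------------------------------------------------------------------------
# Illustrative app-level sketch (an editorial addition, not interpreter code):
# assuming the micronumpy module is enabled, the typedef above is what makes
# the following ordinary numpy-style loop work:
#
#     import numpy as np
#     a = np.arange(6).reshape(2, 3)
#     it = np.nditer(a, flags=['c_index'])
#     while not it.finished:
#         print(it.index, it[0])
#         it.iternext()
# ---------------------------------------------------------------------------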
|
tests/layers/test_convolution.py | FrostByte266/neupy | 801 | 11076727 | import random
from itertools import product
from collections import namedtuple
import numpy as np
import tensorflow as tf
from neupy import layers
from neupy.utils import asfloat, shape_to_tuple
from neupy.layers.convolutions import conv_output_shape, deconv_output_shape
from neupy.exceptions import LayerConnectionError
from base import BaseTestCase
class ConvLayersTestCase(BaseTestCase):
def get_shape(self, value):
shape = self.eval(tf.shape(value))
return tuple(shape)
def test_convolution_params(self):
inp = layers.Input((5, 5, 1))
conv = layers.Convolution((2, 2, 6))
# Propagate data through the network in
# order to trigger initialization
(inp >> conv).outputs
self.assertEqual((2, 2, 1, 6), self.get_shape(conv.weight))
self.assertEqual((6,), self.get_shape(conv.bias))
def test_conv_shapes(self):
paddings = ['valid', 'same']
strides = [(1, 1), (2, 1), (2, 2)]
x = asfloat(np.random.random((20, 12, 11, 2)))
for stride, padding in product(strides, paddings):
network = layers.join(
layers.Input((12, 11, 2)),
layers.Convolution((3, 4, 5), padding=padding, stride=stride),
)
y = self.eval(network.output(x))
self.assertShapesEqual(
y.shape[1:],
network.output_shape[1:],
msg='padding={} and stride={}'.format(padding, stride),
)
def test_valid_strides(self):
Case = namedtuple("Case", "stride expected_output")
testcases = (
Case(stride=(4, 4), expected_output=(4, 4)),
Case(stride=(4,), expected_output=(4, 1)),
Case(stride=4, expected_output=(4, 4)),
)
for testcase in testcases:
conv = layers.Convolution(
(2, 3, 1), stride=testcase.stride)
msg = "Input stride size: {}".format(testcase.stride)
self.assertEqual(
testcase.expected_output, conv.stride, msg=msg)
def test_conv_invalid_strides(self):
invalid_strides = (
(4, 4, 4),
-10,
(-5, -5),
(-5, 5),
(-5, 0),
)
for stride in invalid_strides:
msg = "Input stride size: {}".format(stride)
with self.assertRaises(ValueError, msg=msg):
layers.Convolution((2, 3, 1), stride=stride)
def test_valid_padding(self):
valid_paddings = ('VALID', 'SAME', 'same', 'valid', 10, 1, (7, 1))
for padding in valid_paddings:
layers.Convolution((2, 3, 1), padding=padding)
def test_invalid_padding(self):
invalid_paddings = ('invalid mode', -10, (10, -5))
for padding in invalid_paddings:
msg = "Padding: {}".format(padding)
with self.assertRaises(ValueError, msg=msg):
layers.Convolution((2, 3, 1), padding=padding)
def test_conv_output_shape_func_exceptions(self):
with self.assertRaises(ValueError):
# Wrong stride value
conv_output_shape(
dimension_size=5, filter_size=5,
padding='VALID', stride='not int')
with self.assertRaises(ValueError):
# Wrong filter size value
conv_output_shape(
dimension_size=5, filter_size='not int',
padding='SAME', stride=5)
with self.assertRaisesRegexp(ValueError, "unknown \S+ padding value"):
# Wrong padding value
conv_output_shape(
dimension_size=5, filter_size=5,
padding=1.5, stride=5,
)
def test_conv_output_shape_int_padding(self):
output_shape = conv_output_shape(
dimension_size=10,
padding=3,
filter_size=5,
stride=5,
)
self.assertEqual(output_shape, 3)
def test_conv_unknown_dim_size(self):
shape = conv_output_shape(
dimension_size=None, filter_size=5,
padding='VALID', stride=5,
)
self.assertEqual(shape, None)
def test_conv_invalid_padding_exception(self):
error_msg = "greater or equal to zero"
with self.assertRaisesRegexp(ValueError, error_msg):
layers.Convolution((1, 3, 3), padding=-1)
error_msg = "Tuple .+ greater or equal to zero"
with self.assertRaisesRegexp(ValueError, error_msg):
layers.Convolution((1, 3, 3), padding=(2, -1))
with self.assertRaisesRegexp(ValueError, "invalid string value"):
layers.Convolution((1, 3, 3), padding='NOT_SAME')
with self.assertRaisesRegexp(ValueError, "contains two elements"):
layers.Convolution((1, 3, 3), padding=(3, 3, 3))
def test_conv_invalid_input_shape(self):
with self.assertRaises(LayerConnectionError):
layers.join(
layers.Input(10),
layers.Convolution((1, 3, 3)),
)
def test_conv_with_custom_int_padding(self):
network = layers.join(
layers.Input((5, 5, 1)),
layers.Convolution((3, 3, 1), bias=0, weight=1, padding=2),
)
x = asfloat(np.ones((1, 5, 5, 1)))
expected_output = np.array([
[1, 2, 3, 3, 3, 2, 1],
[2, 4, 6, 6, 6, 4, 2],
[3, 6, 9, 9, 9, 6, 3],
[3, 6, 9, 9, 9, 6, 3],
[3, 6, 9, 9, 9, 6, 3],
[2, 4, 6, 6, 6, 4, 2],
[1, 2, 3, 3, 3, 2, 1],
]).reshape((1, 7, 7, 1))
actual_output = self.eval(network.output(x))
np.testing.assert_array_almost_equal(expected_output, actual_output)
def test_conv_with_custom_tuple_padding(self):
inp = layers.Input((5, 5, 1))
conv = layers.Convolution((3, 3, 1), bias=0, weight=1, padding=(0, 2))
network = (inp >> conv)
network.outputs
x = asfloat(np.ones((1, 5, 5, 1)))
expected_output = np.array([
[3, 6, 9, 9, 9, 6, 3],
[3, 6, 9, 9, 9, 6, 3],
[3, 6, 9, 9, 9, 6, 3],
]).reshape((1, 3, 7, 1))
actual_output = self.eval(network.output(x))
np.testing.assert_array_almost_equal(expected_output, actual_output)
self.assertShapesEqual(network.output_shape, (None, 3, 7, 1))
def test_conv_without_bias(self):
inp = layers.Input((5, 5, 1))
conv = layers.Convolution((3, 3, 1), bias=None, weight=1)
network = inp >> conv
network.outputs
x = asfloat(np.ones((1, 5, 5, 1)))
expected_output = 9 * np.ones((1, 3, 3, 1))
actual_output = self.eval(network.output(x))
np.testing.assert_array_almost_equal(expected_output, actual_output)
def test_conv_unknown_input_width_and_height(self):
network = layers.join(
layers.Input((None, None, 3)),
layers.Convolution((3, 3, 5)),
)
self.assertShapesEqual(network.output_shape, (None, None, None, 5))
input_value = asfloat(np.ones((1, 12, 12, 3)))
actual_output = self.eval(network.output(input_value))
self.assertEqual(actual_output.shape, (1, 10, 10, 5))
input_value = asfloat(np.ones((1, 21, 21, 3)))
actual_output = self.eval(network.output(input_value))
self.assertEqual(actual_output.shape, (1, 19, 19, 5))
def test_dilated_convolution(self):
network = layers.join(
layers.Input((6, 6, 1)),
layers.Convolution((3, 3, 1), dilation=2, weight=1, bias=None),
)
input_value = asfloat(np.arange(36).reshape(1, 6, 6, 1))
actual_output = self.eval(network.output(input_value))
self.assertShapesEqual(actual_output.shape, (1, 2, 2, 1))
self.assertShapesEqual(
actual_output.shape[1:],
network.output_shape[1:])
actual_output = actual_output[0, :, :, 0]
expected_output = np.array([
[126, 135], # every row value adds +1 per filter value (+9)
[180, 189], # every col value adds +6 per filter value (+54)
])
np.testing.assert_array_almost_equal(actual_output, expected_output)
def test_convolution_repr(self):
layer = layers.Convolution((3, 3, 10), name='conv')
self.assertEqual(
str(layer),
(
"Convolution((3, 3, 10), padding='VALID', stride=(1, 1), "
"dilation=(1, 1), weight=HeNormal(gain=2), bias=Constant(0), "
"name='conv')"
)
)
def test_conv_output_shape_when_input_unknown(self):
block = layers.join(
layers.Convolution((3, 3, 32)),
layers.Relu(),
layers.BatchNorm(),
)
self.assertShapesEqual(block.input_shape, None)
self.assertShapesEqual(block.output_shape, (None, None, None, 32))
class DeconvolutionTestCase(BaseTestCase):
def test_deconvolution(self):
network = layers.join(
layers.Input((10, 10, 3)),
layers.Convolution((3, 3, 7)),
layers.Deconvolution((3, 3, 4)),
)
shapes = network.output_shapes_per_layer
shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
self.assertDictEqual(
shapes, {
network.layers[0]: (None, 10, 10, 3),
network.layers[1]: (None, 8, 8, 7),
network.layers[2]: (None, 10, 10, 4),
}
)
input_value = asfloat(np.random.random((1, 10, 10, 3)))
actual_output = self.eval(network.output(input_value))
self.assertEqual(actual_output.shape, (1, 10, 10, 4))
def test_deconvolution_same_padding(self):
network = layers.join(
layers.Input((10, 10, 3)),
layers.Convolution((3, 3, 7), padding='same'),
layers.Deconvolution((3, 3, 4), padding='same'),
)
shapes = network.output_shapes_per_layer
shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
self.assertDictEqual(
shapes, {
network.layers[0]: (None, 10, 10, 3),
network.layers[1]: (None, 10, 10, 7),
network.layers[2]: (None, 10, 10, 4),
}
)
input_value = asfloat(np.random.random((1, 10, 10, 3)))
actual_output = self.eval(network.output(input_value))
self.assertEqual(actual_output.shape, (1, 10, 10, 4))
def test_deconvolution_int_padding(self):
network = layers.join(
layers.Input((10, 10, 3)),
layers.Convolution((3, 3, 7), padding=9),
layers.Deconvolution((3, 3, 4), padding=9),
)
shapes = network.output_shapes_per_layer
shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
self.assertDictEqual(
shapes, {
network.layers[0]: (None, 10, 10, 3),
network.layers[1]: (None, 26, 26, 7),
network.layers[2]: (None, 10, 10, 4),
}
)
input_value = asfloat(np.random.random((1, 10, 10, 3)))
actual_output = self.eval(network.output(input_value))
self.assertEqual(actual_output.shape, (1, 10, 10, 4))
def test_deconvolution_tuple_padding(self):
network = layers.join(
layers.Input((10, 10, 3)),
layers.Convolution((3, 3, 7), padding=(9, 3)),
layers.Deconvolution((3, 3, 4), padding=(9, 3)),
)
shapes = network.output_shapes_per_layer
shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
        self.assertDictEqual(
shapes, {
network.layers[0]: (None, 10, 10, 3),
network.layers[1]: (None, 26, 14, 7),
network.layers[2]: (None, 10, 10, 4),
}
)
input_value = asfloat(np.random.random((1, 10, 10, 3)))
actual_output = self.eval(network.output(input_value))
self.assertEqual(actual_output.shape, (1, 10, 10, 4))
def test_deconv_unknown_input_width_and_height(self):
network = layers.join(
layers.Input((None, None, 3)),
layers.Convolution((3, 3, 7)),
layers.Deconvolution((3, 3, 4)),
)
shapes = network.output_shapes_per_layer
shapes = {l: shape_to_tuple(s) for l, s in shapes.items()}
self.assertDictEqual(
shapes, {
network.layers[0]: (None, None, None, 3),
network.layers[1]: (None, None, None, 7),
network.layers[2]: (None, None, None, 4),
}
)
input_value = asfloat(np.random.random((1, 10, 10, 3)))
actual_output = self.eval(network.output(input_value))
self.assertEqual(actual_output.shape, (1, 10, 10, 4))
input_value = asfloat(np.random.random((1, 7, 7, 3)))
actual_output = self.eval(network.output(input_value))
self.assertEqual(actual_output.shape, (1, 7, 7, 4))
def test_deconv_output_shape(self):
self.assertEqual(None, deconv_output_shape(None, 3, 'same', 1))
self.assertEqual(12, deconv_output_shape(10, 3, 'valid', 1))
self.assertEqual(16, deconv_output_shape(10, 7, 'valid', 1))
self.assertEqual(10, deconv_output_shape(10, 3, 'same', 1))
self.assertEqual(14, deconv_output_shape(4, 5, 'valid', 3))
self.assertEqual(12, deconv_output_shape(4, 3, 'same', 3))
self.assertEqual(12, deconv_output_shape(4, 7, 'same', 3))
def test_deconv_output_shape_exception(self):
with self.assertRaisesRegexp(ValueError, "unknown \S+ padding"):
deconv_output_shape(10, 3, padding='xxx', stride=1)
with self.assertRaisesRegexp(ValueError, "doesn't support dilation"):
deconv_output_shape(10, 3, padding='valid', stride=1, dilation=2)
def test_deconvolution_for_random_cases(self):
# A few random cases will check if output shape computed from
# the network is the same as the shape that we get after we
# propagated input through the network.
for test_id in range(30):
width = random.randint(7, 20)
height = random.randint(7, 20)
fh = random.randint(1, 7)
fw = random.randint(1, 7)
pad = random.choice([
'valid',
'same',
random.randint(0, 10),
(
random.randint(0, 10),
random.randint(0, 10),
),
])
stride = random.choice([
random.randint(1, 4),
(
random.randint(1, 4),
random.randint(1, 4),
),
])
print('\n------------')
print("Test case #{}".format(test_id))
print('------------')
print("Image shape: {}x{}".format(height, width))
print("Filter shape: {}x{}".format(fh, fw))
print("Padding: {}".format(pad))
print("Stride: {}".format(stride))
network = layers.join(
layers.Input((height, width, 1)),
layers.Convolution((fh, fw, 2), padding=pad, stride=stride),
layers.Deconvolution((fh, fw, 1), padding=pad, stride=stride),
)
input_value = asfloat(np.random.random((1, height, width, 1)))
actual_output = self.eval(network.output(input_value))
self.assertEqual(actual_output.shape[1:], network.output_shape[1:])
def test_deconvolution_repr(self):
layer = layers.Deconvolution((3, 3, 10), name='deconv')
self.assertEqual(
str(layer),
(
"Deconvolution((3, 3, 10), padding='VALID', stride=(1, 1), "
"weight=HeNormal(gain=2), bias=Constant(0), name='deconv')"
)
)
|
code/run_example.py | binbin-xu/DeeperInverseCompositionalAlgorithm | 154 | 11076736 | """
An extremely simple example to show how to run the algorithm
@author: <NAME>
@date: May 2019
"""
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as func
import models.LeastSquareTracking as ICtracking
from tqdm import tqdm
from torch.utils.data import DataLoader
from train_utils import check_cuda
from data.SimpleLoader import SimpleLoader
def resize(img0, img1, depth0, depth1, K_in, resizeH, resizeW):
H, W = img0.shape[-2:]
I0 = func.interpolate(img0, (resizeH,resizeW), mode='bilinear', align_corners=True)
I1 = func.interpolate(img1, (resizeH,resizeW), mode='bilinear', align_corners=True)
D0 = func.interpolate(depth0, (resizeH,resizeW), mode='nearest')
D1 = func.interpolate(depth1, (resizeH,resizeW), mode='nearest')
sx = resizeH / H
sy = resizeW / W
K = K_in.clone()
K[:,0] *= sx
K[:,1] *= sy
K[:,2] *= sx
K[:,3] *= sy
return I0, I1, D0, D1, K
def run_inference(dataloader, net):
progress = tqdm(dataloader, ncols=100,
desc = 'Run the deeper inverse compositional algorithm',
total= len(dataloader))
net.eval()
    for idx, batch in enumerate(progress):
color0, color1, depth0, depth1, K = check_cuda(batch)
        # downsize the input to 120x160, the resolution at which the algorithm was trained
C0, C1, D0, D1, K = resize(color0, color1, depth0, depth1, K, resizeH = 120, resizeW=160)
with torch.no_grad():
R, t = net.forward(C0, C1, D0, D1, K)
print('Rotation: ')
print(R)
print('translation: ')
print(t)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run the network inference example.')
parser.add_argument('--checkpoint', default='trained_models/TUM_RGBD_ABC_final.pth.tar',
type=str, help='the path to the pre-trained checkpoint.')
parser.add_argument('--color_dir', default='data/data_examples/TUM/color',
help='the directory of color images')
parser.add_argument('--depth_dir', default='data/data_examples/TUM/depth',
help='the directory of depth images')
parser.add_argument('--intrinsic', default='525.0,525.0,319.5,239.5',
help='Simple pin-hole camera intrinsics, input in the format (fx, fy, cx, cy)')
config = parser.parse_args()
K = [float(x) for x in config.intrinsic.split(',')]
simple_loader = SimpleLoader(config.color_dir, config.depth_dir, K)
simple_loader = DataLoader(simple_loader, batch_size=1, shuffle=False)
net = ICtracking.LeastSquareTracking(
encoder_name = 'ConvRGBD2',
max_iter_per_pyr= 3,
mEst_type = 'MultiScale2w',
solver_type = 'Direct-ResVol')
if torch.cuda.is_available():
net.cuda()
net.load_state_dict(torch.load(config.checkpoint)['state_dict'])
run_inference(simple_loader, net) |
tests/network/contract/test_verification.py | ActorForth/brownie | 122 | 11076739 | from pathlib import Path
import pytest
import solcx
from brownie.project import load, new
from brownie.project.compiler.solidity import find_best_solc_version
sources = [
(
"contracts/Foo.sol",
"""
contract Foo {
uint256 value_;
function value() external view returns(uint256) {
return value_;
}
}
""",
),
(
"contracts/Baz.sol",
"""
enum Test {
A,
B,
C,
D
}
contract Baz {}
""",
),
(
"contracts/Bar.sol",
"""
import {Foo as FooSomething} from "./Foo.sol";
import './Baz.sol';
struct Helper {
address x;
uint256 y;
uint8 z;
}
contract Bar is FooSomething {}
""",
),
]
@pytest.mark.parametrize("version", ("0.6.0", "0.7.3", "0.8.6"))
def test_verification_info(tmp_path_factory, version):
header = f"""
// SPDX-License-Identifier: MIT
pragma solidity {version};
"""
# setup directory
dir: Path = tmp_path_factory.mktemp("verify-project")
# initialize brownie project
new(dir.as_posix())
modded_sources = {}
for fp, src in sources:
with dir.joinpath(fp).open("w") as f:
f.write(header + src)
modded_sources[fp] = header + src
find_best_solc_version(modded_sources, install_needed=True)
project = load(dir, "TestImportProject")
for contract_name in ("Foo", "Bar", "Baz"):
contract = getattr(project, contract_name)
input_data = contract.get_verification_info()["standard_json_input"]
# output selection isn't included in the verification info because
# etherscan replaces it regardless. Here we just replicate with what they
# would include
input_data["settings"]["outputSelection"] = {
"*": {"*": ["evm.bytecode", "evm.deployedBytecode", "abi"]}
}
compiler_version, _ = contract._build["compiler"]["version"].split("+")
output_data = solcx.compile_standard(input_data, solc_version=compiler_version)
# keccak256 = 0xd61b13a841b15bc814760b36086983db80788946ca38aa90a06bebf287a67205
build_info = output_data["contracts"][f"{contract_name}.sol"][contract_name]
assert build_info["abi"] == contract.abi
# ignore the metadata at the end of the bytecode, etherscan does the same
assert build_info["evm"]["bytecode"]["object"][:-96] == contract.bytecode[:-96]
assert (
build_info["evm"]["deployedBytecode"]["object"][:-96]
== contract._build["deployedBytecode"][:-96]
)
project.close()
|
pypy/module/micronumpy/test/test_scalar.py | nanjekyejoannah/pypy | 381 | 11076773 | <gh_stars>100-1000
# -*- encoding:utf-8 -*-
from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
class AppTestScalar(BaseNumpyAppTest):
spaceconfig = dict(usemodules=["micronumpy", "binascii", "struct"])
def test_integer_types(self):
import numpy as np
_32BIT = np.dtype('int').itemsize == 4
if _32BIT:
assert np.int32 is np.dtype('l').type
assert np.uint32 is np.dtype('L').type
assert np.intp is np.dtype('i').type
assert np.uintp is np.dtype('I').type
assert np.int64 is np.dtype('q').type
assert np.uint64 is np.dtype('Q').type
else:
assert np.int32 is np.dtype('i').type
assert np.uint32 is np.dtype('I').type
assert np.intp is np.dtype('l').type
assert np.uintp is np.dtype('L').type
assert np.int64 is np.dtype('l').type
assert np.uint64 is np.dtype('L').type
assert np.int16 is np.short is np.dtype('h').type
assert np.int_ is np.dtype('l').type
assert np.uint is np.dtype('L').type
assert np.dtype('intp') == np.dtype('int')
assert np.dtype('uintp') == np.dtype('uint')
assert np.dtype('i') is not np.dtype('l') is not np.dtype('q')
assert np.dtype('I') is not np.dtype('L') is not np.dtype('Q')
def test_hierarchy(self):
import numpy
assert issubclass(numpy.float64, numpy.floating)
assert issubclass(numpy.longfloat, numpy.floating)
assert not issubclass(numpy.float64, numpy.longfloat)
assert not issubclass(numpy.longfloat, numpy.float64)
def test_mro(self):
import numpy
assert numpy.int16.__mro__ == (numpy.int16, numpy.signedinteger,
numpy.integer, numpy.number,
numpy.generic, object)
assert numpy.bool_.__mro__ == (numpy.bool_, numpy.generic, object)
def test_init(self):
import numpy as np
import math
import sys
assert np.intp() == np.intp(0)
assert np.intp('123') == np.intp(123)
raises(TypeError, np.intp, None)
assert np.float64() == np.float64(0)
assert math.isnan(np.float64(None))
assert np.bool_() == np.bool_(False)
assert np.bool_('abc') == np.bool_(True)
assert np.bool_(None) == np.bool_(False)
assert np.complex_() == np.complex_(0)
#raises(TypeError, np.complex_, '1+2j')
assert math.isnan(np.complex_(None))
for c in ['i', 'I', 'l', 'L', 'q', 'Q']:
assert np.dtype(c).type().dtype.char == c
for c in ['l', 'q']:
assert np.dtype(c).type(sys.maxint) == sys.maxint
for c in ['L', 'Q']:
assert np.dtype(c).type(sys.maxint + 42) == sys.maxint + 42
assert np.float32(np.array([True, False])).dtype == np.float32
assert type(np.float32(np.array([True]))) is np.ndarray
assert type(np.float32(1.0)) is np.float32
a = np.array([True, False])
assert np.bool_(a) is a
def test_builtin(self):
import numpy as np
assert int(np.str_('12')) == 12
exc = raises(ValueError, "int(np.str_('abc'))")
assert exc.value.message.startswith('invalid literal for int()')
assert int(np.uint64((2<<63) - 1)) == (2<<63) - 1
exc = raises(ValueError, "int(np.float64(np.nan))")
assert str(exc.value) == "cannot convert float NaN to integer"
exc = raises(OverflowError, "int(np.float64(np.inf))")
assert str(exc.value) == "cannot convert float infinity to integer"
assert int(np.float64(1e100)) == int(1e100)
assert long(np.float64(1e100)) == int(1e100)
assert int(np.complex128(1e100+2j)) == int(1e100)
exc = raises(OverflowError, "int(np.complex64(1e100+2j))")
assert str(exc.value) == "cannot convert float infinity to integer"
assert int(np.str_('100000000000000000000')) == 100000000000000000000
assert long(np.str_('100000000000000000000')) == 100000000000000000000
assert float(np.float64(1e100)) == 1e100
assert float(np.complex128(1e100+2j)) == 1e100
assert float(np.str_('1e100')) == 1e100
assert float(np.str_('inf')) == np.inf
assert str(float(np.float64(np.nan))) == 'nan'
assert oct(np.int32(11)) == '013'
assert oct(np.float32(11.6)) == '013'
assert oct(np.complex64(11-12j)) == '013'
assert hex(np.int32(11)) == '0xb'
assert hex(np.float32(11.6)) == '0xb'
assert hex(np.complex64(11-12j)) == '0xb'
assert bin(np.int32(11)) == '0b1011'
exc = raises(TypeError, "bin(np.float32(11.6))")
assert "index" in exc.value.message
exc = raises(TypeError, "len(np.int32(11))")
assert "has no len" in exc.value.message
assert len(np.string_('123')) == 3
def test_pickle(self):
from numpy import dtype, zeros
import sys
try:
from numpy.core.multiarray import scalar
except ImportError:
# running on dummy module
from numpy import scalar
from cPickle import loads, dumps
i = dtype('int32').type(1337)
f = dtype('float64').type(13.37)
c = dtype('complex128').type(13 + 37.j)
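        # The expected payloads below are the raw little-endian bytes of each scalar
        # (e.g. 1337 == 0x00000539 -> '9\x05\x00\x00'); swap() reverses them on
        # big-endian hosts so the same literals work for either byte order.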
swap = lambda s: (''.join(reversed(s))) if sys.byteorder == 'big' else s
assert i.__reduce__() == (scalar, (dtype('int32'), swap('9\x05\x00\x00')))
assert f.__reduce__() == (scalar, (dtype('float64'), swap('=\n\xd7\xa3p\xbd*@')))
assert c.__reduce__() == (scalar, (dtype('complex128'), swap('\x00\x00\x00\x00\x00\x00*@') + \
swap('\x00\x00\x00\x00\x00\x80B@')))
assert loads(dumps(i)) == i
assert loads(dumps(f)) == f
assert loads(dumps(c)) == c
a = zeros(3)
assert loads(dumps(a.sum())) == a.sum()
def test_round(self):
import numpy as np
i = np.dtype('int32').type(1337)
f = np.dtype('float64').type(13.37)
c = np.dtype('complex128').type(13 + 37.j)
b = np.dtype('bool').type(1)
assert i.round(decimals=-2) == 1300
assert i.round(decimals=1) == 1337
assert c.round() == c
assert f.round() == 13.
assert f.round(decimals=-1) == 10.
assert f.round(decimals=1) == 13.4
raises(TypeError, b.round, decimals=5)
assert f.round(decimals=1, out=None) == 13.4
assert b.round() == 1.0
def test_astype(self):
import numpy as np
a = np.bool_(True).astype(np.float32)
assert type(a) is np.float32
assert a == 1.0
a = np.bool_(True).astype('int32')
assert type(a) is np.int32
assert a == 1
a = np.str_('123').astype('int32')
assert type(a) is np.int32
assert a == 123
def test_copy(self):
import numpy as np
a = np.int32(2)
b = a.copy()
assert type(b) is type(a)
assert b == a
assert b is not a
def test_methods(self):
import numpy as np
for a in [np.int32(2), np.float64(2.0), np.complex64(42)]:
for op in ['min', 'max', 'sum', 'prod']:
assert getattr(a, op)() == a
for op in ['argmin', 'argmax']:
b = getattr(a, op)()
assert type(b) is np.int_
assert b == 0
def test_buffer(self):
import numpy as np
a = np.int32(123)
b = buffer(a)
assert type(b) is buffer
a = np.string_('abc')
b = buffer(a)
assert str(b) == a
def test_byteswap(self):
import numpy as np
assert np.int64(123).byteswap() == 8863084066665136128
a = np.complex64(1+2j).byteswap()
assert repr(a.real).startswith('4.60060')
assert repr(a.imag).startswith('8.96831')
def test_squeeze(self):
import numpy as np
assert np.True_.squeeze() is np.True_
a = np.float32(1.0)
assert a.squeeze() is a
raises(TypeError, a.squeeze, 2)
def test_bitshift(self):
import numpy as np
assert np.int32(123) >> 1 == 61
assert type(np.int32(123) >> 1) is np.int_
assert np.int64(123) << 1 == 246
assert type(np.int64(123) << 1) is np.int64
exc = raises(TypeError, "np.uint64(123) >> 1")
assert 'not supported for the input types' in exc.value.message
def test_attributes(self):
import numpy as np
value = np.dtype('int64').type(12345)
assert value.dtype == np.dtype('int64')
assert value.size == 1
assert value.itemsize == 8
assert value.nbytes == 8
assert value.shape == ()
assert value.strides == ()
assert value.ndim == 0
assert value.T is value
def test_indexing(self):
import numpy as np
v = np.int32(2)
b = v[()]
assert isinstance(b, np.int32)
assert b.shape == ()
assert b == v
b = v[...]
assert isinstance(b, np.ndarray)
assert b.shape == ()
assert b == v
raises(IndexError, "v['blah']")
def test_realimag(self):
import numpy as np
a = np.int64(2)
assert a.real == 2
assert a.imag == 0
a = np.float64(2.5)
assert a.real == 2.5
assert a.imag == 0.0
a = np.complex64(2.5-1.5j)
assert a.real == 2.5
assert a.imag == -1.5
def test_view(self):
import numpy as np
import sys
s = np.dtype('int64').type(12)
exc = raises(ValueError, s.view, 'int8')
assert exc.value[0] == "new type not compatible with array."
t = s.view('double')
assert type(t) is np.double
assert t < 7e-323
t = s.view('complex64')
assert type(t) is np.complex64
if sys.byteorder == 'big':
assert 0 < t.imag < 1
assert t.real == 0
else:
assert 0 < t.real < 1
assert t.imag == 0
exc = raises(TypeError, s.view, 'string')
assert exc.value[0] == "data-type must not be 0-sized"
t = s.view('S8')
assert type(t) is np.string_
if sys.byteorder == 'big':
assert t == '\x00' * 7 + '\x0c'
else:
assert t == '\x0c'
s = np.dtype('string').type('abc1')
assert s.view('S4') == 'abc1'
if '__pypy__' in sys.builtin_module_names:
raises(NotImplementedError, s.view, [('a', 'i2'), ('b', 'i2')])
else:
b = s.view([('a', 'i2'), ('b', 'i2')])
assert b.shape == ()
assert b[0] == 25185
assert b[1] == 12643
if '__pypy__' in sys.builtin_module_names:
raises(TypeError, "np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16)")
else:
s = np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16)
assert s.view('S16') == 'a' * 16
def test_as_integer_ratio(self):
import numpy as np
raises(AttributeError, 'np.float32(1.5).as_integer_ratio()')
assert np.float64(1.5).as_integer_ratio() == (3, 2)
def test_tostring(self):
import numpy as np
assert np.int64(123).tostring() == np.array(123, dtype='i8').tostring()
assert np.int64(123).tostring('C') == np.array(123, dtype='i8').tostring()
assert np.float64(1.5).tostring() == np.array(1.5, dtype=float).tostring()
exc = raises(TypeError, 'np.int64(123).tostring("Z")')
assert exc.value[0] == 'order not understood'
def test_reshape(self):
import numpy as np
assert np.int64(123).reshape((1,)) == 123
assert np.int64(123).reshape(1).shape == (1,)
assert np.int64(123).reshape((1,)).shape == (1,)
exc = raises(ValueError, "np.int64(123).reshape((2,))")
assert exc.value[0] == 'total size of new array must be unchanged'
assert type(np.int64(123).reshape(())) == np.int64
def test_complex_scalar_complex_cast(self):
import numpy as np
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert hasattr(x, '__complex__') == (tp != np.cdouble)
assert complex(x) == 1+2j
def test_complex_str_format(self):
import numpy as np
for t in [np.complex64, np.complex128]:
assert str(t(complex(1, float('nan')))) == '(1+nan*j)'
assert str(t(complex(1, float('-nan')))) == '(1+nan*j)'
assert str(t(complex(1, float('inf')))) == '(1+inf*j)'
assert str(t(complex(1, float('-inf')))) == '(1-inf*j)'
for x in [0, 1, -1]:
assert str(t(complex(x))) == str(complex(x))
assert str(t(x*1j)) == str(complex(x*1j))
assert str(t(x + x*1j)) == str(complex(x + x*1j))
def test_complex_zero_division(self):
import numpy as np
for t in [np.complex64, np.complex128]:
a = t(0.0)
b = t(1.0)
assert np.isinf(b/a)
b = t(complex(np.inf, np.inf))
assert np.isinf(b/a)
b = t(complex(np.inf, np.nan))
assert np.isinf(b/a)
b = t(complex(np.nan, np.inf))
assert np.isinf(b/a)
b = t(complex(np.nan, np.nan))
assert np.isnan(b/a)
b = t(0.)
assert np.isnan(b/a)
def test_scalar_iter(self):
from numpy import int8, int16, int32, int64, float32, float64
from numpy import complex64, complex128
for t in (int8, int16, int32, int64, float32, float64,
complex64, complex128):
raises(TypeError, iter, t(17))
def test_item_tolist(self):
from numpy import int8, int16, int32, int64, float32, float64
from numpy import complex64, complex128, dtype
def _do_test(np_type, py_type, orig_val, exp_val):
val = np_type(orig_val)
assert val == orig_val
assert val.item() == exp_val
assert val.tolist() == exp_val
assert type(val.item()) is py_type
assert type(val.tolist()) is py_type
val.item(0)
val.item(())
val.item((0,))
raises(ValueError, val.item, 0, 1)
raises(ValueError, val.item, 0, '')
raises(TypeError, val.item, '')
raises(IndexError, val.item, 2)
for t in int8, int16, int32:
_do_test(t, int, 17, 17)
py_type = int if dtype('int').itemsize == 8 else long
_do_test(int64, py_type, 17, 17)
for t in float32, float64:
_do_test(t, float, 17, 17)
for t in complex64, complex128:
_do_test(t, complex, 17j, 17j)
def test_transpose(self):
from numpy import int8, int16, int32, int64, float32, float64
from numpy import complex64, complex128
def _do_test(np_type, orig_val, exp_val):
val = np_type(orig_val)
assert val == orig_val
assert val.transpose() == exp_val
assert type(val.transpose()) is np_type
val.transpose(())
raises(ValueError, val.transpose, 0, 1)
raises(TypeError, val.transpose, 0, '')
raises(ValueError, val.transpose, 0)
for t in int8, int16, int32, int64:
_do_test(t, 17, 17)
for t in float32, float64:
_do_test(t, 17, 17)
for t in complex64, complex128:
_do_test(t, 17j, 17j)
def test_swapaxes(self):
from numpy import int8, int16, int32, int64, float32, float64
from numpy import complex64, complex128
def _do_test(np_type, orig_val, exp_val):
val = np_type(orig_val)
assert val == orig_val
raises(ValueError, val.swapaxes, 10, 20)
raises(ValueError, val.swapaxes, 0, 1)
raises(TypeError, val.swapaxes, 0, ())
for t in int8, int16, int32, int64:
_do_test(t, 17, 17)
for t in float32, float64:
_do_test(t, 17, 17)
for t in complex64, complex128:
_do_test(t, 17j, 17j)
def test_nonzero(self):
from numpy import int8, int16, int32, int64, float32, float64
from numpy import complex64, complex128
for t in (int8, int16, int32, int64, float32, float64,
complex64, complex128):
res, = t(17).nonzero()
assert len(res) == 1
assert res[0] == 0
res, = t(0).nonzero()
assert len(res) == 0
def test_fill(self):
import sys
from numpy import int8, int16, int32, int64, float32, float64
from numpy import complex64, complex128
for t in (int8, int16, int32, int64, float32, float64,
complex64, complex128):
t(17).fill(2)
exc = (TypeError if t in (complex64, complex128)
and '__pypy__' not in sys.builtin_module_names
else ValueError)
raises(exc, t(17).fill, '')
def test_conj(self):
from numpy import int8, int16, int32, int64, float32, float64
from numpy import complex64, complex128
def _do_test(np_type, orig_val, exp_val):
val = np_type(orig_val)
assert val == orig_val
assert val.conj() == exp_val
assert val.conjugate() == exp_val
for t in (int8, int16, int32, int64, float32, float64,
complex64, complex128):
_do_test(t, 17, 17)
for t in complex64, complex128:
_do_test(t, 17j, -17j)
def test_string_boxes(self):
from numpy import str_
assert isinstance(str_(3), str_)
assert str_(3) == '3'
assert str(str_(3)) == '3'
assert repr(str_(3)) == "'3'"
def test_unicode_boxes(self):
from numpy import unicode_
u = unicode_(3)
assert isinstance(u, unicode)
assert u == u'3'
def test_unicode_repr(self):
from numpy import unicode_
u = unicode_(3)
assert str(u) == '3'
assert repr(u) == "u'3'"
u = unicode_(u'Aÿ')
# raises(UnicodeEncodeError, "str(u)") # XXX
assert repr(u) == repr(u'Aÿ')
def test_binop_with_sequence(self):
import numpy as np
c = np.float64(1.) + [1.]
assert isinstance(c, np.ndarray)
assert (c == [2.]).all()
|
bnpy/allocmodel/topics/HDPTopicRestrictedLocalStep2.py | raphael-group/bnpy | 184 | 11076784 | <filename>bnpy/allocmodel/topics/HDPTopicRestrictedLocalStep2.py
import numpy as np
from scipy.special import digamma, gammaln
from bnpy.util import NumericUtil
from bnpy.allocmodel import make_xPiVec_and_emptyPi
def calcDocTopicCountCorrelationFromTargetToAbsorbingSet(
DocTopicMat, ktarget, kabsorbList, MINVAL=1.0e-8):
''' Find correlation in DocTopicCount between target and absorbing states.
Returns
-------
CorrVec : 1D array, size nAbsorbing
CorrVec[j] : correlation value (-1 < corr < 1)
from kabsorbList[j] to the target
'''
D = DocTopicMat.shape[0]
Smat = np.dot(DocTopicMat.T, DocTopicMat)
svec = np.sum(DocTopicMat, axis=0)
nanIDs = np.isnan(Smat)
Smat[nanIDs] = 0
svec[np.isnan(svec)] = 0
offlimitcompIDs = np.logical_or(np.isnan(svec), svec < MINVAL)
CovMat = Smat / D - np.outer(svec / D, svec / D)
varc = np.diag(CovMat)
sqrtc = np.sqrt(varc)
sqrtc[offlimitcompIDs] = MINVAL
assert sqrtc.min() >= MINVAL
CorrMat = CovMat / np.outer(sqrtc, sqrtc)
return CorrMat[kabsorbList, ktarget].copy()
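# --- Illustrative usage sketch (not part of the original bnpy module) ---
# A minimal, hypothetical example of calling the correlation helper above on a tiny
# dense doc-topic count matrix; the matrix values and cluster indices are made up.
def _example_doc_topic_correlation():
    ''' Toy check: correlate target column 0 against absorbing columns [1, 2]. '''
    DocTopicMat = np.array([[5., 4., 0.],
                            [3., 2., 1.],
                            [0., 1., 6.]])
    corrVec = calcDocTopicCountCorrelationFromTargetToAbsorbingSet(
        DocTopicMat, ktarget=0, kabsorbList=[1, 2])
    assert corrVec.shape == (2,)  # one correlation value per absorbing column
    return corrVec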
def summarizeRestrictedLocalStep_HDPTopicModel(
Dslice=None,
curModel=None,
curLPslice=None,
curSSwhole=None,
targetUID=None,
ktarget=None,
kabsorbList=None,
xUIDs=None,
xObsModel=None,
xInitSS=None,
doBuildOnInit=False,
xPiVec=None,
curPiVec=None,
emptyPiFrac=0.0,
b_emptyPiFrac=None,
nUpdateSteps=5,
d_initWordCounts='none',
**kwargs):
''' Perform restricted local step and summarize it.
Returns
-------
xSSslice : SuffStatBag
Info : dict with other information
'''
if b_emptyPiFrac is not None:
emptyPiFrac = b_emptyPiFrac
    # Translate specified unique-IDs (UID) into current order IDs
if targetUID is not None:
ktarget = curSSwhole.uid2k(targetUID)
Kfresh = None
if xUIDs is not None:
Kfresh = len(xUIDs)
intersectUIDs = np.intersect1d(xUIDs, curSSwhole.uids)
if intersectUIDs.size > 0:
kabsorbList = list()
for uid in intersectUIDs:
kabsorbList.append(curSSwhole.uid2k(uid))
kabsorbList.sort()
if kabsorbList is not None:
if Kfresh is None:
Kfresh = len(kabsorbList)
else:
assert len(kabsorbList) == Kfresh
if xInitSS is not None:
if Kfresh is None:
Kfresh = xInitSS.K
else:
assert Kfresh == xInitSS.K
if xObsModel is not None:
if Kfresh is None:
Kfresh = xObsModel.K
else:
assert Kfresh == xObsModel.K
# Identify original cluster probabilities for all clusters involved
# And maintain sum constraints within these for proposed state
if curPiVec is None:
curPiVec = curModel.allocModel.get_active_comp_probs()
if kabsorbList is None:
availablePiMass = curPiVec[ktarget]
emptyPi = emptyPiFrac * availablePiMass
else:
availablePiMass = np.sum(curPiVec[kabsorbList]) + curPiVec[ktarget]
emptyPi = 0.0
# Create probabilities for each of the Kfresh new clusters
# by subdividing the involved clusters' original probabilities
if xPiVec is None:
if kabsorbList is None:
xPiVec = ((1-emptyPiFrac) * availablePiMass) / Kfresh * np.ones(Kfresh)
else:
xPiVec = curPiVec[kabsorbList].copy()
xPiVec /= xPiVec.sum()
xPiVec *= availablePiMass
assert np.allclose(availablePiMass, emptyPi + np.sum(xPiVec))
assert Kfresh == xPiVec.size
# Create expansion observation model, if necessary
if xObsModel is None:
# # Create expanded observation model
xObsModel = curModel.obsModel.copy()
# # Verify provided initialization-stats exist
# # Otherwise, there's no way to initialize the new obsmodel
assert xInitSS is not None
# # Optionally, for delete moves smartly initialize absorbing clusters
if kabsorbList is not None:
isMult = curModel.getObsModelName().count('Mult')
if not doBuildOnInit and isMult and d_initWordCounts.count('corr'):
corrVec = calcDocTopicCountCorrelationFromTargetToAbsorbingSet(
curLPslice['DocTopicCount'], ktarget, kabsorbList)
bestAbsorbIDs = np.flatnonzero(corrVec >= .001)
for k in bestAbsorbIDs:
xInitSS.WordCounts[k,:] += curSSwhole.WordCounts[ktarget,:]
# # Initialize xObsModel using provided stats
xObsModel.update_global_params(xInitSS)
assert Kfresh == xObsModel.K
# Perform restricted inference!
# xLPslice contains local params for all Kfresh expansion clusters
xalphaPi = curModel.allocModel.alpha * xPiVec
xLPslice = restrictedLocalStep_HDPTopicModel(
Dslice=Dslice,
curLPslice=curLPslice,
ktarget=ktarget,
kabsorbList=kabsorbList,
xObsModel=xObsModel,
xalphaPi=xalphaPi,
thetaEmptyComp=curModel.allocModel.alpha * emptyPi,
nUpdateSteps=nUpdateSteps,
doBuildOnInit=doBuildOnInit,
xInitSS=xInitSS,
**kwargs)
if emptyPiFrac > 0:
assert "HrespOrigComp" in xLPslice
# Summarize this expanded local parameter pack
xSSslice = curModel.get_global_suff_stats(
Dslice, xLPslice,
trackDocUsage=1, doPrecompEntropy=1, doTrackTruncationGrowth=1)
if xUIDs is not None:
xSSslice.setUIDs(xUIDs)
assert xSSslice.hasELBOTerm("Hresp")
if emptyPiFrac > 0:
assert xSSslice.hasELBOTerm("HrespEmptyComp")
# Prepare dict of info for debugging/inspection
Info = dict()
Info['Kfresh'] = xPiVec.size
Info['xLPslice'] = xLPslice
Info['xPiVec'] = xPiVec
Info['emptyPi'] = emptyPi
return xSSslice, Info
def restrictedLocalStep_HDPTopicModel(
Dslice=None,
curLPslice=None,
ktarget=0,
kabsorbList=None,
xObsModel=None,
xalphaPi=None,
nUpdateSteps=3,
        doBuildOnInit=False,
        xInitSS=None,
convThr=0.5,
thetaEmptyComp=0.0,
**kwargs):
''' Compute local parameters for HDPTopicModel via restricted local step.
Returns
-------
xLPslice : dict with updated fields
Fields with learned values
* resp : N x Kfresh
* DocTopicCount : nDoc x Kfresh
* theta : nDoc x Kfresh
* ElogPi : nDoc x Kfresh
Fields copied directly from curLPslice
* digammaSumTheta : 1D array, size nDoc
* thetaRem : scalar
* ElogPiRem : scalar
'''
if doBuildOnInit:
xWholeSS = xInitSS.copy()
Kfresh = xObsModel.K
assert Kfresh == xalphaPi.size
xLPslice = dict()
# Default warm_start initialization for DocTopicCount
# by copying the previous counts at all absorbing states
if kabsorbList is None:
xLPslice['DocTopicCount'] = np.zeros((Dslice.nDoc, Kfresh))
xLPslice['resp'] = np.zeros((
curLPslice['resp'].shape[0], Kfresh))
else:
# Initialize DocTopicCounts by copying those from absorbing states
xLPslice['DocTopicCount'] = \
curLPslice['DocTopicCount'][:, kabsorbList].copy()
# Initialize resp by copying existing resp for absorbing state
# Note: this is NOT consistent with some docs in DocTopicCount
# but that will get fixed by restricted step
xLPslice['resp'] = \
curLPslice['resp'][:, kabsorbList].copy()
xLPslice['theta'] = \
xLPslice['DocTopicCount'] + xalphaPi[np.newaxis,:]
xLPslice['_nIters'] = -1 * np.ones(Dslice.nDoc)
xLPslice['_maxDiff'] = -1 * np.ones(Dslice.nDoc)
for step in range(nUpdateSteps):
# Compute conditional likelihoods for every data atom
xLPslice = xObsModel.calc_local_params(Dslice, xLPslice)
assert 'E_log_soft_ev' in xLPslice
assert 'obsModelName' in xLPslice
# Fill in these fields, one doc at a time
for d in range(Dslice.nDoc):
xLPslice = restrictedLocalStepForSingleDoc_HDPTopicModel(
d=d,
Dslice=Dslice,
curLPslice=curLPslice,
xLPslice=xLPslice,
ktarget=ktarget,
kabsorbList=kabsorbList,
xalphaPi=xalphaPi,
thetaEmptyComp=thetaEmptyComp,
**kwargs)
isLastStep = step == nUpdateSteps - 1
if not isLastStep:
xSS = xObsModel.calcSummaryStats(Dslice, None, xLPslice)
# Increment
if doBuildOnInit:
xSS.setUIDs(xWholeSS.uids)
xWholeSS += xSS
else:
xWholeSS = xSS
# Global step
xObsModel.update_global_params(xWholeSS)
# Decrement stats
if doBuildOnInit:
xWholeSS -= xSS
# Assess early stopping
if step > 0:
thr = np.sum(np.abs(prevCountVec - xSS.getCountVec()))
if thr < convThr:
break
prevCountVec = xSS.getCountVec()
# Compute other LP quantities related to log prob (topic | doc)
# and fill these into the expanded LP dict
digammaSumTheta = curLPslice['digammaSumTheta'].copy()
xLPslice['digammaSumTheta'] = digammaSumTheta
xLPslice['ElogPi'] = \
digamma(xLPslice['theta']) - digammaSumTheta[:, np.newaxis]
xLPslice['thetaRem'] = curLPslice['thetaRem'].copy()
xLPslice['ElogPiRem'] = curLPslice['ElogPiRem'].copy()
# Compute quantities related to leaving ktarget almost empty,
# as we expand and transfer mass to other comps
if thetaEmptyComp > 0:
ElogPiEmptyComp = digamma(thetaEmptyComp) - digammaSumTheta
xLPslice['thetaEmptyComp'] = thetaEmptyComp
xLPslice['ElogPiEmptyComp'] = ElogPiEmptyComp
# Compute quantities related to OrigComp, the original target cluster.
# These need to be tracked and turned into relevant summaries
    # so that they can be used to create a valid proposal state "propSS"
xLPslice['ElogPiOrigComp'] = curLPslice['ElogPi'][:, ktarget]
xLPslice['gammalnThetaOrigComp'] = \
np.sum(gammaln(curLPslice['theta'][:, ktarget]))
slack = curLPslice['DocTopicCount'][:, ktarget] - \
curLPslice['theta'][:, ktarget]
xLPslice['slackThetaOrigComp'] = np.sum(
slack * curLPslice['ElogPi'][:, ktarget])
if hasattr(Dslice, 'word_count') and \
xLPslice['resp'].shape[0] == Dslice.word_count.size:
xLPslice['HrespOrigComp'] = -1 * NumericUtil.calcRlogRdotv(
curLPslice['resp'][:, ktarget], Dslice.word_count)
else:
xLPslice['HrespOrigComp'] = -1 * NumericUtil.calcRlogR(
curLPslice['resp'][:, ktarget])
return xLPslice
def restrictedLocalStepForSingleDoc_HDPTopicModel(
d=0,
Dslice=None,
curLPslice=None,
ktarget=0,
kabsorbList=None,
xalphaPi=None,
xLPslice=None,
LPkwargs=dict(),
d_initTargetDocTopicCount="warm_start",
thetaEmptyComp=0.0,
**kwargs):
''' Perform restricted local step on one document.
Returns
-------
xLPslice : dict with updated entries related to document d
* resp
* DocTopicCount
* theta
'''
# Verify we have likelihoods
assert 'E_log_soft_ev' in xLPslice
assert 'obsModelName' in xLPslice
obsModelName = xLPslice['obsModelName']
# Verify prior
Kfresh = xalphaPi.size
assert xLPslice['E_log_soft_ev'].shape[1] == Kfresh
if hasattr(Dslice, 'word_count') and obsModelName.count('Bern'):
raise ValueError("TODO")
start = Dslice.doc_range[d]
stop = Dslice.doc_range[d+1]
if kabsorbList is None:
constrained_sumTheta_d = curLPslice['theta'][d,ktarget]
# Establish the total mass we must reallocate
constrained_sumResp_d = curLPslice['resp'][start:stop,ktarget]
else:
constrained_sumTheta_d = curLPslice['theta'][d,ktarget] + \
np.sum(curLPslice['theta'][d, kabsorbList])
# Establish the total mass we must reallocate
constrained_sumResp_d = curLPslice['resp'][start:stop,ktarget] + \
np.sum(curLPslice['resp'][start:stop, kabsorbList], axis=1)
mask_d = np.arange(stop-start)
if mask_d.size == 0:
return xLPslice
# Compute the conditional likelihood matrix for the target atoms
# xCLik_d will always have an entry equal to one.
if mask_d.size > 0:
xCLik_d = xLPslice['E_log_soft_ev'][start + mask_d].copy()
xCLik_d -= np.max(xCLik_d, axis=1)[:,np.newaxis]
# Protect against underflow
np.maximum(xCLik_d, -300, out=xCLik_d)
np.exp(xCLik_d, out=xCLik_d)
if hasattr(Dslice, 'word_count') and obsModelName.count('Mult'):
wc_d = Dslice.word_count[start + mask_d]
wc_d *= constrained_sumResp_d[mask_d]
else:
wc_d = constrained_sumResp_d[mask_d].copy()
# Initialize doc-topic counts
prevxDocTopicCount_d = -1 * np.ones(Kfresh)
xDocTopicCount_d = xLPslice['DocTopicCount'][d, :].copy()
if kabsorbList is None:
doWarmStart = False # always cold start for birth move
else:
fracTargetMass_d = curLPslice['DocTopicCount'][d,ktarget] \
/ curLPslice['DocTopicCount'][d,:].sum()
if fracTargetMass_d >= 0.05:
doWarmStart = d_initTargetDocTopicCount.count("warm_start")
else:
doWarmStart = True
if doWarmStart:
# Initialize xDocTopicProb_d
xDocTopicProb_d = xDocTopicCount_d + xalphaPi
digamma(xDocTopicProb_d, out=xDocTopicProb_d)
np.maximum(xDocTopicProb_d, -300, out=xDocTopicProb_d)
np.exp(xDocTopicProb_d, out=xDocTopicProb_d)
else:
# Cold start! xDocTopicProb_d[k] \approx alpha * Prob[k]
xDocTopicProb_d = xalphaPi.copy()
# Initialize xsumResp_d
xsumResp_d = np.zeros(xCLik_d.shape[0])
np.dot(xCLik_d, xDocTopicProb_d, out=xsumResp_d)
maxDiff_d = -1
for riter in range(LPkwargs['nCoordAscentItersLP']):
# Update DocTopicCount_d
np.dot(wc_d / xsumResp_d, xCLik_d,
out=xDocTopicCount_d)
xDocTopicCount_d *= xDocTopicProb_d
# Update xDocTopicProb_d
np.add(xDocTopicCount_d, xalphaPi,
out=xDocTopicProb_d)
digamma(xDocTopicProb_d, out=xDocTopicProb_d)
# Protect against underflow
np.maximum(xDocTopicProb_d, -300, out=xDocTopicProb_d)
np.exp(xDocTopicProb_d, out=xDocTopicProb_d)
assert np.min(xDocTopicProb_d) > 0.0
# Update xsumResp_d
np.dot(xCLik_d, xDocTopicProb_d, out=xsumResp_d)
# Check for convergence
if riter % 5 == 0:
maxDiff_d = np.max(np.abs(
prevxDocTopicCount_d - xDocTopicCount_d))
if maxDiff_d < LPkwargs['convThrLP']:
break
# Track previous DocTopicCount
prevxDocTopicCount_d[:] = xDocTopicCount_d
# Update xResp_d
assert np.all(np.isfinite(xDocTopicCount_d))
xResp_d = xCLik_d
xResp_d *= xDocTopicProb_d[np.newaxis, :]
xResp_d /= xsumResp_d[:, np.newaxis]
# Here, sum of each row of xResp_d is equal to 1.0
# Need to make sum of each row equal mass on target cluster
xResp_d *= constrained_sumResp_d[mask_d,np.newaxis]
np.maximum(xResp_d, 1e-100, out=xResp_d)
# Right here, xResp_d and xDocTopicProb_d
# are exactly equal to one fwd step from the current xDocTopicCount_d
# So, we can use our short-cut ELBO calculation.
if False:
#curLPslice['DocTopicCount'][d, ktarget] > 10.0
L_doc_theta = np.sum(gammaln(xDocTopicCount_d + xalphaPi)) \
- np.inner(xDocTopicCount_d, np.log(xDocTopicProb_d))
L_doc_resp = np.inner(wc_d, np.log(xsumResp_d))
L_doc = L_doc_resp + L_doc_theta
#print "d=%3d L_d=% .4e" % (d, L_doc)
#print " ".join(["%6.1f" % (x) for x in xDocTopicCount_d])
#xLPslice['L_doc'] = L_doc
# Pack up into final LP dict
# Taking one forward step so xDocTopicCount_d is consistent with xResp_d
xLPslice['resp'][start+mask_d] = xResp_d
if hasattr(Dslice, 'word_count') and obsModelName.count('Mult'):
xDocTopicCount_d = np.dot(Dslice.word_count[start+mask_d], xResp_d)
else:
xDocTopicCount_d = np.sum(xResp_d, axis=0)
xLPslice['DocTopicCount'][d, :] = xDocTopicCount_d
xLPslice['theta'][d, :] = xalphaPi + xDocTopicCount_d
xLPslice['_nIters'][d] = riter
xLPslice['_maxDiff'][d] = maxDiff_d
    # Final verification that output meets required constraints
respOK = np.allclose(
xLPslice['resp'][start:stop].sum(axis=1),
constrained_sumResp_d,
atol=0.0001,
rtol=0)
assert respOK
thetaOK = np.allclose(
xLPslice['theta'][d, :].sum() + thetaEmptyComp,
constrained_sumTheta_d,
atol=0.0001,
rtol=0)
assert thetaOK
# That's all folks
return xLPslice
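# --- Usage note (illustrative, not part of the original module) ---
# A birth/delete proposal in bnpy would typically call the summary helper roughly as
#
#     xSSslice, Info = summarizeRestrictedLocalStep_HDPTopicModel(
#         Dslice=Dslice, curModel=curModel, curLPslice=curLPslice,
#         curSSwhole=curSSwhole, targetUID=targetUID,
#         xInitSS=xInitSS, xUIDs=xUIDs, nUpdateSteps=5)
#
# where Dslice, curModel, curLPslice, curSSwhole, xInitSS and xUIDs are assumed to be
# provided by the surrounding move code; only the keyword names are taken from the
# signature defined in this file.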
|
FastSimulation/Tracking/python/electronCkfTrackCandidates_cff.py | ckamtsikis/cmssw | 852 | 11076795 | import FWCore.ParameterSet.Config as cms
import FastSimulation.Tracking.TrackCandidateProducer_cfi
electronCkfTrackCandidates = FastSimulation.Tracking.TrackCandidateProducer_cfi.trackCandidateProducer.clone(
src = cms.InputTag("electronMergedSeeds"),
MinNumberOfCrossedLayers = 5,
OverlapCleaning = True
)
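# Illustrative note (not part of the original configuration): further variants are
# usually derived with clone() in the same way, e.g.
#
#   electronCkfTrackCandidatesNoCleaning = electronCkfTrackCandidates.clone(OverlapCleaning = False)
#
# The module name above is hypothetical.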
|
tests/test_template.py | mkohler/hovercraft | 1,089 | 11076800 | import os
import unittest
from lxml import etree
from hovercraft.template import (
Template,
CSS_RESOURCE,
JS_RESOURCE,
JS_POSITION_BODY,
JS_POSITION_HEADER,
)
TEST_DATA = os.path.join(os.path.split(__file__)[0], "test_data")
class TemplateInfoTests(unittest.TestCase):
"""Tests that template information is correctly parsed"""
def test_template_paths(self):
# You can specify a folder or a cfg file and that's the same thing.
template_info1 = Template(os.path.join(TEST_DATA, "minimal"))
template_info2 = Template(os.path.join(TEST_DATA, "minimal", "template.cfg"))
self.assertEqual(
etree.tostring(template_info1.xml_node()),
etree.tostring(template_info2.xml_node()),
)
def test_template_minimal(self):
template_info = Template(os.path.join(TEST_DATA, "minimal"))
with open(os.path.join(TEST_DATA, "minimal", "template.xsl"), "rb") as xslfile:
xsl = xslfile.read()
self.assertEqual(template_info.xsl, xsl)
template_files = [each.filepath for each in template_info.resources]
self.assertIn("js/impress.js", template_files)
self.assertIn("js/hovercraft-minimal.js", template_files)
css_files = list(
each.filepath
for each in template_info.resources
if each.resource_type == CSS_RESOURCE
)
self.assertEqual(len(css_files), 0)
self.assertEqual(template_info.doctype, b"<!DOCTYPE html>")
def test_template_maximal(self):
template_info = Template(os.path.join(TEST_DATA, "maximal"))
with open(os.path.join(TEST_DATA, "maximal", "template.xsl"), "rb") as xslfile:
xsl = xslfile.read()
self.assertEqual(template_info.xsl, xsl)
template_files = [each.filepath for each in template_info.resources]
self.assertIn("images/hovercraft_logo.png", template_files)
self.assertIn("js/impress.js", template_files)
self.assertIn("js/impressConsole.js", template_files)
self.assertIn("js/hovercraft.js", template_files)
js_bodies = [
each.filepath
for each in template_info.resources
if each.resource_type == JS_RESOURCE and each.extra_info == JS_POSITION_BODY
]
self.assertIn("js/impress.js", js_bodies)
self.assertIn("js/impressConsole.js", js_bodies)
self.assertIn("js/hovercraft.js", js_bodies)
js_headers = [
each.filepath
for each in template_info.resources
if each.resource_type == JS_RESOURCE
and each.extra_info == JS_POSITION_HEADER
]
self.assertIn("js/dummy.js", js_headers)
self.assertEqual(template_info.resources[0].filepath, "css/style.css")
self.assertEqual(template_info.resources[0].extra_info, "all")
self.assertEqual(template_info.resources[1].filepath, "css/print.css")
self.assertEqual(template_info.resources[1].extra_info, "print")
self.assertEqual(template_info.resources[2].filepath, "css/impressConsole.css")
self.assertEqual(template_info.resources[2].extra_info, "screen,projection")
self.assertEqual(
template_info.doctype, b'<!DOCTYPE html SYSTEM "about:legacy-compat">'
)
class TemplateInfoNodeTests(unittest.TestCase):
"""Tests that template information is correctly made into an xml nodes"""
def test_minimal_template(self):
template_info = Template(os.path.join(TEST_DATA, "minimal"))
node = template_info.xml_node()
self.assertEqual(
etree.tostring(node),
(
b"<templateinfo><header/><body>"
b'<js src="js/impress.js"/><js src="js/hovercraft-minimal.js"/>'
b"</body></templateinfo>"
),
)
def test_maximal_template(self):
template_info = Template(os.path.join(TEST_DATA, "maximal"))
node = template_info.xml_node()
self.assertEqual(
etree.tostring(node),
(
b"<templateinfo><header>"
b'<css href="css/style.css" media="all"/>'
b'<css href="css/print.css" media="print"/>'
b'<css href="css/impressConsole.css" media="screen,projection"/>'
b'<js src="js/dummy.js"/></header>'
b'<body><js src="js/impress.js"/><js src="js/impressConsole.js"/>'
b'<js src="js/hovercraft.js"/>'
b"</body></templateinfo>"
),
)
if __name__ == "__main__":
unittest.main()
|
python/mxnet_benchmarks/models/resnet.py | joehandzik/dlcookbook-dlbs | 123 | 11076804 | <filename>python/mxnet_benchmarks/models/resnet.py
# (c) Copyright [2017] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This implementation
https://github.com/apache/incubator-mxnet/blob/master/example/image-classification/symbols/resnet.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
from mxnet_benchmarks.models.model import Model, Layers
class ResNet(Model):
implements = [
'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet200', 'resnet269'
]
specs = {
'resnet18': {'name': 'ResNet18', 'units': [2, 2, 2, 2], 'num_layers': 18}, # pylint: disable=C0326
'resnet34': {'name': 'ResNet34', 'units': [3, 4, 6, 3], 'num_layers': 34}, # pylint: disable=C0326
'resnet50': {'name': 'ResNet50', 'units': [3, 4, 6, 3], 'num_layers': 50}, # pylint: disable=C0326
'resnet101': {'name': 'ResNet101', 'units': [3, 4, 23, 3], 'num_layers': 101}, # pylint: disable=C0326
'resnet152': {'name': 'ResNet152', 'units': [3, 8, 36, 3], 'num_layers': 152}, # pylint: disable=C0326
'resnet200': {'name': 'ResNet200', 'units': [3, 24, 36, 3], 'num_layers': 200}, # pylint: disable=C0326
'resnet269': {'name': 'ResNet269', 'units': [3, 30, 48, 8], 'num_layers': 269} # pylint: disable=C0326
}
@property
def output(self):
return self.__output
def residual_unit(self, data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=256,
memonger=False, cudnn_bn_off=False, fuse_bn_relu=False, fuse_bn_add_relu=False):
"""Return ResNet Unit symbol for building ResNet
Args:
data: Input tensor to this residual unit.
num_filter (int): Number of filters.
stride (tuple): Stride used in convolution
            dim_match (boolean): True if input and output have the same number of channels, False otherwise.
name (str): Base name of the module
workspace (int): Workspace used in convolution operator
cudnn_bn_off (boolean): Do not use CUDNN for batch norm operator
fuse_bn_relu (boolean): Use fused implementation of batch norm and activation, only available in NGC
containers.
fuse_bn_add_relu (boolean): Use fused implementation. Only available in NGC containers.
Returns:
Output tensor
Since new layer factory is used, depending on the runtime (like specific NGC containers), neural net operators
may have additional parameters.
"""
act = 'relu' if fuse_bn_relu else None
if bottle_neck:
# Branch 1
if dim_match:
shortcut = data
else:
shortcut = self.layers.Convolution(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride,
no_bias=True, workspace=workspace, name=name+'_sc_conv',)
shortcut = self.layers.BatchNorm(data=shortcut, fix_gamma=False, eps=2e-5, momentum=bn_mom,
name=name + '_sc_bn', cudnn_off=cudnn_bn_off)
if memonger:
shortcut._set_attr(mirror_stage='True')
# Branch 2
# Block 2A
conv1 = self.layers.Convolution(data=data, num_filter=int(num_filter*0.25), kernel=(1, 1), stride=(1, 1),
pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = self.layers.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1',
cudnn_off=cudnn_bn_off, act_type=act)
act1 = self.layers.Activation(data=bn1, act_type='relu', name=name + '_relu1') if not fuse_bn_relu else bn1
# Block 2B
conv2 = self.layers.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(3, 3), stride=stride,
pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2')
bn2 = self.layers.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2',
cudnn_off=cudnn_bn_off, act_type=act)
act2 = self.layers.Activation(data=bn2, act_type='relu', name=name + '_relu2') if not fuse_bn_relu else bn2
# Block 3B
conv3 = self.layers.Convolution(data=act2, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
no_bias=True, workspace=workspace, name=name + '_conv3')
if fuse_bn_add_relu:
return self.layers.BatchNormAddRelu(data=conv3, addend=shortcut, axis=1, fix_gamma=False, eps=2e-5,
momentum=bn_mom, cudnn_off=cudnn_bn_off)
else:
bn3 = self.layers.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3',
cudnn_off=cudnn_bn_off)
# Element-wise summation and ReLU
return self.layers.Activation(data=shortcut + bn3, act_type='relu', name=name + '_relu')
else:
# Branch 1
if dim_match:
shortcut = data
else:
shortcut = self.layers.Convolution(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride,
no_bias=True, workspace=workspace, name=name+'_sc_conv')
shortcut = self.layers.BatchNorm(data=shortcut, fix_gamma=False, momentum=bn_mom, eps=2e-5,
name=name + '_sc_bn', cudnn_off=cudnn_bn_off)
if memonger:
shortcut._set_attr(mirror_stage='True')
# Branch 2
# Block 2A
conv1 = self.layers.Convolution(data=data, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = self.layers.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1',
cudnn_off=cudnn_bn_off, act_type=act)
act1 = self.layers.Activation(data=bn1, act_type='relu', name=name + '_relu1') if not fuse_bn_relu else bn1
# Block 2B
conv2 = self.layers.Convolution(data=act1, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
if fuse_bn_add_relu:
return self.layers.BatchNormAddRelu(data=conv2, addend=shortcut, axis=1, fix_gamma=False, eps=2e-5,
momentum=bn_mom, cudnn_off=cudnn_bn_off)
else:
bn2 = self.layers.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2',
cudnn_off=cudnn_bn_off)
# Element-wise summation and ReLU
return self.layers.Activation(data=shortcut + bn2, act_type='relu', name=name + '_relu')
def resnet(self, units, num_stages, filter_list, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False,
cudnn_bn_off=False, fuse_bn_relu=False, fuse_bn_add_relu=False):
"""Return ResNet symbol of
Parameters
----------
units : list
Number of units in each stage
num_stages : int
            Number of stages
filter_list : list
Channel size of each stage
num_classes : int
            Output size of symbol
dataset : str
            Dataset type; only cifar10 and imagenet are supported
workspace : int
Workspace used in convolution operator
"""
act = 'relu' if fuse_bn_relu else None
num_unit = len(units)
assert num_unit == num_stages
v = self.add_data_node()
v = Layers.conv_transform_layout(v, self.params['input_layout'], self.params['model_layout'])
v = self.layers.Convolution(data=v, num_filter=filter_list[0], kernel=(7, 7), stride=(2, 2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
v = self.layers.BatchNorm(data=v, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0',
cudnn_off=cudnn_bn_off, act_type=act)
if not fuse_bn_relu:
v = self.layers.Activation(data=v, act_type='relu', name='relu0')
v = self.layers.Pooling(data=v, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max')
for i in range(num_stages):
v = self.residual_unit(v, filter_list[i+1], (1 if i == 0 else 2, 1 if i == 0 else 2),
False, name='stage%d_unit%d' % (i + 1, 1),
bottle_neck=bottle_neck, workspace=workspace, memonger=memonger,
cudnn_bn_off=cudnn_bn_off, fuse_bn_relu=fuse_bn_relu,
fuse_bn_add_relu=fuse_bn_add_relu)
for j in range(units[i]-1):
v = self.residual_unit(v, filter_list[i+1], (1, 1), True,
name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, workspace=workspace, memonger=memonger,
cudnn_bn_off=cudnn_bn_off, fuse_bn_relu=fuse_bn_relu,
fuse_bn_add_relu=fuse_bn_add_relu)
# Although kernel is not used here when global_pool=True, we should put one
v = self.layers.Pooling(data=v, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
v = mx.sym.Flatten(data=v)
v = self.add_head_nodes(v)
return v
def __init__(self, params):
specs = ResNet.specs[params['model']]
Model.check_parameters(
params,
{'name': specs['name'], 'num_classes': 1000, 'phase': 'training', 'dtype': 'float32',
'input_layout': 'NCHW', 'model_layout': 'NCHW', 'nvidia_layers': False,
'workspace': 1024}
)
params['input_shape'] = Model.conv_shape(3, (224, 224), params['input_layout'])
Model.__init__(self, params)
self.params = params
self.layers = Layers(params)
# Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
# Original author <NAME>
# Some optimizations are taken from NVIDIA code from NGC containers.
if specs['num_layers'] >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
self.__output = self.resnet(
units=specs['units'],
num_stages=4,
filter_list=filter_list,
bottle_neck=bottle_neck,
workspace=params['workspace'],
fuse_bn_add_relu=params['nvidia_layers'],
fuse_bn_relu=params['nvidia_layers']
)
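# --- Illustrative parameter sketch (not part of the original file) ---
# The benchmark driver is expected to hand ResNet a params dict; only the keys that
# __init__ above actually reads are listed here, and the Model base class (not shown
# in this file) may require additional keys such as batch size:
#
#   params = {'model': 'resnet50', 'input_layout': 'NCHW', 'model_layout': 'NCHW',
#             'nvidia_layers': False, 'workspace': 1024, 'dtype': 'float32',
#             'phase': 'training', 'num_classes': 1000}
#   net = ResNet(params)   # net.output is the final mx.sym symbol built by resnet()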
|
mlcomp/server/back/create_dags/copy.py | lightforever/kaggler | 166 | 11076827 | import hashlib
import re
from mlcomp.utils.config import merge_dicts_smart
from mlcomp.utils.io import yaml_load, yaml_dump
from mlcomp.db.core import Session
from mlcomp.db.enums import ComponentType, TaskStatus
from mlcomp.db.models import Dag, Task, TaskDependence, DagStorage, File
from mlcomp.db.providers import DagProvider, TaskProvider, DagStorageProvider, \
FileProvider
from mlcomp.utils.misc import now
class DagCopyBuilder:
def __init__(
self,
session: Session,
dag: int,
file_changes: str = '',
dag_suffix: str = '',
logger=None,
component: ComponentType = None
):
self.dag = dag
self.file_changes = file_changes
self.session = session
self.logger = logger
self.component = component
self.dag_suffix = dag_suffix
self.dag_db = None
self.dag_provider = None
self.task_provider = None
self.file_provider = None
self.dag_storage_provider = None
def log_info(self, message: str):
if self.logger:
self.logger.info(message, self.component)
def create_providers(self):
self.log_info('create_providers')
self.dag_provider = DagProvider(self.session)
self.task_provider = TaskProvider(self.session)
self.file_provider = FileProvider(self.session)
self.dag_storage_provider = DagStorageProvider(self.session)
def create_dag(self):
dag = self.dag_provider.by_id(self.dag)
name = dag.name
if self.dag_suffix:
name += ' ' + self.dag_suffix
dag_new = Dag(name=name, created=now(), config=dag.config,
project=dag.project, docker_img=dag.docker_img,
img_size=0, file_size=0, type=dag.type)
self.dag_provider.add(dag_new)
self.dag_db = dag_new
def find_replace(self, changes: dict, path: str):
for k, v in changes.items():
if not re.match(k, path):
continue
return v
def create_tasks(self):
tasks = self.task_provider.by_dag(self.dag)
tasks_new = []
tasks_old = []
for t in tasks:
if t.parent:
continue
task = Task(name=t.name, status=TaskStatus.NotRan.value,
computer=t.computer, gpu=t.gpu, gpu_max=t.gpu_max,
cpu=t.cpu, executor=t.executor, memory=t.memory,
steps=t.steps, dag=self.dag_db.id, debug=t.debug,
type=t.type,
)
task.additional_info = t.additional_info
tasks_new.append(task)
tasks_old.append(t)
self.task_provider.bulk_save_objects(tasks_new, return_defaults=True)
old2new = {t_old.id: t_new.id for t_new, t_old in
zip(tasks_new, tasks_old)}
dependencies = self.task_provider.get_dependencies(self.dag)
dependencies_new = []
for d in dependencies:
d_new = TaskDependence(task_id=old2new[d.task_id],
depend_id=old2new[d.depend_id])
dependencies_new.append(d_new)
self.task_provider.bulk_save_objects(dependencies_new,
return_defaults=False)
changes = yaml_load(self.file_changes)
storages = self.dag_storage_provider.by_dag(self.dag)
storages_new = []
for s, f in storages:
if not isinstance(changes, dict):
continue
replace = self.find_replace(changes, s.path)
if replace is not None and f:
content = f.content.decode('utf-8')
if s.path.endswith('.yml'):
data = yaml_load(content)
data = merge_dicts_smart(data, replace)
content = yaml_dump(data)
else:
                    for k, v in replace.items():
if k not in content:
raise Exception(f'{k} is not in the content')
content = content.replace(k, v)
content = content.encode('utf-8')
md5 = hashlib.md5(content).hexdigest()
f = self.file_provider.by_md5(md5)
if not f:
f = File(
content=content,
created=now(),
project=self.dag_db.project,
md5=md5,
dag=self.dag_db.id
)
self.file_provider.add(f)
s_new = DagStorage(dag=self.dag_db.id, file=f.id, path=s.path,
is_dir=s.is_dir)
storages_new.append(s_new)
self.dag_storage_provider.bulk_save_objects(
storages_new,
return_defaults=False
)
def build(self):
self.create_providers()
self.create_dag()
self.create_tasks()
def dag_copy(session: Session, dag: int, file_changes: str = '',
dag_suffix: str = ''):
builder = DagCopyBuilder(session, dag=dag, file_changes=file_changes,
dag_suffix=dag_suffix)
builder.build()
__all__ = ['dag_copy']
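# Illustrative usage sketch (not part of the original module). Assuming an open
# mlcomp db Session is available as `session`, a dag could be copied with
#
#     dag_copy(session, dag=42, file_changes='', dag_suffix='copy')
#
# where 42 is a made-up dag id.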
|
suite/synctools/register.py | akihikodaki/libcapstone | 127 | 11076838 | #!/usr/bin/python
# print out all registers from LLVM GenRegisterInfo.inc for Capstone disassembler.
# NOTE: the list then must be filtered, manually.
# by <NAME>, 2019
import sys
if len(sys.argv) == 1:
print("Syntax: %s <GenRegisterInfo.inc> <architecture>" %sys.argv[0])
sys.exit(1)
f = open(sys.argv[1])
lines = f.readlines()
f.close()
arch = sys.argv[2].upper()
enum_count = 0
# 1st enum is register enum
for line in lines:
line = line.rstrip()
if len(line.strip()) == 0:
continue
if line.strip() == 'enum {':
enum_count += 1
continue
if enum_count == 1:
if line == '};':
# done with first enum
break
else:
# enum items
if 'NoRegister' in line or 'TARGET_REGS' in line:
continue
reg = line.strip().split('=')[0].strip()
            if reg.startswith('H') or reg.endswith('PH') or reg.endswith('IH') or reg.endswith('WH'):
print(" %s_REG_%s = REMOVE," %(arch, reg))
elif 'K' in reg or 'BND' in reg:
print(" %s_REG_%s = REMOVE," %(arch, reg))
elif reg in ('DF', 'SSP', 'R8BH', 'R9BH', 'R10BH', 'R11BH', 'R12BH', 'R13BH', 'R14BH', 'R15BH'):
print(" %s_REG_%s = REMOVE," %(arch, reg))
else:
print(" %s_REG_%s," %(arch, reg))
|
scripts/data_extractor.py | GarfieldTheLightning/Dynamic-Pokemon-Expansion | 139 | 11076856 | <reponame>GarfieldTheLightning/Dynamic-Pokemon-Expansion
#!/usr/bin/env python3
import os
############
#Options go here.
############
ROM_NAME = "BPRE0.gba" #the name of your rom
NumberOfPokemon = 440 #Change to total number of species in original rom
OutputFolder = os.getcwd() + "/extracted/"
SpeciesDefines = os.getcwd() + "/include/species.h"
############
#Code
############
def GeneralTableExtractor(dynamicOffset, definesDict, definesDict2, outputPath, tableType, tableName,
indexOffset, tableLengthName, startIndex, numEntries, entryLength, alignData):
output = open(outputPath, 'w')
startIndexName = ""
if startIndex != 0:
startIndexName = " - " + indexOffset
dataAlignment = 0
if alignData:
dataAlignment = GetLengthOfLongestValue(definesDict)
with open(ROM_NAME, 'rb') as binary_file:
#Load Dynamic Pointer to Table
binary_file.seek(dynamicOffset)
line = binary_file.read(3)
TablePointer = ExtractPointer(line)
output.write('#include "defines.h"\n\n')
output.write(tableType + ' ' + tableName + '[' + tableLengthName + startIndexName + '] =\n{\n')
for i in range(startIndex, numEntries):
binary_file.seek(TablePointer + ((i - startIndex) * entryLength))
byteList = binary_file.read(entryLength)
if i in definesDict:
output.write('\t[' + definesDict[i] + startIndexName + '] = ')
lenEntry = len(definesDict[i] + startIndexName)
else:
output.write('\t[' + str(i) + startIndexName + '] = ')
lenEntry = len(str(i) + startIndexName)
while lenEntry < dataAlignment: #Align Data
output.write(' ')
lenEntry += 1
data = ExtractPointer(byteList)
if definesDict2 is not False and data in definesDict2:
data = definesDict2[data]
else:
data = hex(data)
output.write(data + ',\n')
output.write('};\n')
output.close()
print("Success!")
def GeneralStructTableExtractor(dynamicOffset, definesDict, outputPath, tableType, tableName, tableLengthName,
numEntries, structLength, memberNames, memberLengths, memberDicts, useMemberNames,
loadDictDataFromIndex, offsetForForce):
assert(len(memberNames) == len(memberLengths) or not useMemberNames)
output = open(outputPath, 'w')
with open(ROM_NAME, 'rb') as binary_file:
#Load Dynamic Pointer to Table
binary_file.seek(dynamicOffset)
line = binary_file.read(3)
TablePointer = ExtractPointer(line)
output.write('#include "defines.h"\n\n')
output.write(tableType + ' ' + tableName + '[' + tableLengthName + '] =\n{\n')
for i in range(numEntries):
binary_file.seek(TablePointer + (i * structLength))
byteList = binary_file.read(structLength)
lenName = 0
if i in definesDict:
output.write('\t[' + definesDict[i] + '] =')
lenName = len(definesDict[i])
else:
output.write('\t[' + str(i) + '] =')
lenName = len(str(i))
if useMemberNames:
output.write('\n\t{\n')
else:
while lenName < 28: #Align structs
output.write(' ')
lenName += 1
output.write(' {')
for j in range(len(memberLengths)):
data = int(ExtractPointer(byteList[:memberLengths[j]]))
if memberDicts != [] and memberDicts[j] is not False:
if loadDictDataFromIndex:
if i in memberDicts[j]:
data = memberDicts[j][i]
else:
data = hex(data)
if offsetForForce > 0:
data += ' + ' + str(offsetForForce)
elif data in memberDicts[j]:
data = memberDicts[j][data]
else:
data = hex(data)
else:
data = hex(data)
if useMemberNames:
output.write('\t\t.' + memberNames[j] + ' = ' + str(data) + ',\n')
elif j + 1 < len(memberLengths):
output.write(str(data) + ', ')
else:
output.write(str(data))
byteList = byteList[memberLengths[j]:]
if i + 1 == numEntries: #Last iteration of loop
if useMemberNames:
output.write('\t},\n};\n')
else:
output.write('},\n};\n')
else:
if useMemberNames:
output.write('\t},\n')
else:
output.write('},\n')
output.close()
print("Success!")
def ExtractPointer(line):
offset = 0
for i in range(len(line)):
offset += (line[i] << (8 * i))
return offset
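# Quick illustration of the little-endian assembly above (byte values made up):
#   ExtractPointer(b'\x78\x56\x34') == 0x345678
#   ExtractPointer(b'\x00\x10\x08') == 0x081000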
def DefinesDictMaker(definesFile):
definesDict = {}
with open(definesFile, 'r') as file:
for line in file:
if '#define ' in line:
linelist = line.split()
try:
definesDict[int(linelist[2])] = linelist[1]
except:
try:
definesDict[int(linelist[2], 16)] = linelist[1]
except:
pass
return definesDict
def GetLengthOfLongestValue(dicty):
maxim = ""
for key in dicty:
if len(dicty[key]) > len(maxim):
maxim = dicty[key]
return len(maxim)
#Code Execution Begins Here
try:
os.makedirs(OutputFolder)
except FileExistsError:
pass
SpeciesDict = DefinesDictMaker(SpeciesDefines)
GeneralStructTableExtractor(0x128, SpeciesDict, OutputFolder + "Front_Pic_Table.c", "const struct CompressedSpriteSheet", "gMonFrontPicTable", "NUM_SPECIES", NumberOfPokemon, 8, [], [4, 2, 2], [False, False, SpeciesDict], False, True, 0)
GeneralStructTableExtractor(0x12C, SpeciesDict, OutputFolder + "Back_Pic_Table.c", "const struct CompressedSpriteSheet", "gMonBackPicTable", "NUM_SPECIES", NumberOfPokemon, 8, [], [4, 2, 2], [False, False, SpeciesDict], False, True, 0)
GeneralStructTableExtractor(0x130, SpeciesDict, OutputFolder + "Palette_Table.c", "const struct CompressedSpritePalette", "gMonPaletteTable", "NUM_SPECIES", NumberOfPokemon, 8, [], [4, 2, 2], [False, SpeciesDict, False], False, True, 0)
GeneralStructTableExtractor(0x134, SpeciesDict, OutputFolder + "Shiny_Palette_Table.c", "const struct CompressedSpritePalette", "gMonShinyPaletteTable", "NUM_SPECIES", NumberOfPokemon, 8, [], [4, 2, 2], [False, SpeciesDict, False], False, True, 1500)
GeneralStructTableExtractor(0x11F4C, SpeciesDict, OutputFolder + "Front_Pic_Coords_Table.c", "const struct MonCoords", "gMonFrontPicCoords", "NUM_SPECIES", NumberOfPokemon, 4, ["size", "y_offset"], [1, 1], [], True, False, 0)
GeneralStructTableExtractor(0x74634, SpeciesDict, OutputFolder + "Back_Pic_Coords_Table.c", "const struct MonCoords", "gMonBackPicCoords", "NUM_SPECIES", NumberOfPokemon, 4, ["size", "y_offset"], [1, 1], [], True, False, 0)
GeneralTableExtractor(0x356F8, SpeciesDict, False, OutputFolder + "Enemy_Elevation_Table.c", "const u8", "gEnemyMonElevation", "", "NUM_SPECIES", 0, NumberOfPokemon, 1, False)
GeneralTableExtractor(0x138, SpeciesDict, False, OutputFolder + "Icon_Table.c", "const u32", "gMonIconTable", "", "NUM_SPECIES", 0, NumberOfPokemon, 4, False)
GeneralTableExtractor(0x13C, SpeciesDict, False, OutputFolder + "Icon_Palette_Table.c", "const u8", "gMonIconPaletteIndices", "", "NUM_SPECIES", 0, NumberOfPokemon, 1, False)
GeneralTableExtractor(0x105E14, SpeciesDict, False, OutputFolder + "Footprint_Table.c", "const u32", "gMonFootprintTable", "", "NUM_SPECIES", 0, NumberOfPokemon, 4, False)
|
replies/factories.py | bllli/Django-China-API | 187 | 11076870 | <reponame>bllli/Django-China-API<gh_stars>100-1000
import factory
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from posts.factories import PostFactory
from replies.models import Reply
from users.factories import UserFactory
class SiteFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'example_%s' % n)
domain = factory.LazyAttribute(lambda o: '%s.com' % o.name)
class Meta:
model = Site
class BaseReplyFactory(factory.DjangoModelFactory):
content_type = factory.LazyAttribute(
lambda o: ContentType.objects.get_for_model(o.content_object))
object_pk = factory.SelfAttribute('content_object.id')
user = factory.SubFactory(UserFactory)
site = factory.SubFactory(SiteFactory)
comment = 'test comment'
class Meta:
model = Reply
class PostReplyFactory(BaseReplyFactory):
content_object = factory.SubFactory(PostFactory)
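# Illustrative usage in a test (not part of the original module); the SubFactory
# declarations make factory_boy build the related Post, User and Site automatically:
#
#     reply = PostReplyFactory(comment='looks good to me')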
|
698 Partition to K Equal Sum Subsets.py | krishna13052001/LeetCode | 872 | 11076878 | <reponame>krishna13052001/LeetCode
#!/usr/bin/python3
"""
Given an array of integers nums and a positive integer k, find whether it's
possible to divide this array into k non-empty subsets whose sums are all equal.
Example 1:
Input: nums = [4, 3, 2, 3, 5, 2, 1], k = 4
Output: True
Explanation: It's possible to divide it into 4 subsets (5), (1, 4), (2,3), (2,3)
with equal sums.
Note:
1 <= k <= len(nums) <= 16.
0 < nums[i] < 10000.
"""
from typing import List
class Solution:
def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:
"""
        recursive search
"""
s = sum(nums)
if s % k != 0:
return False
target = s // k
visited = [False for _ in nums]
return self.dfs(nums, 0, None, target, visited, k)
def dfs(self, nums, start_idx, cur_sum, target_sum, visited, k):
"""
some corner cases:
        1. target_sum default at 0: sum of empty array is 0?
2. nxt_sum = (cur_sum or 0) + nums[i] rather than cur_sum or 0 + nums[i]
arithmetic operator has higher precedence than logic operator
start index to prune
"""
if k == 1:
return True
if cur_sum and cur_sum == target_sum:
# start index is 0
return self.dfs(nums, 0, None, target_sum, visited, k - 1)
for i in range(start_idx, len(nums)):
if not visited[i]:
# corner case target_sum is 0
visited[i] = True
nxt_sum = (cur_sum or 0) + nums[i]
# error when cur_sum or 0 + nums[i]
# arithmetic operator has higher precedence than logic operator
if self.dfs(nums, i + 1, nxt_sum, target_sum, visited, k):
return True
visited[i] = False
return False
class Solution_TLE:
def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:
"""
        recursive search
"""
s = sum(nums)
if s % k != 0:
return False
target = s // k
visited = [False for _ in nums]
return self.dfs(nums, None, target, visited, k)
def dfs(self, nums, cur_sum, target_sum, visited, k):
"""
some corner cases:
        1. target_sum default at 0: sum of empty array is 0?
2. nxt_sum = (cur_sum or 0) + nums[i] rather than cur_sum or 0 + nums[i]
arithmetic operator has higher precedence than logic operator
"""
if k == 0:
return True
if cur_sum and cur_sum == target_sum:
return self.dfs(nums, None, target_sum, visited, k - 1)
for i in range(len(nums)):
if not visited[i]:
# corner case target_sum is 0
visited[i] = True
nxt_sum = (cur_sum or 0) + nums[i]
# error when cur_sum or 0 + nums[i]
# arithmetic operator has higher precedence than logic operator
if self.dfs(nums, nxt_sum, target_sum, visited, k):
return True
visited[i] = False
return False
if __name__ == "__main__":
assert Solution().canPartitionKSubsets([5, 3, 2, 3, 1, 2, 4], 4) == True
assert Solution().canPartitionKSubsets([4, 3, 2, 3, 5, 2, 1], 4) == True
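    # Additional illustrative check (not from the original file): the total 20 splits
    # into k=4 parts of 5, but the element 4 can never be completed to exactly 5, so
    # no valid partition exists.
    assert Solution().canPartitionKSubsets([2, 2, 2, 2, 3, 4, 5], 4) == False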
|
common_utils.py | b01901143/DeepMatchVO | 211 | 11076886 | """
<NAME>, HKUST, 2018
Common utility functions
"""
import os
import numpy as np
from preprocess_matches import read_feature_repo, read_match_repo, get_inlier_image_coords, compute_fmat_error
def complete_batch_size(input_list, batch_size):
left = len(input_list) % batch_size
if left != 0:
for _ in range(batch_size-left):
input_list.append(input_list[-1])
return input_list
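# Example of the padding behaviour above (illustrative values): with batch_size=2,
# complete_batch_size([1, 2, 3], 2) repeats the last element once and returns
# [1, 2, 3, 3], so the length becomes a multiple of the batch size.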
def is_valid_sample(frames, tgt_idx, seq_length):
N = len(frames)
tgt_drive, _ = frames[tgt_idx].split(' ')
max_src_offset = int((seq_length - 1)/2)
min_src_idx = tgt_idx - max_src_offset
max_src_idx = tgt_idx + max_src_offset
if min_src_idx < 0 or max_src_idx >= N:
return False
min_src_drive, _ = frames[min_src_idx].split(' ')
max_src_drive, _ = frames[max_src_idx].split(' ')
if tgt_drive == min_src_drive and tgt_drive == max_src_drive:
return True
return False
def load_match_func(sift_folder, match_folder, frame_id, zoom_x, zoom_y, seq_length, sample_num=100, finlier_thresh=4):
matches = []
sift_file1 = os.path.join(sift_folder, frame_id+'.sift')
sift_keys1, _ = read_feature_repo(sift_file1)
half_offset = int((seq_length - 1)/2)
frame_id_int = int(frame_id.lstrip('0'))
for o in range(-half_offset, half_offset+1):
adj_frame_idx = frame_id_int + o
adj_frame_idx_str = str(adj_frame_idx)
prefix = ['0' for i in range(len(frame_id) - len(adj_frame_idx_str))]
adj_frame_idx_str = ''.join(prefix) + adj_frame_idx_str
sift_file2 = os.path.join(sift_folder, adj_frame_idx_str+'.sift')
sift_keys2, _ = read_feature_repo(sift_file2)
if o == 0:
continue
elif o < 0:
match_file = os.path.join(match_folder, adj_frame_idx_str+'.mat')
search_sift_name = os.path.splitext(os.path.split(sift_file1)[1])[0]
else:
match_file = os.path.join(match_folder, frame_id+'.mat')
search_sift_name = os.path.splitext(os.path.split(sift_file2)[1])[0]
match_ret = read_match_repo(match_file)
found = False
for i in range(len(match_ret)):
sift_name = match_ret[i][0]
if sift_name == search_sift_name:
found = True
match_num = match_ret[i][1]
fmat = match_ret[i][3]
match_pairs = match_ret[i][4]
if o < 0:
image_coords = get_inlier_image_coords(sift_keys2, sift_keys1, match_pairs)
else:
image_coords = get_inlier_image_coords(sift_keys1, sift_keys2, match_pairs)
ave_error = compute_fmat_error(fmat, image_coords, homogeneous=False)
assert image_coords.shape[0] == match_num[2]
assert ave_error < finlier_thresh
# sample matches
if image_coords.shape[0] > sample_num:
sample_idx = np.random.choice(image_coords.shape[0], sample_num, replace=False)
else:
                    sample_idx = list(range(image_coords.shape[0]))
for i in range(sample_num - image_coords.shape[0]):
sample_idx.append(0)
assert len(sample_idx) == sample_num
sampled_coords = image_coords[sample_idx, :]
if o < 0:
sampled_coords = np.matrix(
[zoom_x*sampled_coords[:, 2], zoom_y*sampled_coords[:, 3], zoom_x*sampled_coords[:, 0], zoom_y*sampled_coords[:, 1]]).transpose()
else:
sampled_coords = np.matrix(
[zoom_x*sampled_coords[:, 0], zoom_y*sampled_coords[:, 1], zoom_x*sampled_coords[:, 2], zoom_y*sampled_coords[:, 3]]).transpose()
matches.append(sampled_coords)
if not found:
print('Error: No matches for ', sift_file1, sift_file2)
exit(-1)
return matches |
python/tvm/relay/analysis/sparse_dense.py | XiaoSong9905/tvm | 4,640 | 11076896 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
# pylint: disable=unidiomatic-typecheck
"""
This file contains helper functions for convert dense model
to block sparse model
"""
from collections import namedtuple
import numpy as np
import scipy.sparse as sp
import tvm
from . import _ffi_api
SparseAnalysisResult = namedtuple(
"SparseAnalysisResult",
[
"weight_name",
"weight_shape",
],
)
def _search_dense_op_weight(expr):
"""Search name of weight in all ```nn.dense``` operator
This is a helpful function to determine which param need
to be converted to sparse
Parameters
----------
expr : relay.Expr
Expr will be searched
Returns
-------
ret : Array[String]
name of weight in all ``nn.dense``` operator
"""
return _ffi_api.search_dense_op_weight(expr)
def process_params(expr, params, block_size, sparsity_threshold):
"""[summary]
Parameters
----------
expr : Relay.Expr
Expr of the network
params : Dict[String, tvm.nd.array]
parameters of the network
block_size : Tuple(int, int)
Blocksize in BSR matrix
sparsity_threshold : float
Minimal sparsity requirement for converting to sparse operation
Returns
-------
ret : Namedtuple[weight_name: Array[String], weight_shape: Array[Array[IntImm]]]
return names of qualified dense weight and the shape in BSR format
"""
# pylint: disable=import-outside-toplevel
from tvm.auto_scheduler.search_task import (
register_task_input_buffer,
) # lazily import to avoid recursive dependency
memo = SparseAnalysisResult(weight_name=[], weight_shape=[])
weight_names = _search_dense_op_weight(expr)
for name in weight_names:
name = str(name)
w_np = params[name].numpy()
sparsity = 1.0 - (np.count_nonzero(w_np) / w_np.size)
if sparsity >= sparsity_threshold:
sparse_weight = sp.bsr_matrix(w_np, blocksize=block_size)
# remove dense weight
del params[name]
memo.weight_name.append(name)
memo.weight_shape.append(
list(sparse_weight.data.shape)
+ list(sparse_weight.indices.shape)
+ list(sparse_weight.indptr.shape)
)
params[name + ".data"] = tvm.nd.array(sparse_weight.data)
params[name + ".indices"] = tvm.nd.array(sparse_weight.indices)
params[name + ".indptr"] = tvm.nd.array(sparse_weight.indptr)
prefix = "sparse_dense_bsr_%d_%d_%d_%d_%d_%d_" % (
w_np.shape[0],
w_np.shape[1],
block_size[0],
block_size[1],
sparse_weight.indices.shape[0],
sparse_weight.indptr.shape[0],
)
register_task_input_buffer(
"default",
prefix + "W_data",
tvm.runtime.ndarray.array(sparse_weight.data),
overwrite=True,
)
register_task_input_buffer(
"default",
prefix + "W_indices",
tvm.runtime.ndarray.array(sparse_weight.indices),
overwrite=True,
)
register_task_input_buffer(
"default",
prefix + "W_indptr",
tvm.runtime.ndarray.array(sparse_weight.indptr),
overwrite=True,
)
ret = SparseAnalysisResult(
weight_name=tvm.runtime.convert(memo.weight_name),
weight_shape=tvm.runtime.convert(memo.weight_shape),
)
return ret
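# Minimal usage sketch (illustrative only; `mod` and `params` are assumed to come from a
# relay frontend importer and are not defined in this module):
#   result = process_params(mod["main"], params, block_size=(16, 1), sparsity_threshold=0.75)
#   # result.weight_name / result.weight_shape describe the converted weights, while `params`
#   # now contains "<name>.data", "<name>.indices" and "<name>.indptr" entries in BSR format.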
|
pyjswidgets/pyjamas/ui/ClickDelegatePanel.py | takipsizad/pyjs | 739 | 11076926 | <reponame>takipsizad/pyjs<filename>pyjswidgets/pyjamas/ui/ClickDelegatePanel.py
# Copyright 2006 <NAME> and contributors
# Copyright (C) 2009 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from pyjamas.ui.Composite import Composite
from pyjamas.ui import Event
from pyjamas.ui import Focus
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui import KeyboardListener
class ClickDelegatePanel(Composite):
def __init__(self, p, child, cDelegate, kDelegate) :
Composite.__init__(self)
self.clickDelegate = cDelegate
self.keyDelegate = kDelegate
self.focusablePanel = SimplePanel(Focus.createFocusable())
self.focusablePanel.setWidget(child)
wrapperWidget = p.createTabTextWrapper()
if wrapperWidget is None:
self.initWidget(self.focusablePanel)
else :
wrapperWidget.setWidget(self.focusablePanel)
self.initWidget(wrapperWidget)
if hasattr(child, "addKeyboardListener"):
child.addKeyboardListener(kDelegate)
self.sinkEvents(Event.ONCLICK | Event.ONKEYDOWN)
# receive Label's onClick and pass it through, pretending it came from us
def onClick(self, sender=None):
self.clickDelegate.onClick(sender)
def getFocusablePanel(self):
return self.focusablePanel
def onBrowserEvent(self, event) :
type = DOM.eventGetType(event)
if type == "click":
self.onClick(self)
elif type == "keydown":
modifiers = KeyboardListener.getKeyboardModifiers(event)
if hasattr(self.keyDelegate, "onKeyDown"):
self.keyDelegate.onKeyDown(self, DOM.eventGetKeyCode(event),
modifiers)
# TODO: sort out how to create or grab an element for
# Factory.createWidgetOnElement to work
#Factory.registerClass('pyjamas.ui.ClickDelegatePanel', 'ClickDelegatePanel', ClickDelegatePanel)
|
pigar/tests/imports_example/subbar/bar.py | yasirroni/pigar | 959 | 11076937 | from subfoo.foo import foo
def bar():
foo()
|
plan.py | qkhy/poetry-seq2seq | 186 | 11076964 | #! /usr/bin/env python
#-*- coding:utf-8 -*-
import os
import jieba
from gensim import models
from random import shuffle, random, randint
from utils import uprintln, uprint, DATA_PROCESSED_DIR, split_sentences
from data_utils import get_kw_train_data
from segment import Segmenter
from quatrains import get_quatrains
from rank_words import get_word_ranks
_model_path = os.path.join(DATA_PROCESSED_DIR, 'kw_model.bin')
class Planner:
def __init__(self):
self.ranks = get_word_ranks()
if not os.path.exists(_model_path):
self._train()
else:
self.model = models.Word2Vec.load(_model_path)
def _train(self):
print "Start training Word2Vec for planner ..."
quatrains = get_quatrains()
segmenter = Segmenter()
seg_lists = []
for idx, quatrain in enumerate(quatrains):
seg_list = []
for sentence in quatrain['sentences']:
seg_list.extend(filter(lambda seg: seg in self.ranks,
segmenter.segment(sentence)))
seg_lists.append(seg_list)
if 0 == (idx+1)%10000:
print "[Plan Word2Vec] %d/%d quatrains has been processed." %(idx+1, len(quatrains))
print "Hold on. This may take some time ..."
self.model = models.Word2Vec(seg_lists, size = 512, min_count = 5)
self.model.save(_model_path)
def expand(self, words, num):
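        """Expand `words` in place to `num` keywords, first with Word2Vec most-similar
        candidates and then, if still short, with rank-weighted random sampling;
        the final list is shuffled."""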
positive = filter(lambda w: w in self.model.wv, words)
similars = self.model.wv.most_similar(positive = positive) \
if len(positive) > 0 else []
words.extend(pair[0] for pair in similars[:min(len(similars), num-len(words))])
if len(words) < num:
_prob_sum = sum(1./(i+1) for i in range(len(self.ranks)))
_prob_sum -= sum(1./(self.ranks[word]+1) for word in words)
while len(words) < num:
prob_sum = _prob_sum
for word, rank in self.ranks.items():
if word in words:
continue
elif prob_sum * random() < 1./(rank+1):
words.append(word)
break
else:
prob_sum -= 1./(rank+1)
shuffle(words)
def plan(self, text):
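        """Extract ranked keywords from `text`, deduplicate them, and expand or trim
        the list to exactly four keywords."""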
def extract(sentence):
return filter(lambda x: x in self.ranks, jieba.lcut(sentence))
keywords = sorted(reduce(lambda x,y:x+y, map(extract, split_sentences(text)), []),
cmp = lambda x,y: cmp(self.ranks[x], self.ranks[y]))
words = [keywords[idx] for idx in \
filter(lambda i: 0 == i or keywords[i] != keywords[i-1], range(len(keywords)))]
if len(words) < 4:
self.expand(words, 4)
else:
while len(words) > 4:
words.pop()
return words
if __name__ == '__main__':
planner = Planner()
kw_train_data = get_kw_train_data()
for row in kw_train_data:
num = randint(1,3)
uprint(row[1:])
print "num = %d" %num
guess = row[1:num+1]
planner.expand(guess, 4)
uprintln(guess)
assert len(guess) == 4
print
|
pointer.py | ikostrikov/TensorFlow-Pointer-Networks | 223 | 11076973 | <reponame>ikostrikov/TensorFlow-Pointer-Networks
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A pointer-network helper.
Based on the attention_decoder implementation from TensorFlow
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
def pointer_decoder(decoder_inputs, initial_state, attention_states, cell,
feed_prev=True, dtype=dtypes.float32, scope=None):
"""RNN decoder with pointer net for the sequence-to-sequence model.
Args:
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function and size.
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "pointer_decoder".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors of shape
[batch_size x output_size]. These represent the generated outputs.
      Output i is computed from input i (which is the i-th element of decoder_inputs).
First, we run the cell
on a combination of the input and previous attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
"""
if not decoder_inputs:
raise ValueError("Must provide at least 1 input to attention decoder.")
if not attention_states.get_shape()[1:2].is_fully_defined():
raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
% attention_states.get_shape())
with vs.variable_scope(scope or "point_decoder"):
batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.
input_size = decoder_inputs[0].get_shape()[1].value
attn_length = attention_states.get_shape()[1].value
attn_size = attention_states.get_shape()[2].value
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = array_ops.reshape(
attention_states, [-1, attn_length, 1, attn_size])
attention_vec_size = attn_size # Size of query vectors for attention.
k = vs.get_variable("AttnW", [1, 1, attn_size, attention_vec_size])
hidden_features = nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME")
v = vs.get_variable("AttnV", [attention_vec_size])
states = [initial_state]
def attention(query):
"""Point on hidden using hidden_features and query."""
with vs.variable_scope("Attention"):
y = core_rnn_cell_impl._linear(query, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
# Attention mask is a softmax of v^T * tanh(...).
s = math_ops.reduce_sum(
v * math_ops.tanh(hidden_features + y), [2, 3])
return s
outputs = []
prev = None
batch_attn_size = array_ops.stack([batch_size, attn_size])
attns = array_ops.zeros(batch_attn_size, dtype=dtype)
attns.set_shape([None, attn_size])
inps = []
for i in range(len(decoder_inputs)):
if i > 0:
vs.get_variable_scope().reuse_variables()
inp = decoder_inputs[i]
if feed_prev and i > 0:
inp = tf.stack(decoder_inputs)
inp = tf.transpose(inp, perm=[1, 0, 2])
inp = tf.reshape(inp, [-1, attn_length, input_size])
inp = tf.reduce_sum(inp * tf.reshape(tf.nn.softmax(output), [-1, attn_length, 1]), 1)
inp = tf.stop_gradient(inp)
inps.append(inp)
      # Use the same inputs in inference, ordered internally
# Merge input and previous attentions into one vector of the right size.
x = core_rnn_cell_impl._linear([inp, attns], cell.output_size, True)
# Run the RNN.
cell_output, new_state = cell(x, states[-1])
states.append(new_state)
# Run the attention mechanism.
output = attention(new_state)
outputs.append(output)
return outputs, states, inps
|
tests/test_model_v2_EdmStructTypeSerializer.py | daughterlycare/python-pyodata | 142 | 11076976 | """Tests of OData Model: class VariableDeclaration"""
import pytest
import datetime
from pyodata.v2.model import EdmStructTypeSerializer, Types, StructType, StructTypeProperty
from pyodata.exceptions import PyODataException
@pytest.fixture
def complex_type_property_declarations():
return {
'TestString': (Types.parse_type_name('Edm.String'), "'FooBar'", "'FooBar'", 'FooBar'),
'TestBoolean': (Types.parse_type_name('Edm.Boolean'), False, 'false', False),
'TestInt64': (Types.parse_type_name('Edm.Int64'), '123L', '123L', 123),
'TestDateTime': (Types.parse_type_name('Edm.DateTime'), "/Date(2147483647000)/", "datetime'2038-01-19T3:14:7'",
datetime.datetime(2038, 1, 19, hour=3, minute=14, second=7, tzinfo=datetime.timezone.utc))
}
def define_complex_type(complex_type_property_declarations, nullable = True):
complex_typ = StructType('TestComplexType', 'Label Complex Type', False)
for name, prop_decl in complex_type_property_declarations.items():
prop = StructTypeProperty(name, prop_decl[0], nullable, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None)
prop.typ = Types.from_name(prop.type_info.name)
complex_typ._properties[prop.name] = prop
prop.struct_type = complex_typ
return complex_typ
@pytest.fixture
def complex_type_with_nullable_props(complex_type_property_declarations, nullable = True):
return define_complex_type(complex_type_property_declarations, nullable=True)
@pytest.fixture
def complex_type_without_nullable_props(complex_type_property_declarations, nullable = True):
return define_complex_type(complex_type_property_declarations, nullable=False)
def test_nullable_from_json_null_properties(complex_type_with_nullable_props, complex_type_property_declarations):
entity_json = { prop_name: None for prop_name in complex_type_property_declarations.keys() }
entity_odata = complex_type_with_nullable_props.traits.from_json(entity_json)
assert entity_json.keys() == entity_odata.keys()
for name, value in entity_odata.items():
assert value is None, f'Property: {name}'
def test_non_nullable_from_json_null_properties(complex_type_without_nullable_props, complex_type_property_declarations):
for prop_name in complex_type_property_declarations.keys():
entity_json = { prop_name : None }
with pytest.raises(PyODataException):
entity_odata = complex_type_without_nullable_props.traits.from_json(entity_json)
def test_non_nullable_from_json(complex_type_without_nullable_props, complex_type_property_declarations):
entity_json = { prop_name : prop_decl[1] for prop_name, prop_decl in complex_type_property_declarations.items() }
entity_odata =complex_type_without_nullable_props.traits.from_json(entity_json)
assert entity_json.keys() == entity_odata.keys()
for name, value in entity_odata.items():
assert value == complex_type_property_declarations[name][3], f'Value of {name}'
def test_nullable_from_literal_null_properties(complex_type_with_nullable_props, complex_type_property_declarations):
entity_literal = { prop_name: None for prop_name in complex_type_property_declarations.keys() }
entity_odata = complex_type_with_nullable_props.traits.from_literal(entity_literal)
assert entity_literal.keys() == entity_odata.keys()
for name, value in entity_odata.items():
assert value is None, f'Property: {name}'
def test_non_nullable_from_literal_null_properties(complex_type_without_nullable_props, complex_type_property_declarations):
for prop_name in complex_type_property_declarations.keys():
entity_literal = { prop_name : None }
with pytest.raises(PyODataException):
entity_odata = complex_type_without_nullable_props.traits.from_literal(entity_literal)
def test_non_nullable_from_literal(complex_type_without_nullable_props, complex_type_property_declarations):
entity_literal = { prop_name : prop_decl[2] for prop_name, prop_decl in complex_type_property_declarations.items() }
entity_odata =complex_type_without_nullable_props.traits.from_literal(entity_literal)
assert entity_literal.keys() == entity_odata.keys()
for name, value in entity_odata.items():
assert value == complex_type_property_declarations[name][3], f'Value of {name}'
|
pandas/core/computation/api.py | CJL89/pandas | 28,899 | 11076995 | <reponame>CJL89/pandas
# flake8: noqa
from pandas.core.computation.eval import eval
|
homeassistant/components/recorder/pool.py | andersop91/core | 22,481 | 11077050 | <gh_stars>1000+
"""A pool for sqlite connections."""
import threading
from sqlalchemy.pool import NullPool, StaticPool
class RecorderPool(StaticPool, NullPool):
"""A hybird of NullPool and StaticPool.
When called from the creating thread acts like StaticPool
When called from any other thread, acts like NullPool
"""
def __init__(self, *args, **kw): # pylint: disable=super-init-not-called
"""Create the pool."""
self._tid = threading.current_thread().ident
StaticPool.__init__(self, *args, **kw)
def _do_return_conn(self, conn):
if threading.current_thread().ident == self._tid:
return super()._do_return_conn(conn)
conn.close()
def dispose(self):
"""Dispose of the connection."""
if threading.current_thread().ident == self._tid:
return super().dispose()
def _do_get(self):
if threading.current_thread().ident == self._tid:
return super()._do_get()
return super( # pylint: disable=bad-super-call
NullPool, self
)._create_connection()
|
mindinsight/common/hook/datavisual.py | mindspore-ai/mindinsight | 216 | 11077057 | <gh_stars>100-1000
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Datavisual hook."""
import argparse
import os
from mindinsight.conf import settings
from mindinsight.utils.hook import BaseHook
class ReloadIntervalAction(argparse.Action):
"""Reload interval action class definition."""
def __call__(self, parser, namespace, values, option_string=None):
"""
Inherited __call__ method from argparse.Action.
Args:
parser (ArgumentParser): Passed-in argument parser.
namespace (Namespace): Namespace object to hold arguments.
values (object): Argument values with type depending on argument definition.
option_string (str): Option string for specific argument name.
"""
reload_interval = values
if reload_interval < 0 or reload_interval > settings.MAX_RELOAD_INTERVAL:
            parser.error(f'{option_string} should be greater than or equal to 0 and less than or equal to {settings.MAX_RELOAD_INTERVAL}')
setattr(namespace, self.dest, reload_interval)
class SummaryBaseDirAction(argparse.Action):
"""Summary base dir action class definition."""
def __call__(self, parser, namespace, values, option_string=None):
"""
Inherited __call__ method from argparse.Action.
Args:
parser (ArgumentParser): Passed-in argument parser.
namespace (Namespace): Namespace object to hold arguments.
values (object): Argument values with type depending on argument definition.
option_string (str): Option string for specific argument name.
"""
summary_base_dir = os.path.realpath(values)
if not os.path.exists(summary_base_dir):
print('Warning: summary-base-dir does not exist')
setattr(namespace, self.dest, summary_base_dir)
class Hook(BaseHook):
"""Hook class definition."""
def register_startup_arguments(self, parser):
"""
Hook function to register startup arguments.
Args:
parser (ArgumentParser): Specify parser to which arguments are added.
"""
parser.add_argument(
'--reload-interval',
type=int,
action=ReloadIntervalAction,
help="""
            data reload time (seconds). It should be greater than or equal to 0.
            If it equals 0, data is loaded only once. Default value is %s seconds.
""" % settings.RELOAD_INTERVAL)
parser.add_argument(
'--summary-base-dir',
type=str,
action=SummaryBaseDirAction,
help="""
directory where MindInsight will walk through its direct subdirectories
and look for summary files naming with regex 'summary.\\d+' or '\\.pb$'. Any direct
subdirectory containing summary files will turn out to be the summary
file directory. Summary file existing in summary-base-dir indicates that
sumamry-base-dir is one of the summary file directories as well. Default
value is current directory.""")
|
bark/pip_package.py | RdecKa/bark | 174 | 11077080 | #from core import core
#__all__ = ["core"]
|
trackstats/tests/models.py | keranno/django-trackstats | 369 | 11077086 | <reponame>keranno/django-trackstats
from django.db import models
from django.utils import timezone
class Comment(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
timestamp = models.DateTimeField(default=timezone.now)
|
icloudpd/email_notifications.py | mkirkland4874/icloud_photos_downloader | 1,514 | 11077100 | """Send an email notification when 2SA is expired"""
import smtplib
import datetime
from icloudpd.logger import setup_logger
# pylint: disable-msg=too-many-arguments
def send_2sa_notification(
smtp_email, smtp_password, smtp_host, smtp_port, smtp_no_tls, to_addr
):
"""Send an email notification when 2SA is expired"""
to_addr = to_addr if to_addr else smtp_email
from_addr = smtp_email if smtp_email else to_addr
logger = setup_logger()
logger.info("Sending 'two-step expired' notification via email...")
smtp = smtplib.SMTP(smtp_host, smtp_port)
smtp.set_debuglevel(0)
# leaving explicit call of connect to not break unit tests, even though it is
    # called implicitly via constructor parameters
smtp.connect(smtp_host, smtp_port)
if not smtp_no_tls:
smtp.starttls()
if smtp_email is not None or smtp_password is not None:
smtp.login(smtp_email, smtp_password)
subj = "icloud_photos_downloader: Two step authentication has expired"
date = datetime.datetime.now().strftime("%d/%m/%Y %H:%M")
message_text = """Hello,
Two-step authentication has expired for the icloud_photos_downloader script.
Please log in to your server and run the script manually to update two-step authentication."""
msg = "From: %s\nTo: %s\nSubject: %s\nDate: %s\n\n%s" % (
"iCloud Photos Downloader <" + from_addr + ">",
to_addr,
subj,
date,
message_text,
)
smtp.sendmail(from_addr, to_addr, msg)
smtp.quit()
|
blogs/migrations/0037_blog_lang.py | daaawx/bearblog | 657 | 11077126 | <reponame>daaawx/bearblog
# Generated by Django 3.0.7 on 2021-06-29 17:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0036_blog_challenge'),
]
operations = [
migrations.AddField(
model_name='blog',
name='lang',
field=models.CharField(default='en', max_length=10),
),
]
|
src/onedrivesdk/model/shared_collection_page.py | meson800/onedrive-sdk-python | 912 | 11077133 | <reponame>meson800/onedrive-sdk-python<gh_stars>100-1000
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..collection_base import CollectionPageBase
from ..model.item import Item
class SharedCollectionPage(CollectionPageBase):
def __getitem__(self, index):
"""Get the Item at the index specified
Args:
index (int): The index of the item to get from the SharedCollectionPage
Returns:
:class:`Item<onedrivesdk.model.item.Item>`:
The Item at the index
"""
return Item(self._prop_list[index])
def shared(self):
"""Get a generator of Item within the SharedCollectionPage
Yields:
:class:`Item<onedrivesdk.model.item.Item>`:
The next Item in the collection
"""
for item in self._prop_list:
yield Item(item)
|
python/base_agent/ttad/ttad_transformer_model/recombine_data.py | yjernite/craftassist | 626 | 11077166 | import argparse
import ast
import copy
import json
import os
import random
from recombine_data_utils import *
from typing import *
def create_train_valid_split(chunk_index: int, k: int, data_dir: str, output_dir: str):
"""Create partitions for k fold Cross Validation
Given a chunk index for the valid set, create train and valid split from k chunks of the dataset.
    Chunk index is an index in the range 0 to k-1.
"""
# Read from other chunks and write JSON file to train/ dir
train_dataset: List[Dict] = []
valid_dataset: List[Dict] = []
for i in range(k):
# Use this as the validation set
if i == chunk_index:
valid_dataset += json.load(
open(data_dir + "cv_pool/chunk_{}/annotated_augmented.json".format(i))
)
else:
train_dataset += json.load(
open(data_dir + "cv_pool/chunk_{}/annotated_augmented.json".format(i))
)
# Write to train and valid directories
directories: List[str] = ["/", "train/", "valid/"]
for d in directories:
if not os.path.isdir(output_dir + d):
os.mkdir(output_dir + d)
print(
"Writing {} entries to {}".format(
len(train_dataset), output_dir + "train/annotated_augmented.json"
)
)
json.dump(train_dataset, open(output_dir + "train/annotated_augmented.json", "w"))
print(
"Writing {} entries to {}".format(
len(valid_dataset), output_dir + "valid/annotated_augmented.json"
)
)
json.dump(valid_dataset, open(output_dir + "valid/annotated_augmented.json", "w"))
def get_train_annotated_commands(
data_dir: str, tool1_path: str, tool2_path: str, node_types: List[str]
) -> (List[str], List[str], Dict[str, dict]):
"""
Fetch Turk data corresponding to annotated data training set.
"""
# Read from tool 1
tool1_lines: List[str] = open(tool1_path).readlines()
# Read from tool 2
tool2_lines: List[str] = open(tool2_path).readlines()
# Load the training data that we created
train_annotated_trees = json.load(open(data_dir + "train/annotated_augmented.json"))
train_annotated_phrases: List[str] = [x[0] for x in train_annotated_trees]
turk_processor = TurkToolProcessor(train_annotated_phrases, node_types)
# Filter samples that we want to use for recombination
filtered_tool1_lines: List[str] = turk_processor.filter_tool1_lines(tool1_lines)
filtered_tool2_lines: List[str] = turk_processor.filter_tool2_lines(tool2_lines)
chat_tree_inserts = turk_processor.build_tree_inserts_dict(tool2_lines)
return (filtered_tool1_lines, filtered_tool2_lines, chat_tree_inserts)
def create_templates_for_node_type(
chat: str,
node_type: str,
action_dict: dict,
filtered_tool1_lines: List[str],
chat_tree_inserts: dict,
) -> (List[tuple], List[tuple]):
"""
Generate templates and fragments for recombination.
"""
new_templates = []
new_fragments = []
# create recombination template and fragment from tree and chat
for k, v in ast.literal_eval(action_dict).items():
if k == node_type:
if contains_span(v):
full_tree_with_hole = get_full_tree(
chat, node_type, filtered_tool1_lines, chat_tree_inserts
)
if full_tree_with_hole is None:
print("Error finding the full tree for chat {}".format(chat))
break
span_idxs = get_loc_span_range(full_tree_with_hole, node_type)
fragment, new_chat = process_chat(chat, span_idxs[1])
# chat, fragment for subs, original tree with hole (for fragment)
# original span idxs so we can shift the new ones over
new_templates.append((new_chat, span_idxs[1], v, full_tree_with_hole))
# chat fragment, corresponding tree
new_fragments.append((fragment, span_idxs[1], v))
return (new_templates, new_fragments)
def gen_chat_tree_templates_and_fragments(
filtered_tool1_lines, filtered_tool2_lines, chat_tree_inserts, node_types
) -> (Dict[str, list], Dict[str, list]):
"""
Generate chat and tree fragments and templates.
"""
full_trees = {}
fragments = {}
for l in filtered_tool2_lines:
chat, child_name, action_dict = l.split("\t")
if child_name in node_types:
if child_name not in full_trees:
full_trees[child_name] = []
if child_name not in fragments:
fragments[child_name] = []
new_templates, new_fragments = create_templates_for_node_type(
chat, child_name, action_dict, filtered_tool1_lines, chat_tree_inserts
)
full_trees[child_name] += new_templates
fragments[child_name] += new_fragments
return (full_trees, fragments)
def process_chat(chat: str, span_idxs: list) -> (str, str):
"""Given a chat and span range, remove the span and insert a single <unk> token.
Return the removed span (Fragment) and processed chat (Template).
"""
tokens = chat.split(" ")
fragment = []
new_tokens = []
idx = span_idxs[0]
while idx <= span_idxs[1]:
fragment.append(tokens[idx])
idx += 1
new_tokens += tokens[0 : span_idxs[0]]
new_tokens.append("<unk>")
if len(span_idxs) > 1:
new_tokens += tokens[(span_idxs[1] + 1) : len(tokens)]
return (" ".join(fragment), " ".join(new_tokens))
def insert_fragment_to_templated_chat(templated_chat: str, fragment: str) -> (str, list):
"""
Utility for inserting fragments to trees and chats. Note that we deepcopy subtrees.
"""
chat_str = templated_chat.split(" ")
new_chat_str = []
span_idx = []
for token in chat_str:
if token == "<unk>":
span_idx.append(len(new_chat_str))
new_chat_str += fragment.split(" ")
span_idx.append(len(new_chat_str) - 1)
else:
new_chat_str.append(token)
return (" ".join(new_chat_str), span_idx)
def insert_subtree_into_full_tree(
subtree: dict, full_tree: dict, original_span_idx: list, idx_shift: int, span_offset: int
) -> dict:
"""
    Recursively update the span indices of every node; nodes marked "no" are replaced with the inserted subtree.
"""
new_tree = copy.deepcopy(full_tree)
for k, v in new_tree.items():
if type(v) == dict:
new_tree[k] = insert_subtree_into_full_tree(
subtree, v, original_span_idx, idx_shift, span_offset
)
if type(v) == list:
if type(v[0]) == str:
if v[0] == "yes":
if type(v[1]) == dict:
new_tree[k] = insert_subtree_into_full_tree(
subtree, v[1], original_span_idx, idx_shift, span_offset
)
elif type(v[1]) == list and is_span(v[1]):
new_tree[k] = reformat_span_idxs(v[1])
if new_tree[k][1][0] > original_span_idx[0]:
new_tree[k][1] = [x - idx_shift[1] for x in new_tree[k][1]]
else:
new_tree[k] = v[1]
elif v[0] == "no":
new_tree[k] = update_tree_spans(copy.deepcopy(subtree), span_offset)
elif is_span(v):
new_tree[k] = reformat_span_idxs(v)
# shift indices over if needed
if new_tree[k][1][0] > original_span_idx[0]:
new_tree[k][1] = [x - idx_shift[1] for x in new_tree[k][1]]
return new_tree
def update_fragment_tree(tree: dict, offset: int) -> dict:
"""
Update span positions in a subtree.
"""
new_tree = copy.deepcopy(tree)
for key, value in tree.items():
if type(value) == list and is_span(value):
reformat_idxs = reformat_span_idxs(value)
if contains_negative(reformat_idxs[1], offset):
del new_tree[key]
else:
new_tree[key] = [0, [x - offset for x in reformat_idxs[1]]]
elif type(value) == dict:
new_tree[key] = update_fragment_tree(value, offset)
return new_tree
def create_fragment_dataset(subtrees: list, key: str) -> list:
"""
    Creates a dataset of spans given a node type, e.g. schematic.
"""
fragments_dataset = []
for fragment_set in subtrees:
text, span, tree = fragment_set
head = {key: copy.deepcopy(tree)}
new_tree = postprocess_tree(update_fragment_tree(head, span[0]))
fragments_dataset.append((text, new_tree["action_sequence"][0]))
return fragments_dataset
def gen_recombined_data(templates: List[tuple], fragments: List[tuple]) -> List[tuple]:
"""
Generate recombined examples.
"""
recombined_data = []
for i in range(len(templates)):
for j in range(len(fragments)):
if i == j:
continue
templated_chat, orig_chat_span_idx, templated_tree, templated_full_tree = templates[i]
fragment, orig_fragment_span_idx, subtree = fragments[j]
recombined_chat, new_chat_span_idx = insert_fragment_to_templated_chat(
templated_chat, fragment
)
# Calculate shift between original span idx and new span idx
idx_shift = [
orig_chat_span_idx[0] - new_chat_span_idx[0],
orig_chat_span_idx[1] - new_chat_span_idx[1],
]
# span gap for templated chat - orig_chat_span_idx
# offset for span - orig_fragment_span_idx
span_offset = orig_fragment_span_idx[0] - new_chat_span_idx[0]
recombined_full_tree = insert_subtree_into_full_tree(
subtree, templated_full_tree, orig_chat_span_idx, idx_shift, span_offset
)
recombined_full_tree = postprocess_tree(recombined_full_tree)
recombined_data.append((recombined_chat, recombined_full_tree))
return recombined_data
def write_recombined_data_chunk(
data_dir: str,
output_dir: str,
tool1_path: str,
tool2_path: str,
dataset_name: str,
node_types: List[str],
use_fragments: bool,
):
"""
Read from a partition and write recombined results to output directory.
"""
filtered_tool1_lines, filtered_tool2_lines, chat_tree_inserts = get_train_annotated_commands(
data_dir, tool1_path, tool2_path, node_types
)
combined_templates, combined_fragments = gen_chat_tree_templates_and_fragments(
filtered_tool1_lines, filtered_tool2_lines, chat_tree_inserts, node_types
)
recombined_data: List[List[str, dict]] = []
fragments_dataset: List[List[str, dict]] = []
for key in node_types:
recombined_data += gen_recombined_data(combined_templates[key], combined_fragments[key])
fragments_dataset += create_fragment_dataset(combined_fragments[key], key)
train_output_dir = output_dir + "train/"
if not os.path.isdir(train_output_dir):
os.mkdir(train_output_dir)
if use_fragments:
random.shuffle(fragments_dataset)
fragments_data = [[str(x[0]), x[1]] for x in fragments_dataset]
with open(train_output_dir + dataset_name + "_fragments.json", "w") as outfile:
print(
"Writing {} fragments data samples to directory {}".format(
len(fragments_data), train_output_dir + dataset_name + ".json"
)
)
json.dump(fragments_dataset, outfile)
else:
recombined_data += fragments_dataset
random.shuffle(recombined_data)
recombined_data = [[str(x[0]), x[1]] for x in recombined_data]
print("Created recombined dataset with size {}".format(len(recombined_data)))
with open(train_output_dir + dataset_name + ".json", "w") as outfile:
print(
"Writing {} recombined data samples to directory {}".format(
len(recombined_data), train_output_dir + dataset_name + ".json"
)
)
json.dump(recombined_data, outfile)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
default="/private/home/rebeccaqian/minecraft/python/craftassist/ttad/data/annotated_data/",
type=str,
help="train/valid/test data",
)
parser.add_argument(
"--dataset_name",
default="prompts_recombined_location_ref_objects",
type=str,
help="name of recombined dataset",
)
parser.add_argument(
"--output_dir",
default="/checkpoint/rebeccaqian/files/annotated_data/",
type=str,
help="directory to write recombined data",
)
parser.add_argument(
"-k", default=10, type=int, help="Number of partitions in leave-k-out-cross-validation."
)
parser.add_argument(
"--create_k_fold_split",
action="store_true",
help="Whether to split data into k partitions.",
)
parser.add_argument(
"--fragments", action="store_true", help="Only generate fragments (default is both)."
)
parser.add_argument(
"--node_types",
default="location,reference_object,schematic",
type=str,
help="Comma-separated types of nodes to use for recombination",
)
parser.add_argument(
"--tool1_path",
default="/private/home/rebeccaqian/minecraft/python/craftassist/text_to_tree_tool/turk_data/tool1/prompts/2_200/all_agreements.txt",
type=str,
help="Path to tool1 .txt file",
)
parser.add_argument(
"--tool2_path",
default="/private/home/rebeccaqian/minecraft/python/craftassist/text_to_tree_tool/turk_data/tool2/prompts/2_200/all_agreements.txt",
type=str,
help="Path to tool2 .txt file",
)
args = parser.parse_args()
# types of nodes we want to use for recombination
node_types = args.node_types.split(",")
if args.create_k_fold_split:
for valid_partition_idx in range(args.k):
output_dir = args.output_dir + "run_{}".format(str(valid_partition_idx)) + "/"
create_train_valid_split(valid_partition_idx, args.k, args.data_dir, output_dir)
data_dir = output_dir
write_recombined_data_chunk(
data_dir=data_dir,
output_dir=output_dir,
tool1_path=args.tool1_path,
tool2_path=args.tool2_path,
dataset_name=args.dataset_name,
node_types=node_types,
use_fragments=args.fragments,
)
else:
output_dir = args.output_dir
data_dir = args.data_dir
write_recombined_data_chunk(
data_dir=data_dir,
output_dir=output_dir,
tool1_path=args.tool1_path,
tool2_path=args.tool2_path,
dataset_name=args.dataset_name,
node_types=node_types,
use_fragments=args.fragments,
)
if __name__ == "__main__":
main()
|
spafe/features/mfcc.py | SuperKogito/cautious-palm-tree | 205 | 11077178 | <gh_stars>100-1000
import numpy as np
from ..utils.spectral import rfft, dct
from ..utils.cepstral import cms, cmvn, lifter_ceps
from ..utils.exceptions import ParameterError, ErrorMsgs
from ..utils.spectral import power_spectrum, audspec, postaud, invpostaud
from ..fbanks.mel_fbanks import inverse_mel_filter_banks, mel_filter_banks
from ..utils.preprocessing import pre_emphasis, framing, windowing, zero_handling
def mfcc(sig,
fs=16000,
num_ceps=13,
pre_emph=0,
pre_emph_coeff=0.97,
win_len=0.025,
win_hop=0.01,
win_type="hamming",
nfilts=26,
nfft=512,
low_freq=None,
high_freq=None,
scale="constant",
dct_type=2,
use_energy=False,
lifter=22,
normalize=1):
"""
Compute MFCC features (Mel-frequency cepstral coefficients) from an audio
    signal. This function offers multiple approaches to feature extraction
    depending on the input parameters. The implementation uses the FFT and is based on
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.63.8029&rep=rep1&type=pdf
- take the absolute value of the FFT
- warp to a Mel frequency scale
- take the DCT of the log-Mel-spectrum
- return the first <num_ceps> components
Args:
sig (array) : a mono audio signal (Nx1) from which to compute features.
fs (int) : the sampling frequency of the signal we are working with.
Default is 16000.
num_ceps (float) : number of cepstra to return.
Default is 13.
pre_emph (int) : apply pre-emphasis if 1.
                                 Default is 0.
pre_emph_coeff (float) : apply pre-emphasis filter [1 -pre_emph] (0 = none).
Default is 0.97.
win_len (float) : window length in sec.
Default is 0.025.
win_hop (float) : step between successive windows in sec.
Default is 0.01.
win_type (float) : window type to apply for the windowing.
Default is "hamming".
nfilts (int) : the number of filters in the filterbank.
                               Default is 26.
nfft (int) : number of FFT points.
Default is 512.
low_freq (int) : lowest band edge of mel filters (Hz).
Default is 0.
high_freq (int) : highest band edge of mel filters (Hz).
Default is samplerate / 2 = 8000.
scale (str) : choose if max bins amplitudes ascend, descend or are constant (=1).
Default is "constant".
dct_type (int) : type of DCT used - 1 or 2 (or 3 for HTK or 4 for feac).
Default is 2.
use_energy (int) : overwrite C0 with true log energy
Default is 0.
lifter (int) : apply liftering if value > 0.
Default is 22.
normalize (int) : apply normalization if 1.
                              Default is 1.
Returns:
        (array) : features - the MFCC features: num_frames x num_ceps
"""
# init freqs
high_freq = high_freq or fs / 2
low_freq = low_freq or 0
# run checks
if low_freq < 0:
raise ParameterError(ErrorMsgs["low_freq"])
if high_freq > (fs / 2):
raise ParameterError(ErrorMsgs["high_freq"])
if nfilts < num_ceps:
raise ParameterError(ErrorMsgs["nfilts"])
# pre-emphasis
if pre_emph:
sig = pre_emphasis(sig=sig, pre_emph_coeff=0.97)
# -> framing
frames, frame_length = framing(sig=sig,
fs=fs,
win_len=win_len,
win_hop=win_hop)
# -> windowing
windows = windowing(frames=frames,
frame_len=frame_length,
win_type=win_type)
# -> FFT -> |.|
fourrier_transform = rfft(x=windows, n=nfft)
    abs_fft_values = np.abs(fourrier_transform)
# -> x Mel-fbanks
mel_fbanks_mat = mel_filter_banks(nfilts=nfilts,
nfft=nfft,
fs=fs,
low_freq=low_freq,
high_freq=high_freq,
scale=scale)
features = np.dot(abs_fft_values, mel_fbanks_mat.T)
# -> log(.) -> DCT(.)
features_no_zero = zero_handling(features)
log_features = np.log(features_no_zero)
mfccs = dct(x=log_features, type=dct_type, axis=1,
norm='ortho')[:, :num_ceps]
# use energy for 1st features column
if use_energy:
# compute the power
power_frames = power_spectrum(fourrier_transform)
# compute total energy in each frame
frame_energies = np.sum(power_frames, 1)
# Handling zero enegies
energy = zero_handling(frame_energies)
mfccs[:, 0] = np.log(energy)
# liftering
if lifter > 0:
mfccs = lifter_ceps(mfccs, lifter)
    # normalization
if normalize:
mfccs = cmvn(cms(mfccs))
return mfccs
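# Minimal usage sketch (illustrative only; "speech.wav" is a hypothetical mono 16 kHz file
# and scipy is assumed to be available alongside this package):
#   from scipy.io import wavfile
#   fs, sig = wavfile.read("speech.wav")
#   feats = mfcc(sig, fs=fs, num_ceps=13)  # -> array of shape (num_frames, 13)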
def imfcc(sig,
fs=16000,
num_ceps=13,
pre_emph=0,
pre_emph_coeff=0.97,
win_len=0.025,
win_hop=0.01,
win_type="hamming",
nfilts=26,
nfft=512,
low_freq=None,
high_freq=None,
scale="constant",
dct_type=2,
use_energy=False,
lifter=22,
normalize=1):
"""
Compute Inverse MFCC features from an audio signal.
Args:
sig (array) : a mono audio signal (Nx1) from which to compute features.
fs (int) : the sampling frequency of the signal we are working with.
Default is 16000.
num_ceps (float) : number of cepstra to return.
Default is 13.
pre_emph (int) : apply pre-emphasis if 1.
                                 Default is 0.
pre_emph_coeff (float) : apply pre-emphasis filter [1 -pre_emph] (0 = none).
Default is 0.97.
win_len (float) : window length in sec.
Default is 0.025.
win_hop (float) : step between successive windows in sec.
Default is 0.01.
win_type (float) : window type to apply for the windowing.
Default is "hamming".
nfilts (int) : the number of filters in the filterbank.
                               Default is 26.
nfft (int) : number of FFT points.
Default is 512.
low_freq (int) : lowest band edge of mel filters (Hz).
Default is 0.
high_freq (int) : highest band edge of mel filters (Hz).
Default is samplerate / 2 = 8000.
scale (str) : choose if max bins amplitudes ascend, descend or are constant (=1).
Default is "constant".
dct_type (int) : type of DCT used - 1 or 2 (or 3 for HTK or 4 for feac).
Default is 2.
use_energy (int) : overwrite C0 with true log energy
Default is 0.
lifter (int) : apply liftering if value > 0.
Default is 22.
normalize (int) : apply normalization if 1.
                              Default is 1.
Returns:
        (array) : features - the IMFCC features: num_frames x num_ceps
"""
# init freqs
high_freq = high_freq or fs / 2
low_freq = low_freq or 0
# run checks
if low_freq < 0:
raise ParameterError(ErrorMsgs["low_freq"])
if high_freq > (fs / 2):
raise ParameterError(ErrorMsgs["high_freq"])
if nfilts < num_ceps:
raise ParameterError(ErrorMsgs["nfilts"])
# pre-emphasis
if pre_emph:
sig = pre_emphasis(sig=sig, pre_emph_coeff=pre_emph_coeff)
# -> framing
frames, frame_length = framing(sig=sig,
fs=fs,
win_len=win_len,
win_hop=win_hop)
# -> windowing
windows = windowing(frames=frames,
frame_len=frame_length,
win_type=win_type)
# -> FFT -> |.|
fourrier_transform = rfft(x=windows, n=nfft)
abs_fft_values = np.abs(fourrier_transform)
# -> x Mel-fbanks -> log(.) -> DCT(.)
imel_fbanks_mat = inverse_mel_filter_banks(nfilts=nfilts,
nfft=nfft,
fs=fs,
low_freq=low_freq,
high_freq=high_freq,
scale=scale)
features = np.dot(abs_fft_values, imel_fbanks_mat.T)
# -> log(.)
features_no_zero = zero_handling(features)
log_features = np.log(features_no_zero)
# -> DCT(.)
imfccs = dct(log_features, type=2, axis=1, norm='ortho')[:, :num_ceps]
# use energy for 1st features column
if use_energy:
# compute the power
power_frames = power_spectrum(fourrier_transform)
# compute total energy in each frame
frame_energies = np.sum(power_frames, 1)
# Handling zero enegies
energy = zero_handling(frame_energies)
imfccs[:, 0] = np.log(energy)
# liftering
if lifter > 0:
imfccs = lifter_ceps(imfccs, lifter)
# normalization
if normalize:
imfccs = cmvn(cms(imfccs))
return imfccs
|
srgan/predict.py | xwshi/GAN-keras | 197 | 11077209 | <gh_stars>100-1000
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Add
from keras.layers.advanced_activations import PReLU, LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from srgan import SRGAN
from PIL import Image
import numpy as np
def build_generator():
def residual_block(layer_input, filters):
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)
d = Activation('relu')(d)
d = BatchNormalization(momentum=0.8)(d)
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
d = BatchNormalization(momentum=0.8)(d)
d = Add()([d, layer_input])
return d
def deconv2d(layer_input):
u = UpSampling2D(size=2)(layer_input)
u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)
u = Activation('relu')(u)
return u
img_lr = Input(shape=[None,None,3])
    # Part 1: the incoming low-resolution image first passes through a convolution + ReLU
c1 = Conv2D(64, kernel_size=9, strides=1, padding='same')(img_lr)
c1 = Activation('relu')(c1)
    # Part 2: 16 residual blocks; each block contains two convolution + batch-normalization + ReLU stages plus a residual (skip) connection
r = residual_block(c1, 64)
for _ in range(15):
r = residual_block(r, 64)
    # Part 3: upsampling; after two upsampling steps the height and width are 4x the original, raising the resolution
c2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r)
c2 = BatchNormalization(momentum=0.8)(c2)
c2 = Add()([c2, c1])
u1 = deconv2d(c2)
u2 = deconv2d(u1)
gen_hr = Conv2D(3, kernel_size=9, strides=1, padding='same', activation='tanh')(u2)
return Model(img_lr, gen_hr)
model = build_generator()
model.load_weights(r"weights\DIV\gen_epoch38500.h5")
before_image = Image.open(r"before.png")
new_image = Image.new('RGB', before_image.size, (128,128,128))
new_image.paste(before_image)
new_image = np.array(new_image)/127.5 - 1
new_image = np.expand_dims(new_image,axis=0)
fake = (model.predict(new_image)*0.5 + 0.5)*255
fake = Image.fromarray(np.uint8(fake[0]))
fake.save("out.png")
fake.show() |
setup.py | Rosuav/vdf | 110 | 11077235 | <reponame>Rosuav/vdf
#!/usr/bin/env python
from setuptools import setup
from codecs import open
from os import path
import vdf
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='vdf',
version=vdf.__version__,
description='Library for working with Valve\'s VDF text format',
long_description=long_description,
url='https://github.com/ValvePython/vdf',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: PyPy',
],
keywords='valve keyvalue vdf tf2 dota2 csgo',
packages=['vdf'],
zip_safe=True,
)
|
SpiderKeeper/app/schedulers/common.py | fakegit/SpiderKeeper | 2,758 | 11077248 | import threading
import time
from SpiderKeeper.app import scheduler, app, agent, db
from SpiderKeeper.app.spider.model import Project, JobInstance, SpiderInstance
def sync_job_execution_status_job():
'''
sync job execution running status
:return:
'''
for project in Project.query.all():
agent.sync_job_status(project)
app.logger.debug('[sync_job_execution_status]')
def sync_spiders():
'''
sync spiders
:return:
'''
for project in Project.query.all():
spider_instance_list = agent.get_spider_list(project)
SpiderInstance.update_spider_instances(project.id, spider_instance_list)
app.logger.debug('[sync_spiders]')
def run_spider_job(job_instance_id):
'''
run spider by scheduler
:param job_instance:
:return:
'''
try:
job_instance = JobInstance.find_job_instance_by_id(job_instance_id)
agent.start_spider(job_instance)
app.logger.info('[run_spider_job][project:%s][spider_name:%s][job_instance_id:%s]' % (
job_instance.project_id, job_instance.spider_name, job_instance.id))
except Exception as e:
app.logger.error('[run_spider_job] ' + str(e))
def reload_runnable_spider_job_execution():
'''
add periodic job to scheduler
:return:
'''
running_job_ids = set([job.id for job in scheduler.get_jobs()])
# app.logger.debug('[running_job_ids] %s' % ','.join(running_job_ids))
available_job_ids = set()
# add new job to schedule
for job_instance in JobInstance.query.filter_by(enabled=0, run_type="periodic").all():
job_id = "spider_job_%s:%s" % (job_instance.id, int(time.mktime(job_instance.date_modified.timetuple())))
available_job_ids.add(job_id)
if job_id not in running_job_ids:
try:
scheduler.add_job(run_spider_job,
args=(job_instance.id,),
trigger='cron',
id=job_id,
minute=job_instance.cron_minutes,
hour=job_instance.cron_hour,
day=job_instance.cron_day_of_month,
day_of_week=job_instance.cron_day_of_week,
month=job_instance.cron_month,
second=0,
max_instances=999,
misfire_grace_time=60 * 60,
coalesce=True)
except Exception as e:
app.logger.error(
'[load_spider_job] failed {} {},may be cron expression format error '.format(job_id, str(e)))
app.logger.info('[load_spider_job][project:%s][spider_name:%s][job_instance_id:%s][job_id:%s]' % (
job_instance.project_id, job_instance.spider_name, job_instance.id, job_id))
# remove invalid jobs
for invalid_job_id in filter(lambda job_id: job_id.startswith("spider_job_"),
running_job_ids.difference(available_job_ids)):
scheduler.remove_job(invalid_job_id)
app.logger.info('[drop_spider_job][job_id:%s]' % invalid_job_id)
|
tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.py | Prithwijit-Chak/simpeg | 358 | 11077267 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Cross-gradient Joint Inversion of Gravity and Magnetic Anomaly Data
===================================================================
Here we simultaneously invert gravity and magnetic data using a cross-gradient
constraint. The recovered density and susceptibility models are expected to share
structural similarity. For this tutorial, we focus on the following:
- Defining the survey from xyz formatted data
- Generating a mesh based on survey geometry
- Including surface topography
    - Defining the inverse problem via combo objective functions (2 data misfit terms,
2 regularization terms, a coupling term and optimization)
- Specifying directives for the inversion
- Plotting the recovered model and data misfit
Although we consider gravity and magnetic anomaly data in this tutorial,
the same approach can be used to invert gradiometry and other types of geophysical data.
"""
#########################################################################
# Import modules
# --------------
#
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import tarfile
from discretize import TensorMesh
from SimPEG.utils import plot2Ddata, surface2ind_topo
from SimPEG.potential_fields import gravity, magnetics
from SimPEG import (
maps,
data,
data_misfit,
inverse_problem,
regularization,
optimization,
directives,
inversion,
utils,
)
np.random.seed(0)
#############################################
# Define File Names
# -----------------
#
# File paths for assets we are loading. To set up the inversion, we require
# topography and field observations. The true model defined on the whole mesh
# is loaded to compare with the inversion result. These files are stored as a
# tar-file on our google cloud bucket:
# "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz"
# storage bucket where we have the data
data_source = (
"https://storage.googleapis.com/simpeg/doc-assets/cross_gradient_data.tar.gz"
)
# download the data
downloaded_data = utils.download(data_source, overwrite=True)
# unzip the tarfile
tar = tarfile.open(downloaded_data, "r")
tar.extractall()
tar.close()
# path to the directory containing our data
dir_path = downloaded_data.split(".")[0] + os.path.sep
# files to work with
topo_filename = dir_path + "topo.txt"
model_filename = dir_path + "true_model.txt"
#############################################
# Load Data and Plot
# ------------------
#
# Here we load and plot the synthetic gravity and magnetic anomaly data. Topography
# is generally defined as an (N, 3) array. Potential field data are generally defined
# with 4 columns: x, y, z and data.
#
# Load topography
xyz_topo = np.loadtxt(topo_filename)
# Load field data
dobs_grav = np.loadtxt(dir_path + "gravity_data.obs")
dobs_mag = np.loadtxt(dir_path + "magnetic_data.obs")
# Define receiver locations and observed data
receiver_locations = dobs_grav[:, 0:3]
dobs_grav = dobs_grav[:, -1]
dobs_mag = dobs_mag[:, -1]
# Plot
mpl.rcParams.update({"font.size": 12})
# gravity data
fig = plt.figure(figsize=(7, 5))
ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85])
plot2Ddata(receiver_locations, dobs_grav, ax=ax1, contourOpts={"cmap": "bwr"})
ax1.set_title("Gravity Anomaly")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("y (m)")
ax2 = fig.add_axes([0.8, 0.1, 0.03, 0.85])
norm = mpl.colors.Normalize(
vmin=-np.max(np.abs(dobs_grav)), vmax=np.max(np.abs(dobs_grav))
)
cbar = mpl.colorbar.ColorbarBase(
ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e"
)
cbar.set_label("$mgal$", rotation=270, labelpad=15, size=12)
# magnetic data
fig = plt.figure(figsize=(7, 5))
ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85])
plot2Ddata(receiver_locations, dobs_mag, ax=ax1, contourOpts={"cmap": "bwr"})
ax1.set_title("Magnetic Anomaly")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("y (m)")
ax2 = fig.add_axes([0.8, 0.1, 0.03, 0.85])
norm = mpl.colors.Normalize(
vmin=-np.max(np.abs(dobs_mag)), vmax=np.max(np.abs(dobs_mag))
)
cbar = mpl.colorbar.ColorbarBase(
ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e"
)
cbar.set_label("$nT$", rotation=270, labelpad=15, size=12)
plt.show()
#############################################
# Assign Uncertainties
# --------------------
#
# Inversion with SimPEG requires that we define standard deviation on our data.
# This represents our estimate of the noise in our data. For gravity inversion,
# a constant floor value is generally applied to all data. For this tutorial,
# the standard deviation on each datum will be 1% of the maximum observed
# gravity anomaly value. For magnetic inversion, the same strategy is performed.
#
maximum_anomaly_grav = np.max(np.abs(dobs_grav))
uncertainties_grav = 0.01 * maximum_anomaly_grav * np.ones(np.shape(dobs_grav))
maximum_anomaly_mag = np.max(np.abs(dobs_mag))
uncertainties_mag = 0.01 * maximum_anomaly_mag * np.ones(np.shape(dobs_mag))
#############################################
# Defining the Survey
# -------------------
#
# Here, we define the surveys that will be used for this tutorial. Gravity
# surveys are simple to create. The user only needs an (N, 3) array to define
# the xyz locations of the observation locations. From this, the user can
# define the receivers and the source field.
#
# Define the receivers. The data consist of vertical gravity anomaly measurements.
# The set of receivers must be defined as a list.
receiver_grav = gravity.receivers.Point(receiver_locations, components="gz")
# Define the source field and survey for gravity data
source_field_grav = gravity.sources.SourceField(receiver_list=[receiver_grav])
survey_grav = gravity.survey.Survey(source_field_grav)
# Define the component(s) of the field we want to simulate as a list of strings.
# Here we simulate total magnetic intensity (TMI) data.
components = ["tmi"]
# Use the observation locations and components to define the receivers. To
# simulate data, the receivers must be defined as a list.
receiver_mag = magnetics.receivers.Point(receiver_locations, components=components)
# Define the inducing field H0 = (intensity [nT], inclination [deg], declination [deg])
inclination = 90
declination = 0
strength = 50000
inducing_field = (strength, inclination, declination)
# Define the source field and survey for magnetic data
source_field_mag = magnetics.sources.SourceField(
receiver_list=[receiver_mag], parameters=inducing_field
)
survey_mag = magnetics.survey.Survey(source_field_mag)
#############################################
# Defining the Data
# -----------------
#
# Here is where we define the data that are inverted. The data are defined by
# the survey, the observation values and the standard deviation.
#
data_object_grav = data.Data(
survey_grav, dobs=dobs_grav, standard_deviation=uncertainties_grav
)
data_object_mag = data.Data(
survey_mag, dobs=dobs_mag, standard_deviation=uncertainties_mag
)
#############################################
# Defining a Tensor Mesh
# ----------------------
#
# Here, we create the tensor mesh that will be used to invert both the gravity
# and magnetic anomaly data. If desired, we could define an OcTree mesh.
#
dh = 5.0
hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
hz = [(dh, 5, -1.3), (dh, 15)]
mesh = TensorMesh([hx, hy, hz], "CCN")
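# Note on the cell specification above (discretize convention): each tuple is
# (cell width, number of cells, expansion factor). For example, (dh, 5, -1.3)
# adds 5 padding cells that grow geometrically by a factor of 1.3 toward the
# negative end of the axis, while (dh, 40) adds 40 uniform 5 m core cells.
# The "CCN" origin string centers the mesh in x and y and places its top at z = 0.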
########################################################
# Starting/Reference Model and Mapping on Tensor Mesh
# ---------------------------------------------------
#
# Here, we create starting and/or reference models for the inversion as
# well as the mapping from the model space to the active cells. Starting and
# reference models can be a constant background value or contain a-priori
# structures. Here, the backgrounds are 1e-6 g/cc and 1e-6 SI for density and
# susceptibility models, respectively. Note that the background values could
# be different for density and susceptibility models.
#
# Define density contrast values for each unit in g/cc.
background_dens, background_susc = 1e-6, 1e-6
# Find the indices of the active cells in the forward model (ones below the surface)
ind_active = surface2ind_topo(mesh, xyz_topo)
# Define mapping from model to active cells
nC = int(ind_active.sum())
model_map = maps.IdentityMap(nP=nC) # model consists of a value for each active cell
# Create a Wires map that splits the stacked model vector into its individual
# components: the first block is the density model, the second the susceptibility model
wires = maps.Wires(("density", nC), ("susceptibility", nC))
# Define and plot starting model
starting_model = np.r_[background_dens * np.ones(nC), background_susc * np.ones(nC)]
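# As a quick sanity check (illustrative only, assuming SimPEG's Wires projection
# semantics), each wire should recover its own block from the stacked vector.
assert np.allclose(wires.density * starting_model, background_dens)
assert np.allclose(wires.susceptibility * starting_model, background_susc)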
##############################################
# Define the Physics
# ------------------
#
# Here, we define the physics of the gravity and magnetic problems by using the simulation
# class.
#
simulation_grav = gravity.simulation.Simulation3DIntegral(
survey=survey_grav, mesh=mesh, rhoMap=wires.density, actInd=ind_active
)
simulation_mag = magnetics.simulation.Simulation3DIntegral(
survey=survey_mag,
mesh=mesh,
model_type="scalar",
chiMap=wires.susceptibility,
actInd=ind_active,
)
#######################################################################
# Define the Inverse Problem
# --------------------------
#
# The inverse problem is defined by 4 things:
#
# 1) Data Misfit: a measure of how well our recovered model explains the field data
# 2) Regularization: constraints placed on the recovered model and a priori information
# 3) Coupling: a term that connects the two different physical property models
# 4) Optimization: the numerical approach used to solve the inverse problem
#
# Define the data misfit. Here the data misfit is the L2 norm of the weighted
# residual between the observed data and the data predicted for a given model.
# Within the data misfit, the residual between the predicted and observed data is
# normalized by the data's standard deviation.
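# In broad strokes, each misfit term below has the form
#     phi_d(m) = 0.5 * || W_d (F[m] - d_obs) ||^2,
# where F[m] is the simulated data for the model m and W_d is a diagonal matrix
# holding the reciprocals of the standard deviations assigned earlier.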
dmis_grav = data_misfit.L2DataMisfit(data=data_object_grav, simulation=simulation_grav)
dmis_mag = data_misfit.L2DataMisfit(data=data_object_mag, simulation=simulation_mag)
# Define the regularization (model objective function).
reg_grav = regularization.Simple(mesh, indActive=ind_active, mapping=wires.density)
reg_mag = regularization.Simple(
mesh, indActive=ind_active, mapping=wires.susceptibility
)
# Define the coupling term to connect two different physical property models
lamda = 2e12 # weight for coupling term
cross_grad = regularization.CrossGradient(mesh, wires, indActive=ind_active)
# combo
dmis = dmis_grav + dmis_mag
reg = reg_grav + reg_mag + lamda * cross_grad
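# Schematically, the joint inversion below minimizes
#     phi(m) = phi_d_grav(m) + phi_d_mag(m)
#              + beta * [ phi_m_grav(m) + phi_m_mag(m) + lamda * phi_cg(m) ],
# where beta is the trade-off parameter estimated and cooled by the directives
# defined further down, and phi_cg is the cross-gradient coupling term.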
# Define how the optimization problem is solved. Here we will use a projected
# Gauss-Newton approach that employs the conjugate gradient solver.
opt = optimization.ProjectedGNCG(
maxIter=10,
lower=-2.0,
upper=2.0,
maxIterLS=20,
maxIterCG=100,
tolCG=1e-3,
tolX=1e-3,
)
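# Note: the lower/upper bounds above are applied element-wise to the stacked model,
# i.e. to both the density (g/cc) and susceptibility (SI) blocks.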
# Here we define the inverse problem that is to be solved
inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)
#######################################################################
# Define Inversion Directives
# ---------------------------
#
# Here we define any directives that are carried out during the inversion. This
# includes the cooling schedule for the trade-off parameter (beta), stopping
# criteria for the inversion and saving inversion results at each iteration.
#
# Defining a starting value for the trade-off parameter (beta) between the data
# misfit and the regularization.
starting_beta = directives.PairedBetaEstimate_ByEig(beta0_ratio=1e0)
# starting_beta.n_pw_iter = 10
# Defining the fractional decrease in beta and the number of Gauss-Newton solves
# for each beta value.
beta_schedule = directives.PairedBetaSchedule(cooling_factor=5, cooling_rate=1)
# Options for outputting recovered models and predicted data for each beta.
save_iteration = directives.SimilarityMeasureSaveOutputEveryIteration(save_txt=False)
joint_inv_dir = directives.SimilarityMeasureInversionDirective()
stopping = directives.MovingAndMultiTargetStopping(tol=1e-6)
sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False)
# Update the preconditioner if it is model-dependent.
update_jacobi = directives.UpdatePreconditioner()
# The directives are defined as a list.
directives_list = [
joint_inv_dir,
sensitivity_weights,
stopping,
starting_beta,
beta_schedule,
save_iteration,
update_jacobi,
]
#####################################################################
# Running the Inversion
# ---------------------
#
# To define the inversion object, we need to define the inversion problem and
# the set of directives. We can then run the inversion.
#
# Here we combine the inverse problem and the set of directives
inv = inversion.BaseInversion(inv_prob, directives_list)
# Run inversion
recovered_model = inv.run(starting_model)
############################################################
# Plotting True Model and Recovered Model
# ---------------------------------------
#
# Load the true model (which was defined on the whole mesh) and extract only the
# values on the active cells.
true_model_dens = np.loadtxt(dir_path + "true_model_dens.txt")
true_model_dens[~ind_active] = np.NaN
true_model_susc = np.loadtxt(dir_path + "true_model_susc.txt")
true_model_susc[~ind_active] = np.NaN
# Plot True Model
fig = plt.figure(figsize=(9, 8))
ax1 = plt.subplot(211)
(im,) = mesh.plot_slice(true_model_dens, normal="Y", ax=ax1, grid=True)
ax1.set_title("True density model slice at y = 0 m")
cbar = plt.colorbar(im, format="%.1e")
cbar.set_label("g/cc", rotation=270, labelpad=15, size=12)
ax2 = plt.subplot(212)
(im,) = mesh.plot_slice(
true_model_susc, normal="Y", ax=ax2, grid=True, pcolor_opts={"cmap": "inferno"}
)
ax2.set_title("True susceptibility model slice at y = 0 m")
cbar = plt.colorbar(im, format="%.1e")
cbar.set_label("SI", rotation=270, labelpad=15, size=12)
plt.tight_layout()
plt.show()
# Plot Recovered Model
m_dens_joint, m_susc_joint = wires * recovered_model
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
fig = plt.figure(figsize=(9, 8))
ax1 = plt.subplot(211)
(im,) = mesh.plot_slice(
plotting_map * m_dens_joint, normal="Y", ax=ax1, clim=(-0.04, 0.03),
)
ax1.set_title("Density model slice at y = 0 m")
cbar = plt.colorbar(im)
cbar.set_label("g/cc", rotation=270, labelpad=15, size=12)
ax2 = plt.subplot(212)
(im,) = mesh.plot_slice(
plotting_map * m_susc_joint, normal="Y", ax=ax2, pcolor_opts={"cmap": "inferno"}
)
ax2.set_title("Susceptibility model slice at y = 0 m")
cbar = plt.colorbar(im)
cbar.set_label("SI", rotation=270, labelpad=15, size=12)
plt.tight_layout()
plt.show()
############################################################
# Comparing Jointly and Separately Recovered Models
# --------------------------------------------------
#
# Normalized Cross Gradient of Jointly Recovered Susceptibility and Density Models
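# For reference: the cross-gradient measures structural similarity cell by cell via
#     t(m1, m2) = grad(m1) x grad(m2),
# and the normalized version divides by |grad(m1)| |grad(m2)|, so values near zero
# indicate (anti)parallel gradients, i.e. structurally similar models.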
ncg = cross_grad.calculate_cross_gradient(recovered_model, normalized=True)
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
(im,) = mesh.plot_slice(plotting_map * ncg, normal="Y", ax=ax, grid=True,)
ax.set_title("Normalized cross gradient for joint inversion slice at y = 0 m")
cbar = plt.colorbar(im, format="%.1e")
cbar.set_label("|cross grad|", rotation=270, labelpad=15, size=12)
plt.show()
# Normalized Cross Gradient of Separately Recovered Susceptibility and Density Models
m_dens_single = np.loadtxt(dir_path + "single_model_dens.txt")
m_susc_single = np.loadtxt(dir_path + "single_model_susc.txt")
m_separate = np.r_[m_dens_single[ind_active], m_susc_single[ind_active]]
ncg_single = cross_grad.calculate_cross_gradient(m_separate, normalized=True)
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
(im,) = mesh.plot_slice(plotting_map * ncg_single, normal="Y", ax=ax, grid=True,)
ax.set_title("Normalized cross gradient for separate inversion slice at y = 0 m")
cbar = plt.colorbar(im, format="%.1e")
cbar.set_label("|cross grad|", rotation=270, labelpad=15, size=12)
plt.show()
# Cross Plots Recovered Susceptibility and Density Models
fig = plt.figure(figsize=(14, 5))
ax0 = plt.subplot(121)
ax0.scatter(
plotting_map * m_dens_joint, plotting_map * m_susc_joint, s=4, c="black", alpha=0.1
)
ax0.set_xlabel("Density", size=12)
ax0.set_ylabel("Susceptibility", size=12)
ax0.tick_params(labelsize=12)
ax0.set_title("Joint inversion")
ax1 = plt.subplot(122)
ax1.scatter(m_dens_single, m_susc_single, s=4, c="black", alpha=0.1)
ax1.set_xlabel("Density", size=12)
ax1.set_ylabel("Susceptibility", size=12)
ax1.tick_params(labelsize=12)
ax1.set_title("Separate inversion")
plt.show()
|
hooks/install.py | guiled/Acktie-Mobile-Android-Barcode | 130 | 11077269 | <filename>hooks/install.py
#!/usr/bin/env python
#
# This is the module install hook that will be
# called when your module is first installed
#
import os, sys
def main(args,argc):
# TODO: write your install hook here (optional)
# exit
sys.exit(0)
if __name__ == '__main__':
main(sys.argv,len(sys.argv))
|
aztk/client/cluster/helpers/list.py | Geims83/aztk | 161 | 11077297 | <filename>aztk/client/cluster/helpers/list.py
from aztk import models
from aztk.utils import constants
def list_clusters(cluster_client, software_metadata_key):
"""
        List all the clusters on your account.
"""
pools = cluster_client.batch_client.pool.list()
software_metadata = (constants.AZTK_SOFTWARE_METADATA_KEY, software_metadata_key)
cluster_metadata = (constants.AZTK_MODE_METADATA_KEY, constants.AZTK_CLUSTER_MODE_METADATA)
aztk_clusters = []
for pool in [pool for pool in pools if pool.metadata]:
pool_metadata = [(metadata.name, metadata.value) for metadata in pool.metadata]
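        # Keep only pools whose metadata marks them as AZTK clusters (both the
        # software and cluster-mode metadata entries must be present).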
if all([metadata in pool_metadata for metadata in [software_metadata, cluster_metadata]]):
aztk_clusters.append(models.Cluster(pool))
return aztk_clusters
|
azure-iot-device/tests/iothub/pipeline/test_config.py | danewalton/azure-iot-sdk-python | 366 | 11077314 | <gh_stars>100-1000
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import logging
from tests.common.pipeline.config_test import PipelineConfigInstantiationTestBase
from azure.iot.device.iothub.pipeline.config import IoTHubPipelineConfig
device_id = "my_device"
module_id = "my_module"
hostname = "hostname.some-domain.net"
product_info = "some_info"
@pytest.mark.describe("IoTHubPipelineConfig - Instantiation")
class TestIoTHubPipelineConfigInstantiation(PipelineConfigInstantiationTestBase):
# This fixture is needed for tests inherited from the parent class
@pytest.fixture
def config_cls(self):
return IoTHubPipelineConfig
# This fixture is needed for tests inherited from the parent class
@pytest.fixture
def required_kwargs(self):
return {"device_id": device_id, "hostname": hostname}
    # The parent class defines the auth mechanism fixtures (sastoken, x509).
    # For ease of testing, we will assume sastoken is being used unless there is
    # a strict need to do otherwise; it does not matter which mechanism is used
    # for the purposes of these tests.
@pytest.mark.it(
"Instantiates with the 'device_id' attribute set to the provided 'device_id' paramater"
)
def test_device_id_set(self, sastoken):
config = IoTHubPipelineConfig(device_id=device_id, hostname=hostname, sastoken=sastoken)
assert config.device_id == device_id
@pytest.mark.it(
"Instantiates with the 'module_id' attribute set to the provided 'module_id' paramater"
)
def test_module_id_set(self, sastoken):
config = IoTHubPipelineConfig(
device_id=device_id, module_id=module_id, hostname=hostname, sastoken=sastoken
)
assert config.module_id == module_id
@pytest.mark.it(
"Instantiates with the 'module_id' attribute set to 'None' if no 'module_id' paramater is provided"
)
def test_module_id_default(self, sastoken):
config = IoTHubPipelineConfig(device_id=device_id, hostname=hostname, sastoken=sastoken)
assert config.module_id is None
@pytest.mark.it(
"Instantiates with the 'product_info' attribute set to the provided 'product_info' parameter"
)
def test_product_info_set(self, sastoken):
config = IoTHubPipelineConfig(
device_id=device_id, hostname=hostname, product_info=product_info, sastoken=sastoken
)
assert config.product_info == product_info
@pytest.mark.it(
"Instantiates with the 'product_info' attribute defaulting to empty string if no 'product_info' paramater is provided"
)
def test_product_info_default(self, sastoken):
config = IoTHubPipelineConfig(device_id=device_id, hostname=hostname, sastoken=sastoken)
assert config.product_info == ""
@pytest.mark.it("Instantiates with the 'blob_upload' attribute set to False")
def test_blob_upload(self, sastoken):
config = IoTHubPipelineConfig(device_id=device_id, hostname=hostname, sastoken=sastoken)
assert config.blob_upload is False
@pytest.mark.it("Instantiates with the 'method_invoke' attribute set to False")
def test_method_invoke(self, sastoken):
config = IoTHubPipelineConfig(device_id=device_id, hostname=hostname, sastoken=sastoken)
assert config.method_invoke is False
|
venv/Lib/site-packages/isapi/simple.py | ajayiagbebaku/NFL-Model | 150 | 11077325 | """Simple base-classes for extensions and filters.
None of the filter and extension functions are considered 'optional' by the
framework. These base-classes provide simple implementations for the
Initialize and Terminate functions, allowing you to omit them.
It is not necessary to use these base-classes - but if you don't, you
must ensure each of the required methods is implemented.
"""
class SimpleExtension:
"Base class for a simple ISAPI extension"
def __init__(self):
pass
def GetExtensionVersion(self, vi):
"""Called by the ISAPI framework to get the extension version
        The default implementation uses the class's docstring to
        set the extension description."""
# nod to our reload capability - vi is None when we are reloaded.
if vi is not None:
vi.ExtensionDesc = self.__doc__
def HttpExtensionProc(self, control_block):
"""Called by the ISAPI framework for each extension request.
sub-classes must provide an implementation for this method.
"""
raise NotImplementedError("sub-classes should override HttpExtensionProc")
def TerminateExtension(self, status):
"""Called by the ISAPI framework as the extension terminates."""
pass
class SimpleFilter:
"Base class for a a simple ISAPI filter"
filter_flags = None
def __init__(self):
pass
def GetFilterVersion(self, fv):
"""Called by the ISAPI framework to get the extension version
The default implementation uses the classes docstring to
set the extension description, and uses the classes
filter_flags attribute to set the ISAPI filter flags - you
must specify filter_flags in your class.
"""
if self.filter_flags is None:
raise RuntimeError("You must specify the filter flags")
# nod to our reload capability - fv is None when we are reloaded.
if fv is not None:
fv.Flags = self.filter_flags
fv.FilterDesc = self.__doc__
def HttpFilterProc(self, fc):
"""Called by the ISAPI framework for each filter request.
sub-classes must provide an implementation for this method.
"""
        raise NotImplementedError("sub-classes should override HttpFilterProc")
def TerminateFilter(self, status):
"""Called by the ISAPI framework as the filter terminates."""
pass
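# Illustrative sketch (not part of the original module): a minimal extension can
# subclass SimpleExtension and override HttpExtensionProc. The ECB method names
# and the HSE_STATUS_SUCCESS constant below are assumptions taken from the
# pywin32 isapi samples; adapt them to your environment.
from isapi import isapicon
class HelloWorldExtension(SimpleExtension):
    "A trivial extension that returns a plain-text greeting"
    def HttpExtensionProc(self, ecb):
        # Send a minimal response and signal that the request is complete.
        ecb.SendResponseHeaders("200 OK", "Content-Type: text/plain\r\n\r\n", False)
        ecb.WriteClient(b"Hello from HelloWorldExtension")
        ecb.DoneWithSession()
        return isapicon.HSE_STATUS_SUCCESS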
|
solthiruthi/solthiruthi.py | aathi2002/open-tamil | 218 | 11077359 | ## -*- coding: utf-8 -*-
## (C) 2015 <NAME>,
##
from __future__ import print_function
import argparse
import sys
import tamil
# from pprint import pprint
PYTHON3 = sys.version[0] == "3"
class Solthiruthi:
@staticmethod
def get_CLI_options(do_parse=True, DEBUG=False):
# Ref: ArgParse doc - https://docs.python.org/dev/library/argparse.html
# parse and get CLI options. Set do_parse = False for testing
parser = argparse.ArgumentParser()
parser.add_argument("-files", default="", nargs="*")
parser.add_argument(
"-dialects",
default=[u"std"],
nargs="*",
choices=(u"std", u"ceylon", u"kovai", u"nellai", u"chennai"),
)
parser.add_argument(
"-Dictionary",
default=[u"std"],
nargs="*",
choices=(u"std", u"wikipedia", u"madurai"),
)
parser.add_argument(
"-nalt",
default=10,
type=int,
help=u"number of alternative suggestions for wrong type",
)
parser.add_argument(
"-debug", default=False, help="enable debugging information on screen"
)
parser.add_argument(
"-stdin",
default=False,
const=True,
nargs="?",
help="read input from the standard input",
)
parser.add_argument(
"-auto",
default=False,
const=True,
nargs="?",
help="write output as suitable for testing",
)
parser.add_argument(
"-help", default=False, const=True, nargs="?", help="show help and exit"
)
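        # Illustrative invocations (not from the original source; the flags mirror
        # the definitions above):
        #   python solthiruthi.py -files input.txt -nalt 5 -dialects std
        #   cat input.txt | python solthiruthi.py -stdin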
if do_parse:
args = parser.parse_args()
else:
args = None
if DEBUG:
print(u"files = %s" % u"|".join(args.files))
print(u"help = %s" % str(args.help))
print(u"dialects = %s" % u"|".join(args.dialects))
print(u"Dictionary = %s" % u"|".join(args.Dictionary))
print(u"nalt = %d" % args.nalt)
print(u"IN = %s" % str(args.stdin))
return args, parser
if __name__ == u"__main__":
args, parser = Solthiruthi.get_CLI_options(DEBUG=False)
if (len(args.files) == 0 and not args.stdin) or args.help:
parser.print_help()
sys.exit(-1)
|
mint/utils/inputs_util.py | wqliu657/mint | 241 | 11077366 | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions for creating inputs."""
import tensorflow as tf
def get_modality_to_param_dict(dataset_config):
"""Creates a map from modality name to modality parameters."""
modality_to_param_dict = {}
for modality in dataset_config.modality:
modality_type = modality.WhichOneof("modality")
if modality_type == "general_modality":
modality = modality.general_modality
modality_to_param_dict[modality.feature_name] = {}
modality_to_param_dict[
modality.feature_name]["feature_dim"] = modality.dimension
modality_to_param_dict[modality.feature_name]["input_length"] = int(
dataset_config.input_length_sec * modality.sample_rate)
modality_to_param_dict[modality.feature_name]["target_length"] = int(
dataset_config.target_length_sec * modality.sample_rate)
modality_to_param_dict[modality.feature_name]["target_shift"] = int(
dataset_config.target_shift_sec * modality.sample_rate)
modality_to_param_dict[
modality.feature_name]["sample_rate"] = modality.sample_rate
# Raw image specific parameters.
modality_to_param_dict[modality.feature_name]["resize"] = modality.resize
modality_to_param_dict[
modality.feature_name]["crop_size"] = modality.crop_size
elif modality_type == "raw_text":
modality_to_param_dict[modality.feature_name] = {}
else:
raise ValueError("Unknown modality type:", modality_type)
return modality_to_param_dict
def preprocess_labels(example, dataset_config):
"""Preprocess labels to one_hot encoding."""
target = example.pop(dataset_config.data_target_field)
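  # Each sparse label id is one-hot encoded, then reduce_max over the label axis
  # collapses the per-label one-hot rows into a single multi-hot target vector
  # (e.g. labels [1, 3] with 5 categories become [0, 1, 0, 1, 0]).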
example["target"] = tf.reduce_max(
tf.one_hot(
tf.sparse.to_dense(target),
depth=dataset_config.target_num_categories),
axis=0)
return example
def fact_preprocessing(example, modality_to_params, is_training):
"""Preprocess data for FACT model."""
motion_seq_length = tf.shape(example["motion_sequence"])[0]
motion_input_length = modality_to_params["motion"]["input_length"]
motion_target_length = modality_to_params["motion"]["target_length"]
motion_target_shift = modality_to_params["motion"]["target_shift"]
audio_input_length = modality_to_params["audio"]["input_length"]
motion_dim = modality_to_params["motion"]["feature_dim"]
audio_dim = modality_to_params["audio"]["feature_dim"]
# Pad the input motion translation from 3-dim to 9-dim.
motion_dim += 6
example["motion_sequence"] = tf.pad(example["motion_sequence"],
[[0, 0], [6, 0]])
if is_training:
windows_size = tf.maximum(motion_input_length,
motion_target_shift + motion_target_length)
windows_size = tf.maximum(windows_size, audio_input_length)
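    # The random crop window must be long enough to cover the motion input, the
    # shifted motion target, and the audio input, so one start index can be
    # shared by all three slices below.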
# the start frame id for this window.
start = tf.random.uniform([],
0,
motion_seq_length - windows_size + 1,
dtype=tf.int32)
else:
start = 0
# motion input: [start, start + motion_input_length)
example["motion_input"] = example["motion_sequence"][start:start +
motion_input_length, :]
example["motion_input"].set_shape([motion_input_length, motion_dim])
if is_training:
# motion target: [start + shift, start + shift + motion_target_length)
example["target"] = example["motion_sequence"][start +
motion_target_shift:start +
motion_target_shift +
motion_target_length, :]
example["target"].set_shape([motion_target_length, motion_dim])
del example["motion_sequence"]
if is_training:
# audio input: [start, start + audio_input_length)
example["audio_input"] = example["audio_sequence"][start:start +
audio_input_length, :]
example["audio_input"].set_shape([audio_input_length, audio_dim])
else:
example["audio_input"] = example["audio_sequence"]
del example["audio_sequence"]
return example
|