max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
deeppy/expr/graph/util.py | purushothamgowthu/deeppy | 1,170 | 11176081 |
import os
import tempfile
import subprocess
import numpy as np
from ..base import Output
from . import digraph
from .exprgraph import (
ExprSplit, ExprGraph, build_graph, _require_list, node_exception_msg,
traceback_str
)
def draw(sinks, filepath, omit_splits=True, emph_nodes=[]):
# Requires: networkx, graphviz, pygraphviz
import networkx as nx
sinks = _require_list(sinks)
graph = build_graph(sinks)
nx_graph = nx.DiGraph()
node_labels = {}
graph = digraph.copy(graph)
if omit_splits:
for node in list(graph.nodes()):
if isinstance(node, (ExprSplit, Output)):
for neighbor, _ in list(graph.in_edges([node])):
graph.remove_edge(neighbor, node)
for _, in_neighbor in list(graph.edges([node])):
graph.add_edge(neighbor, in_neighbor)
for _, neighbor in list(graph.edges([node])):
graph.remove_edge(node, neighbor)
graph.remove_node(node)
for node in digraph.topsort(graph):
label = node.__class__.__name__
if label not in node_labels:
node_labels[label] = 0
else:
node_labels[label] += 1
label += ' #' + str(node_labels[label])
if node.shape is not None:
label += ' ' + str(node.shape)
color = 'black' if node.bpropable else 'grey'
nx_graph.add_node(node, label=label, color=color)
for node in emph_nodes:
nx_graph.add_node(node, color='red')
nx_graph.add_edges_from(graph.edges())
for node in nx_graph.nodes():
if not node.bpropable:
for _, neighbor in nx_graph.edges([node]):
nx_graph.add_edge(node, neighbor, color='grey')
_, tmpfilepath = tempfile.mkstemp(suffix='.dot')
nx.drawing.nx_agraph.write_dot(nx_graph, tmpfilepath)
subprocess.call(['dot', '-Tpdf', tmpfilepath, '-o', filepath])
os.remove(tmpfilepath)
class DebugExprGraph(ExprGraph):
def _setup_nodes(self, nodes):
visited = []
for node in nodes:
try:
node.setup()
visited.append(node.__class__.__name__)
except:
draw(self.sinks, 'debug_setup_trace.pdf', omit_splits=True,
emph_nodes=[node])
raise Exception('\n' + traceback_str() + '\n\n' +
node_exception_msg(node) +
'\n\nNodes visited:\n' + str(visited))
def fprop(self):
visited = []
for node in self._fprop_top:
try:
node.fprop()
visited.append(node.__class__.__name__)
except:
draw(self.sinks, 'debug_fprop_trace.pdf', omit_splits=True,
emph_nodes=[node])
raise Exception('\n' + traceback_str() +
'\n' + node_exception_msg(node) +
'\n\nNodes visited:\n' + str(visited))
def bprop(self):
visited = []
for node in self._bprop_top:
try:
node.bprop()
visited.append(node.__class__.__name__)
except:
draw(self.sinks, 'debug_bprop_trace.pdf', omit_splits=True,
emph_nodes=[node])
raise Exception('\n' + traceback_str() +
'\n' + node_exception_msg(node) +
'\n\nNodes visited:\n' + str(visited))
class NANGuardExprGraph(ExprGraph):
def fprop(self):
visited = []
for node in self._fprop_top:
node.fprop()
if node.array is not None:
arr = np.array(node.array)
if np.any(np.isnan(arr) + np.isinf(arr)):
draw(self.sinks, 'debug_fprop_nan.pdf', omit_splits=True,
emph_nodes=[node])
raise Exception('\n' + traceback_str() +
'\n' + node_exception_msg(node) +
'\n\nNodes visited:\n' + str(visited))
visited.append(node.__class__.__name__)
def bprop(self):
visited = []
for node in self._bprop_top:
if node.grad_array is not None:
arr = np.array(node.grad_array)
if np.any(np.isnan(arr) + np.isinf(arr)):
draw(self.sinks, 'debug_bprop_nan.pdf', omit_splits=True,
emph_nodes=[node])
raise Exception('\n' + traceback_str() +
'\n' + node_exception_msg(node) +
'\n\nNodes visited:\n' + str(visited))
node.bprop()
visited.append(node.__class__.__name__)
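# Hedged usage sketch: rendering an expression graph to PDF with draw().
# Here `sinks` stands for the terminal expression node(s) of an already-built
# deeppy graph and `suspect_node` is an illustrative node to highlight; the
# Graphviz `dot` binary must be available on PATH.
#
#   draw(sinks, 'expr_graph.pdf')
#   draw(sinks, 'expr_debug.pdf', emph_nodes=[suspect_node])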
|
convlab2/dst/trade/multiwoz/__init__.py | Malavikka/ConvLab-2 | 339 | 11176118 | from convlab2.dst.trade.multiwoz.trade import MultiWOZTRADE as TRADE
|
setup_metadata.py | timgates42/Py2C | 149 | 11176121 | #!/usr/bin/python3
"""Read options from [metadata] section from setup.cfg
"""
from os.path import dirname, abspath, join
from configparser import ConfigParser
THIS_FILE = __file__
def get_metadata():
"""Read the [metadata] section of setup.cfg and return it as a dict.
"""
parser = ConfigParser()
parser.read(_get_cfg_fname())
options = dict(parser.items("metadata"))
# return options
return _normalize(options)
def _get_cfg_fname():
return join(dirname(abspath(THIS_FILE)), "setup.cfg")
def _normalize(options):
"""Return correct kwargs for setup() from provided options-dict.
"""
retval = {
key.replace("-", "_"): value for key, value in options.items()
}
# Classifiers
value = retval.pop("classifiers", None)
if value and isinstance(value, str):
classifiers = value.splitlines()
while "" in classifiers:
classifiers.remove("")
retval["classifiers"] = classifiers
# Long description from file
description_file = retval.pop("long_description_file", None)
if description_file:
try:
with open(description_file) as fdesc:
retval["long_description"] = fdesc.read()
except IOError:
retval["long_description"] = "Read the accompanying {}".format(
description_file
)
return retval
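# Hedged usage sketch: a setup.py in the same directory could forward the
# parsed metadata straight to setuptools, e.g.
# setup(packages=find_packages(), **get_metadata()). Only the wiring below is
# illustrative; get_metadata() itself assumes a setup.cfg with a [metadata]
# section next to this file, as the module already expects.
if __name__ == "__main__":
    # Show the keyword arguments that would be passed to setuptools.setup().
    from pprint import pprint
    pprint(get_metadata())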
|
monkeylearn/classification.py | monkeylearn/monkeylearn-python | 169 | 11176128 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import
from six.moves import range
from monkeylearn.base import ModelEndpointSet
from monkeylearn.response import MonkeyLearnResponse
from monkeylearn.settings import DEFAULT_BATCH_SIZE
from monkeylearn.validation import validate_batch_size, validate_order_by_param
class Classification(ModelEndpointSet):
model_type = 'classifiers'
@property
def tags(self):
if not hasattr(self, '_tags'):
self._tags = Tags(self.token, self.base_url)
return self._tags
def list(self, page=None, per_page=None, order_by=None, retry_if_throttled=True):
if order_by is not None:
order_by = validate_order_by_param(order_by)
query_string = self.remove_none_value(dict(
page=page,
per_page=per_page,
order_by=order_by,
))
url = self.get_list_url(query_string=query_string)
response = self.make_request('GET', url, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def detail(self, model_id, retry_if_throttled=True):
url = self.get_detail_url(model_id)
response = self.make_request('GET', url, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def edit(self, model_id, name=None, description=None, algorithm=None, language=None,
max_features=None, ngram_range=None, use_stemming=None, preprocess_numbers=None,
preprocess_social_media=None, normalize_weights=None, stopwords=None,
whitelist=None, retry_if_throttled=True):
data = self.remove_none_value({
'name': name,
'description': description,
'algorithm': algorithm,
'language': language,
'max_features': max_features,
'ngram_range': ngram_range,
'use_stemming': use_stemming,
'preprocess_numbers': preprocess_numbers,
'preprocess_social_media': preprocess_social_media,
'normalize_weights': normalize_weights,
'stopwords': stopwords,
'whitelist': whitelist,
})
url = self.get_detail_url(model_id)
response = self.make_request('PATCH', url, data, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def deploy(self, model_id, retry_if_throttled=True):
url = self.get_detail_url(model_id, action='deploy')
response = self.make_request('POST', url, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def train(self, model_id, retry_if_throttled=True):
url = self.get_detail_url(model_id, action='train')
response = self.make_request('POST', url, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def delete(self, model_id, retry_if_throttled=True):
url = self.get_detail_url(model_id)
response = self.make_request('DELETE', url, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def create(self, name, description='', algorithm='svm', language='en', max_features=10000,
ngram_range=(1, 2), use_stemming=True, preprocess_numbers=True,
preprocess_social_media=False, normalize_weights=True, stopwords=True,
whitelist=None, retry_if_throttled=True):
data = self.remove_none_value({
'name': name,
'description': description,
'algorithm': algorithm,
'language': language,
'max_features': max_features,
'ngram_range': ngram_range,
'use_stemming': use_stemming,
'preprocess_numbers': preprocess_numbers,
'preprocess_social_media': preprocess_social_media,
'normalize_weights': normalize_weights,
'stopwords': stopwords,
'whitelist': whitelist,
})
url = self.get_list_url()
response = self.make_request('POST', url, data, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def classify(self, model_id, data, production_model=False, batch_size=DEFAULT_BATCH_SIZE,
auto_batch=True, retry_if_throttled=True):
validate_batch_size(batch_size)
url = self.get_detail_url(model_id, action='classify')
response = MonkeyLearnResponse()
for i in range(0, len(data), batch_size):
data_dict = self.remove_none_value({
'data': data[i:i + batch_size],
'production_model': production_model,
})
raw_response = self.make_request('POST', url, data_dict,
retry_if_throttled=retry_if_throttled)
response.add_raw_response(raw_response)
return response
def upload_data(self, model_id, data, input_duplicates_strategy=None,
existing_duplicates_strategy=None, retry_if_throttled=True):
url = self.get_detail_url(model_id, action='data')
data_dict = self.remove_none_value({
'data': data,
'input_duplicates_strategy': input_duplicates_strategy,
'existing_duplicates_strategy': existing_duplicates_strategy
})
response = self.make_request('POST', url, data_dict, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
class Tags(ModelEndpointSet):
model_type = ('classifiers', 'tags')
def detail(self, model_id, tag_id, retry_if_throttled=True):
url = self.get_nested_detail_url(model_id, tag_id)
response = self.make_request('GET', url, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def create(self, model_id, name, retry_if_throttled=True):
data = self.remove_none_value({
'name': name,
})
url = self.get_nested_list_url(model_id)
response = self.make_request('POST', url, data, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def edit(self, model_id, tag_id, name=None, retry_if_throttled=True):
data = self.remove_none_value({
'name': name,
})
url = self.get_nested_detail_url(model_id, tag_id)
response = self.make_request('PATCH', url, data, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
def delete(self, model_id, tag_id, move_data_to=None, retry_if_throttled=True):
data = self.remove_none_value({
'move_data_to': move_data_to,
})
url = self.get_nested_detail_url(model_id, tag_id)
response = self.make_request('DELETE', url, data, retry_if_throttled=retry_if_throttled)
return MonkeyLearnResponse(response)
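# Hedged usage sketch (kept as a comment so importing this module stays
# side-effect free): classifying text through the public client. The API
# token and model id below are placeholders, not real credentials.
#
#   from monkeylearn import MonkeyLearn
#
#   ml = MonkeyLearn('<YOUR_API_TOKEN>')
#   response = ml.classifiers.classify(
#       model_id='<YOUR_CLASSIFIER_MODEL_ID>',
#       data=['This is a great product!', 'Terrible support experience.'],
#   )
#   print(response.body)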
|
gobigger/hyper/tests/test_config.py | jayyoung0802/GoBigger | 189 | 11176198 | import logging
import pytest
from gobigger.hyper.configs.config_2f2s import server_default_config as c1
from gobigger.hyper.configs.config_2f2s_v2 import server_default_config as c2
from gobigger.hyper.configs.config_2f2s_v3 import server_default_config as c3
from gobigger.server import Server
logging.basicConfig(level=logging.DEBUG)
@pytest.mark.unittest
class TestHyperConfig:
def test_2f2s(self):
server = Server(c1)
def test_2f2s_v2(self):
server = Server(c2)
def test_2f2s_v3(self):
server = Server(c3)
|
terrascript/resource/innovationnorway/azure_preview.py | mjuenema/python-terrascript | 507 | 11176234 | # terrascript/resource/innovationnorway/azure_preview.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:12:49 UTC)
import terrascript
class azurepreview_budget(terrascript.Resource):
pass
class azurepreview_subscription(terrascript.Resource):
pass
__all__ = [
"azurepreview_budget",
"azurepreview_subscription",
]
|
napari/layers/utils/_text_constants.py | mkitti/napari | 1,345 | 11176246 | from enum import auto
from ...utils.misc import StringEnum
class TextMode(StringEnum):
"""
TextMode: Text setting mode.
NONE (default mode) no text is displayed
PROPERTY the text value is a property value
FORMATTED allows text to be set with an f-string like syntax
"""
NONE = auto()
PROPERTY = auto()
FORMATTED = auto()
class Anchor(StringEnum):
"""
Anchor: The anchor position for text
CENTER The text origin is centered on the layer item bounding box.
UPPER_LEFT The text origin is on the upper left corner of the bounding box
UPPER_RIGHT The text origin is on the upper right corner of the bounding box
LOWER_LEFT The text origin is on the lower left corner of the bounding box
LOWER_RIGHT The text origin is on the lower right corner of the bounding box
"""
CENTER = auto()
UPPER_LEFT = auto()
UPPER_RIGHT = auto()
LOWER_LEFT = auto()
LOWER_RIGHT = auto()
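# Hedged usage sketch (illustrative; assumes StringEnum's auto() yields the
# lowercase member name, so members can be looked up by their string value):
#
#   TextMode('formatted') is TextMode.FORMATTED   # -> True
#   Anchor('upper_left') is Anchor.UPPER_LEFT     # -> True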
|
autoremovetorrents/conditionlexer.py | cnpilot/autoremove-torrents | 437 | 11176249 |
import ply.lex as lex
from . import logger
from .exception.illegalcharacter import IllegalCharacter
class ConditionLexer(object):
reserved = {
'and': 'AND',
'or': 'OR'
}
tokens = [
'CONDITION', 'NUMBER',
'LT', 'GT',
'LPAREN', 'RPAREN',
] + list(reserved.values())
# Regular expression of tokens
t_LT = r'<'
t_GT = r'>'
t_LPAREN = r'\('
t_RPAREN = r'\)'
# Ignored characters
t_ignore = ' \t'
def t_NUMBER(self, t):
r'\d+(\.\d+)?'
t.value = float(t.value)
return t
def t_CONDITION(self, t):
r'[a-zA-Z_]+'
t.value = t.value.lower()
t.type = self.reserved.get(t.value, 'CONDITION')
return t
def t_error(self, t):
raise IllegalCharacter('Illegal character \'%s\'.' % t.value[0])
def __init__(self):
# Build the lexer
self.lexer = lex.lex(module=self)
# Set logger
self._logger = logger.Logger.register(__name__) |
conda-verify/run_test.py | nikicc/anaconda-recipes | 130 | 11176250 | from conda_verify import __version__
assert __version__ == '2.0.0'
|
observations/r/shrimp.py | hajime9652/observations | 199 | 11176265 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def shrimp(path):
"""Percentage of Shrimp in Shrimp Cocktail
A numeric vector with 18 determinations by different laboratories of the
amount (percentage of the declared total weight) of shrimp in shrimp
cocktail.
<NAME> and <NAME> (1976) Collaborative study of the
determination of the amount of shrimp in shrimp cocktail. *<NAME>.
Chem.* **59**, 644–649.
<NAME> and <NAME> (1990) *Robust Estimation and Testing.*
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `shrimp.csv`.
Returns:
Tuple of np.ndarray `x_train` with 18 rows and 1 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'shrimp.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/MASS/shrimp.csv'
maybe_download_and_extract(path, url,
save_file_name='shrimp.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
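# Hedged usage sketch: the cache directory below is illustrative. On first
# use the helper downloads shrimp.csv into that directory and returns the
# 18 x 1 data matrix described in the docstring above.
if __name__ == '__main__':
    x_train, metadata = shrimp('~/data')
    print(x_train.shape)        # expected: (18, 1)
    print(metadata['columns'])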
|
intake/gui/source/tests/test_source_select.py | tewf/intake | 578 | 11176302 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import intake
import pytest
pytest.importorskip('panel')
def assert_widget_matches(browser):
assert browser.options == browser.widget.options
assert browser.selected == browser.widget.value
def test_source_browser_init_with_cats(cat1, cat2, sources1, sources2):
from ..select import SourceSelector
source_browser = SourceSelector(cats=[cat1, cat2])
assert sources1[0].name in source_browser.options
assert sources2[0].name in source_browser.options
assert sources1[0] in source_browser.selected
assert_widget_matches(source_browser)
def test_source_browser_set_cats(cat1, cat2, sources1, sources2):
from ..select import SourceSelector
source_browser = SourceSelector()
source_browser.cats = [cat1, cat2]
assert sources1[0].name in source_browser.options
assert sources2[0].name in source_browser.options
assert sources1[0] in source_browser.selected
assert_widget_matches(source_browser)
def test_source_browser(source_browser, cat1, sources1):
assert len(source_browser.cats) == 1
assert cat1 in source_browser.cats
for source in sources1:
assert source.name in source_browser.options
assert source_browser.selected == [sources1[0]]
assert_widget_matches(source_browser)
def test_source_browser_add(source_browser, sources1, sources2):
source_browser.add(sources2[0])
for source in sources1:
assert source.name in source_browser.options
assert sources2[0].name in source_browser.options
assert source_browser.selected == [sources2[0]]
assert_widget_matches(source_browser)
def test_source_browser_add_list(source_browser, sources2):
source_browser.add(sources2)
assert sources2[1].name in source_browser.options
assert source_browser.selected == [sources2[0]]
assert_widget_matches(source_browser)
def test_source_browser_add_entry_with_nonunique_name(source_browser):
from intake.catalog.local import LocalCatalogEntry
name = source_browser.labels[0]
e0 = LocalCatalogEntry(name, '', 'csv', args=dict(urlpath='foo'))
source_browser.add(e0)
assert f'{name}_0' in source_browser.labels
assert name in source_browser.labels
assert_widget_matches(source_browser)
e1 = LocalCatalogEntry(name, '', 'csv', args=dict(urlpath='foo1'))
source_browser.add(e1)
assert f'{name}_1' in source_browser.labels
assert f'{name}_0' in source_browser.labels
assert name in source_browser.labels
assert_widget_matches(source_browser)
def test_source_browser_remove(source_browser, sources1):
source_browser.remove(sources1[0])
assert sources1[0].name not in source_browser.options
assert source_browser.selected == []
assert_widget_matches(source_browser)
def test_source_browser_remove_list(source_browser, sources1):
source_browser.remove(sources1)
assert source_browser.options == {}
assert source_browser.selected == []
assert_widget_matches(source_browser)
def test_source_browser_select_object(source_browser, sources1):
source_browser.selected = sources1[1]
assert source_browser.selected == [sources1[1]]
assert_widget_matches(source_browser)
def test_source_browser_select_name(source_browser, sources1):
source_browser.selected = sources1[1].name
assert source_browser.selected == [sources1[1]]
assert_widget_matches(source_browser)
def test_source_browser_select_list_of_names(source_browser, sources1):
source_browser.selected = []
source_browser.selected = [source.name for source in sources1]
assert source_browser.selected == sources1
assert_widget_matches(source_browser)
def test_source_browser_select_list_of_objects(source_browser, sources1):
source_browser.selected = sources1
assert source_browser.selected == sources1
assert_widget_matches(source_browser)
|
pycon/sponsorship/migrations/0020_auto_20180821_1031.py | azkarmoulana/pycon | 154 | 11176316 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsorship', '0019_auto_20180723_1655'),
]
operations = [
migrations.AddField(
model_name='sponsor',
name='print_logo',
field=models.FileField(upload_to=b'sponsor_files', null=True, verbose_name='Print logo (For printed materials, signage, and projection. SVG or EPS)'),
),
migrations.AlterField(
model_name='sponsor',
name='packages',
field=models.ManyToManyField(to='sponsorship.SponsorPackage', verbose_name='packages', blank=True),
),
migrations.AlterField(
model_name='sponsor',
name='web_logo',
field=models.ImageField(upload_to=b'sponsor_files', null=True, verbose_name='Web logo (For display on our sponsors page. High resolution PNG or JPG)'),
),
]
|
pytorch_toolkit/action_recognition/tests/test_attention.py | morkovka1337/openvino_training_extensions | 256 | 11176321 |
import torch
import numpy as np
from action_recognition.models.modules.self_attention import ScaledDotProductAttention, MultiHeadAttention
class TestScaledDotProductAttention:
def test_shapes(self):
layer = ScaledDotProductAttention(16, attn_dropout=0)
q = torch.zeros(4, 8, 16)
k = torch.zeros(4, 4, 16)
v = torch.zeros(4, 4, 16)
with torch.no_grad():
outputs, attns = layer(q, k, v)
assert outputs.size() == q.size()
assert attns.size(0) == v.size(0)
assert attns.size(1) == q.size(1)
assert attns.size(2) == k.size(1)
def test_atten_range(self, rand):
layer = ScaledDotProductAttention(16, attn_dropout=0)
q = torch.from_numpy(rand.rand(2, 4, 16))
k = torch.from_numpy(rand.rand(2, 4, 16))
v = torch.from_numpy(rand.rand(2, 4, 16))
with torch.no_grad():
outputs, attns = layer(q, k, v)
attns = attns.numpy()
assert np.alltrue(attns >= 0)
assert np.alltrue(attns <= 1)
assert np.allclose(attns.sum(2), np.ones((2, 4)))
class TestMultiHeadAttention:
def test_shapes(self):
layer = MultiHeadAttention(
n_head=2,
input_size=16,
output_size=16,
d_k=8,
d_v=8,
dropout=0,
use_proj=False,
layer_norm=False
)
q = torch.zeros(2, 8, 16)
k = torch.zeros(2, 4, 16)
v = torch.zeros(2, 4, 16)
with torch.no_grad():
outputs, attns = layer(q, k, v)
assert outputs.size() == q.size()
def test_shapes_with_use_proj_set(self):
layer = MultiHeadAttention(
n_head=2,
input_size=16,
output_size=16,
d_k=4,
d_v=4,
dropout=0,
use_proj=True,
layer_norm=False
)
q = torch.zeros(2, 8, 16)
k = torch.zeros(2, 4, 16)
v = torch.zeros(2, 4, 16)
with torch.no_grad():
outputs, attns = layer(q, k, v)
assert outputs.size() == q.size()
|
tests/r/test_med_gpa.py | hajime9652/observations | 199 | 11176333 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.med_gpa import med_gpa
def test_med_gpa():
"""Test module med_gpa.py by downloading
med_gpa.csv and testing shape of
extracted data has 55 rows and 11 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = med_gpa(test_path)
try:
assert x_train.shape == (55, 11)
except:
shutil.rmtree(test_path)
raise
|
tests/functional/coercers/test_coercer_non_null_boolean_with_default_field.py | matt-koevort/tartiflette | 530 | 11176445 | import pytest
from tests.functional.coercers.common import resolve_unwrapped_field
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(
name="coercion",
resolvers={
"Query.nonNullBooleanWithDefaultField": resolve_unwrapped_field
},
)
@pytest.mark.parametrize(
"query,variables,expected",
[
(
"""query { nonNullBooleanWithDefaultField }""",
None,
{"data": {"nonNullBooleanWithDefaultField": "SUCCESS-True"}},
),
(
"""query { nonNullBooleanWithDefaultField(param: null) }""",
None,
{
"data": None,
"errors": [
{
"message": "Argument < param > of non-null type < Boolean! > must not be null.",
"path": ["nonNullBooleanWithDefaultField"],
"locations": [{"line": 1, "column": 40}],
"extensions": {
"rule": "5.6.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type",
"tag": "values-of-correct-type",
},
}
],
},
),
(
"""query { nonNullBooleanWithDefaultField(param: false) }""",
None,
{"data": {"nonNullBooleanWithDefaultField": "SUCCESS-False"}},
),
(
"""query ($param: Boolean) { nonNullBooleanWithDefaultField(param: $param) }""",
None,
{"data": {"nonNullBooleanWithDefaultField": "SUCCESS-True"}},
),
(
"""query ($param: Boolean) { nonNullBooleanWithDefaultField(param: $param) }""",
{"param": None},
{
"data": {"nonNullBooleanWithDefaultField": None},
"errors": [
{
"message": "Argument < param > of non-null type < Boolean! > must not be null.",
"path": ["nonNullBooleanWithDefaultField"],
"locations": [{"line": 1, "column": 65}],
}
],
},
),
(
"""query ($param: Boolean) { nonNullBooleanWithDefaultField(param: $param) }""",
{"param": True},
{"data": {"nonNullBooleanWithDefaultField": "SUCCESS-True"}},
),
(
"""query ($param: Boolean = null) { nonNullBooleanWithDefaultField(param: $param) }""",
None,
{
"data": {"nonNullBooleanWithDefaultField": None},
"errors": [
{
"message": "Argument < param > of non-null type < Boolean! > must not be null.",
"path": ["nonNullBooleanWithDefaultField"],
"locations": [{"line": 1, "column": 72}],
}
],
},
),
(
"""query ($param: Boolean = null) { nonNullBooleanWithDefaultField(param: $param) }""",
{"param": None},
{
"data": {"nonNullBooleanWithDefaultField": None},
"errors": [
{
"message": "Argument < param > of non-null type < Boolean! > must not be null.",
"path": ["nonNullBooleanWithDefaultField"],
"locations": [{"line": 1, "column": 72}],
}
],
},
),
(
"""query ($param: Boolean = null) { nonNullBooleanWithDefaultField(param: $param) }""",
{"param": True},
{"data": {"nonNullBooleanWithDefaultField": "SUCCESS-True"}},
),
(
"""query ($param: Boolean = false) { nonNullBooleanWithDefaultField(param: $param) }""",
None,
{"data": {"nonNullBooleanWithDefaultField": "SUCCESS-False"}},
),
(
"""query ($param: Boolean = false) { nonNullBooleanWithDefaultField(param: $param) }""",
{"param": None},
{
"data": {"nonNullBooleanWithDefaultField": None},
"errors": [
{
"message": "Argument < param > of non-null type < Boolean! > must not be null.",
"path": ["nonNullBooleanWithDefaultField"],
"locations": [{"line": 1, "column": 73}],
}
],
},
),
(
"""query ($param: Boolean = false) { nonNullBooleanWithDefaultField(param: $param) }""",
{"param": True},
{"data": {"nonNullBooleanWithDefaultField": "SUCCESS-True"}},
),
(
"""query ($param: Boolean!) { nonNullBooleanWithDefaultField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < Boolean! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: Boolean!) { nonNullBooleanWithDefaultField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < Boolean! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: Boolean!) { nonNullBooleanWithDefaultField(param: $param) }""",
{"param": True},
{"data": {"nonNullBooleanWithDefaultField": "SUCCESS-True"}},
),
],
)
async def test_coercion_non_null_boolean_with_default_field(
engine, query, variables, expected
):
assert await engine.execute(query, variables=variables) == expected
|
tests/test_octodns_provider_digitalocean.py | nikolay-te/octodns | 1,865 | 11176452 |
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
class TestDigitalOceanShim(TestCase):
def test_missing(self):
with self.assertRaises(ModuleNotFoundError):
from octodns.provider.digitalocean import DigitalOceanProvider
DigitalOceanProvider
|
plugin/flags_sources/flags_source.py | jeeb/EasyClangComplete | 648 | 11176473 |
"""Holds an abstract class defining a flags source."""
from os import path
from ..utils.search_scope import TreeSearchScope
class FlagsSource(object):
"""An abstract class defining a Flags Source."""
def __init__(self, include_prefixes):
"""Initialize default flags storage."""
self._include_prefixes = include_prefixes
def get_flags(self, file_path=None, search_scope=None):
"""Get flags for a view path [ABSTRACT].
Raises:
NotImplementedError: Should not be called directly.
"""
raise NotImplementedError("calling abstract method")
@staticmethod
def _update_search_scope_if_needed(search_scope, file_path):
if not search_scope:
# Search from current file up the tree.
return TreeSearchScope(from_folder=path.dirname(file_path))
# We already know what we are doing. Leave search scope unchanged.
return search_scope
def _get_cached_from(self, file_path):
"""Get cached path for file path.
Args:
file_path (str): Input file path.
Returns:
str: Path to the cached flag source path.
"""
if file_path and file_path in self._cache:
return self._cache[file_path]
return None
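# Hedged sketch (illustrative, not part of EasyClangComplete): a minimal
# concrete subclass showing the contract above -- get_flags() must return the
# compile flags for a view, and may ignore the search scope entirely.
class StaticFlagsSource(FlagsSource):
    """Toy flags source that always returns the same fixed flags."""

    def __init__(self, include_prefixes, flags):
        """Store a fixed flag list alongside the usual include prefixes."""
        super().__init__(include_prefixes)
        self._flags = list(flags)

    def get_flags(self, file_path=None, search_scope=None):
        """Return the stored flags; file_path and search_scope are unused."""
        return list(self._flags)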
|
desktop_local_tests/macos/test_macos_public_ip_disrupt_reorder_services.py | UAEKondaya1/expressvpn_leak_testing | 219 | 11176490 | from desktop_local_tests.public_ip_during_disruption import PublicIPDuringDisruptionTestCase
from desktop_local_tests.macos.macos_reorder_services_disrupter import MacOSDNSReorderServicesDisrupter
class TestMacOSPublicIPDisruptReorderServices(PublicIPDuringDisruptionTestCase):
'''Summary:
Tests whether traffic leaving the user's device has the public IP hidden when the network
service order is changed.
Details:
This test will connect to VPN then swap the priority of the primary and secondary network
services. The test then queries a webpage to detect its public IP.
Discussion:
It's not 100% clear if, in the real world, services can change their order without user
involvement. It is still however a good stress test of the application.
Weaknesses:
The time taken to perform each IP request is relatively long. Tests using IPResponder should be
preferred over these tests.
Scenarios:
Requires two active network services.
TODO:
Consider a variant which changes the network "Location". This is much more likely to be
something a user might do.
'''
def __init__(self, devices, parameters):
super().__init__(MacOSDNSReorderServicesDisrupter, devices, parameters)
|
scripts/run_ppanggolin.py | AMARTELKE/Pangenome-with-Panaroo | 116 | 11176530 |
import argparse
import subprocess
import sys, os
import gffutils as gff
def run_ppanggolin(input_files, out_dir, defrag=False, ncpus=1, verbose=False):
# create input file
input_gff_locations = out_dir + "gff_file_locations.tab"
with open(input_gff_locations, 'w') as outfile:
for gff_path in input_files:
prefix = os.path.splitext(os.path.basename(gff_path))[0]
outfile.write(prefix + "\t" + gff_path + "\n")
# run ppanggolin
cmd = "ppanggolin workflow"
cmd += " --anno " + input_gff_locations
cmd += " -o " + out_dir
cmd += " -c " + str(ncpus)
cmd += " --force"
if defrag:
cmd += " --defrag"
if verbose:
print("running cmd: ", cmd)
subprocess.run(cmd, shell=True, check=True)
return
# Clean other "##"-prefixed lines from the GFF file, as they confuse parsers
def clean_gff_string(gff_string):
splitlines = gff_string.splitlines()
lines_to_delete = []
for index in range(len(splitlines)):
if '##sequence-region' in splitlines[index]:
lines_to_delete.append(index)
for index in sorted(lines_to_delete, reverse=True):
del splitlines[index]
cleaned_gff = "\n".join(splitlines)
return cleaned_gff
def post_process_fmt(input_files, out_dir):
# get mapping between GFF ids and ppanggolin ids
id_mapping = {}
for f in input_files:
prefix = os.path.splitext(os.path.basename(f))[0]
#Split file and parse
gff_file = open(f, 'r')
lines = gff_file.read()
split = lines.split('##FASTA')
if len(split) != 2:
print("Problem reading GFF3 file: ", gff_file.name)
raise RuntimeError("Error reading prokka input!")
parsed_gff = gff.create_db(clean_gff_string(split[0]),
dbfn=":memory:",
force=True,
keep_order=True,
from_string=True)
gene_count = 0
for entry in parsed_gff.all_features(featuretype=()):
if "CDS" not in entry.featuretype: continue
ppanggolin_id = prefix + "_CDS_" + str(gene_count).zfill(4)
id_mapping[ppanggolin_id] = entry.id
gene_count += 1
gff_file.close()
id_mapping[''] = ''
with open(out_dir + "ppanggolin_gene_presence_absence.csv", 'w') as outfile:
with open(out_dir + "matrix.csv", 'r') as infile:
outfile.write(next(infile))
for line in infile:
line = line.strip().split(",")
for i in range(14, len(line)):
line[i] = ";".join([id_mapping[g.strip('"')] for g in line[i].split()])
outfile.write(",".join(line) + "\n")
return
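# Hedged command-line sketch (paths and thread count are illustrative):
#
#   python run_ppanggolin.py -i annotations/*.gff -o ppanggolin_out/ -t 8 --defrag
#
# This writes gff_file_locations.tab into the output directory, invokes
# `ppanggolin workflow` on it, and rewrites matrix.csv as
# ppanggolin_gene_presence_absence.csv with the original GFF gene IDs.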
def main():
parser = argparse.ArgumentParser(
description=
"""Runs ppanggolin on GFF3 files and reformats output matrix.""")
parser.add_argument("-o",
"--out_dir",
dest="output_dir",
required=True,
help="location of an output directory",
type=str)
parser.add_argument(
"-i",
"--input",
dest="input_files",
required=True,
help="input GFF3 files (usually output from running Prokka)",
type=str,
nargs='+')
parser.add_argument("--defrag",
dest="defrag",
help=("Turn ppanggolin defragmentation."),
action='store_true',
default=False)
parser.add_argument("-t",
"--threads",
dest="n_cpu",
help="number of threads to use (default=1)",
type=int,
default=1)
args = parser.parse_args()
args.output_dir = os.path.join(args.output_dir, "")
run_ppanggolin(args.input_files,
args.output_dir,
defrag=args.defrag,
ncpus=args.n_cpu,
verbose=False)
post_process_fmt(args.input_files,
args.output_dir)
return
if __name__ == '__main__':
main() |
setup.py | Kadantte/twist.moe | 112 | 11176544 | # To replace python with python3
with open("api.sh") as f:
newText = f.read().replace('python', 'python3')
# The blanket replace also turns any existing 'python3' into 'python33'; undo that.
newText = newText.replace('python33', 'python3')
with open("api.sh", "w") as f:
f.write(newText)
|
example/indexing/indexing.py | saloponov/faiss-server | 106 | 11176555 |
import os
import zipfile
from urllib.request import urlretrieve
import faiss
from gensim.models import KeyedVectors, word2vec
from tqdm import tqdm
SRC_PATH = os.path.dirname(os.path.abspath(__file__))
DATASET_FILE_PATH = os.path.join(SRC_PATH, 'text8')
DATASET_ZIP_PATH = os.path.join(SRC_PATH, '{}.zip'.format(DATASET_FILE_PATH))
INDEX_FILE_PATH = os.path.join(SRC_PATH, 'faiss.index')
MODEL_FILE_PATH = os.path.join(SRC_PATH, 'word2vec.model')
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
def get_text8():
if not os.path.isfile(DATASET_ZIP_PATH):
with DLProgress(
unit='B', unit_scale=True, miniters=1,
desc=DATASET_FILE_PATH) as pbar:
urlretrieve('http://mattmahoney.net/dc/text8.zip',
DATASET_ZIP_PATH, pbar.hook)
if not os.path.isfile(DATASET_FILE_PATH):
with zipfile.ZipFile(DATASET_ZIP_PATH) as zip_ref:
zip_ref.extractall(path=SRC_PATH)
def get_vector():
if not os.path.isfile(MODEL_FILE_PATH):
sentences = word2vec.Text8Corpus(fname=DATASET_FILE_PATH)
model = word2vec.Word2Vec(sentences=sentences)
model.wv.save_word2vec_format(fname=MODEL_FILE_PATH)
return model
else:
return KeyedVectors.load_word2vec_format(
fname=MODEL_FILE_PATH, binary=False)
def do_indexing(word2vec_model=None):
if not os.path.isfile(INDEX_FILE_PATH):
index = faiss.IndexFlatIP(word2vec_model.vector_size)
index.add(word2vec_model.wv.syn0norm)
faiss.write_index(index, INDEX_FILE_PATH)
return index
else:
return faiss.read_index(INDEX_FILE_PATH)
def main():
# fetch dataset
get_text8()
# get word vector via word2vec
model = get_vector()
model.wv.init_sims(replace=True)
# indexing via faiss
do_indexing(word2vec_model=model)
if __name__ == '__main__':
main()
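# Hedged follow-up sketch (kept as a comment; word choice and k are
# illustrative): querying the index for nearest neighbours. The vectors were
# L2-normalised by init_sims(replace=True), so inner product acts as cosine
# similarity.
#
#   model = get_vector()
#   model.wv.init_sims(replace=True)
#   index = do_indexing(word2vec_model=model)
#   query = model.wv.get_vector('king').reshape(1, -1)
#   distances, ids = index.search(query, 5)
#   print([model.wv.index2word[i] for i in ids[0]])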
|
examples/python/statespace_news.py | CCHiggins/statsmodels | 6,931 | 11176560 |
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook statespace_news.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# ## Forecasting, updating datasets, and the "news"
#
# In this notebook, we describe how to use Statsmodels to compute the
# impacts of updated or revised datasets on out-of-sample forecasts or in-
# sample estimates of missing data. We follow the approach of the
# "Nowcasting" literature (see references at the end), by using a state
# space model to compute the "news" and impacts of incoming data.
#
# **Note**: this notebook applies to Statsmodels v0.12+. In addition, it
# only applies to the state space models or related classes, which are:
# `sm.tsa.statespace.ExponentialSmoothing`, `sm.tsa.arima.ARIMA`,
# `sm.tsa.SARIMAX`, `sm.tsa.UnobservedComponents`, `sm.tsa.VARMAX`, and
# `sm.tsa.DynamicFactor`.
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
macrodata = sm.datasets.macrodata.load_pandas().data
macrodata.index = pd.period_range('1959Q1', '2009Q3', freq='Q')
# Forecasting exercises often start with a fixed set of historical data
# that is used for model selection and parameter estimation. Then, the
# fitted selected model (or models) can be used to create out-of-sample
# forecasts. Most of the time, this is not the end of the story. As new data
# comes in, you may need to evaluate your forecast errors, possibly update
# your models, and create updated out-of-sample forecasts. This is sometimes
# called a "real-time" forecasting exercise (by contrast, a pseudo real-time
# exercise is one in which you simulate this procedure).
#
# If all that matters is minimizing some loss function based on forecast
# errors (like MSE), then when new data comes in you may just want to
# completely redo model selection, parameter estimation and out-of-sample
# forecasting, using the updated datapoints. If you do this, your new
# forecasts will have changed for two reasons:
#
# 1. You have received new data that gives you new information
# 2. Your forecasting model or the estimated parameters are different
#
# In this notebook, we focus on methods for isolating the first effect.
# The way we do this comes from the so-called "nowcasting" literature, and
# in particular Bańbura, Giannone, and Reichlin (2011), Bańbura and Modugno
# (2014), and Bańbura et al. (2014). They describe this exercise as
# computing the "**news**", and we follow them in using this language in
# Statsmodels.
#
# These methods are perhaps most useful with multivariate models, since
# there multiple variables may update at the same time, and it is not
# immediately obvious what forecast change was created by what updated
# variable. However, they can still be useful for thinking about forecast
# revisions in univariate models. We will therefore start with the simpler
# univariate case to explain how things work, and then move to the
# multivariate case afterwards.
# **Note on revisions**: the framework that we are using is designed to
# decompose changes to forecasts from newly observed datapoints. It can also
# take into account *revisions* to previously published datapoints, but it
# does not decompose them separately. Instead, it only shows the aggregate
# effect of "revisions".
# **Note on `exog` data**: the framework that we are using only decomposes
# changes to forecasts from newly observed datapoints for *modeled*
# variables. These are the "left-hand-side" variables that in Statsmodels
# are given in the `endog` arguments. This framework does not decompose or
# account for changes to unmodeled "right-hand-side" variables, like those
# included in the `exog` argument.
# ### Simple univariate example: AR(1)
#
# We will begin with a simple autoregressive model, an AR(1):
#
# $$y_t = \phi y_{t-1} + \varepsilon_t$$
#
# - The parameter $\phi$ captures the persistence of the series
#
# We will use this model to forecast inflation.
#
# To make it simpler to describe the forecast updates in this notebook, we
# will work with inflation data that has been de-meaned, but it is
# straightforward in practice to augment the model with a mean term.
#
# De-mean the inflation series
y = macrodata['infl'] - macrodata['infl'].mean()
# #### Step 1: fitting the model on the available dataset
# Here, we'll simulate an out-of-sample exercise, by constructing and
# fitting our model using all of the data except the last five observations.
# We'll assume that we haven't observed these values yet, and then in
# subsequent steps we'll add them back into the analysis.
y_pre = y.iloc[:-5]
y_pre.plot(figsize=(15, 3), title='Inflation')
# To construct forecasts, we first estimate the parameters of the model.
# This returns a results object that we will be able to use produce
# forecasts.
mod_pre = sm.tsa.arima.ARIMA(y_pre, order=(1, 0, 0), trend='n')
res_pre = mod_pre.fit()
print(res_pre.summary())
# Creating the forecasts from the results object `res` is easy - you can
# just call the `forecast` method with the number of forecasts you want to
# construct. In this case, we'll construct four out-of-sample forecasts.
# Compute the forecasts
forecasts_pre = res_pre.forecast(4)
# Plot the last 3 years of data and the four out-of-sample forecasts
y_pre.iloc[-12:].plot(figsize=(15, 3), label='Data', legend=True)
forecasts_pre.plot(label='Forecast', legend=True)
# For the AR(1) model, it is also easy to manually construct the
# forecasts. Denoting the last observed variable as $y_T$ and the $h$-step-
# ahead forecast as $y_{T+h|T}$, we have:
#
# $$y_{T+h|T} = \hat \phi^h y_T$$
#
# Where $\hat \phi$ is our estimated value for the AR(1) coefficient. From
# the summary output above, we can see that this is the first parameter of
# the model, which we can access from the `params` attribute of the results
# object.
# Get the estimated AR(1) coefficient
phi_hat = res_pre.params[0]
# Get the last observed value of the variable
y_T = y_pre.iloc[-1]
# Directly compute the forecasts at the horizons h=1,2,3,4
manual_forecasts = pd.Series(
[phi_hat * y_T, phi_hat**2 * y_T, phi_hat**3 * y_T, phi_hat**4 * y_T],
index=forecasts_pre.index)
# We'll print the two to double-check that they're the same
print(pd.concat([forecasts_pre, manual_forecasts], axis=1))
# #### Step 2: computing the "news" from a new observation
#
# Suppose that time has passed, and we have now received another
# observation. Our dataset is now larger, and we can evaluate our forecast
# error and produce updated forecasts for the subsequent quarters.
# Get the next observation after the "pre" dataset
y_update = y.iloc[-5:-4]
# Print the forecast error
print('Forecast error: %.2f' % (y_update.iloc[0] - forecasts_pre.iloc[0]))
# To compute forecasts based on our updated dataset, we will create an
# updated results object `res_post` using the `append` method, to append on
# our new observation to the previous dataset.
#
# Note that by default, the `append` method does not re-estimate the
# parameters of the model. This is exactly what we want here, since we want
# to isolate the effect on the forecasts of the new information only.
# Create a new results object by passing the new observations to the
# `append` method
res_post = res_pre.append(y_update)
# Since we now know the value for 2008Q3, we will only use `res_post` to
# produce forecasts for 2008Q4 through 2009Q2
forecasts_post = pd.concat([y_update, res_post.forecast('2009Q2')])
print(forecasts_post)
# In this case, the forecast error is quite large - inflation was more
# than 10 percentage points below the AR(1) models' forecast. (This was
# largely because of large swings in oil prices around the global financial
# crisis).
# To analyse this in more depth, we can use Statsmodels to isolate the
# effect of the new information - or the "**news**" - on our forecasts. This
# means that we do not yet want to change our model or re-estimate the
# parameters. Instead, we will use the `news` method that is available in
# the results objects of state space models.
#
# Computing the news in Statsmodels always requires a *previous* results
# object or dataset, and an *updated* results object or dataset. Here we
# will use the original results object `res_pre` as the previous results and
# the `res_post` results object that we just created as the updated results.
# Once we have previous and updated results objects or datasets, we can
# compute the news by calling the `news` method. Here, we will call
# `res_pre.news`, and the first argument will be the updated results,
# `res_post` (however, if you have two results objects, the `news` method
# can be called on either one).
#
# In addition to specifying the comparison object or dataset as the first
# argument, there are a variety of other arguments that are accepted. The
# most important specify the "impact periods" that you want to consider.
# These "impact periods" correspond to the forecasted periods of interest;
# i.e. these dates specify with periods will have forecast revisions
# decomposed.
#
# To specify the impact periods, you must pass two of `start`, `end`, and
# `periods` (similar to the Pandas `date_range` method). If your time series
# was a Pandas object with an associated date or period index, then you can
# pass dates as values for `start` and `end`, as we do below.
# Compute the impact of the news on the four periods that we previously
# forecasted: 2008Q3 through 2009Q2
news = res_pre.news(res_post, start='2008Q3', end='2009Q2')
# Note: one alternative way to specify these impact dates is
# `start='2008Q3', periods=4`
# The variable `news` is an object of the class `NewsResults`, and it
# contains details about the updates to the data in `res_post` compared to
# `res_pre`, the new information in the updated dataset, and the impact that
# the new information had on the forecasts in the period between `start` and
# `end`.
#
# One easy way to summarize the results are with the `summary` method.
print(news.summary())
# **Summary output**: the default summary for this news results object
# printed four tables:
#
# 1. Summary of the model and datasets
# 2. Details of the news from updated data
# 3. Summary of the impacts of the new information on the forecasts
# between `start='2008Q3'` and `end='2009Q2'`
# 4. Details of how the updated data led to the impacts on the forecasts
# between `start='2008Q3'` and `end='2009Q2'`
#
# These are described in more detail below.
#
# *Notes*:
#
# - There are a number of arguments that can be passed to the `summary`
# method to control this output. Check the documentation / docstring for
# details.
# - Table (4), showing details of the updates and impacts, can become
# quite large if the model is multivariate, there are multiple updates, or a
# large number of impact dates are selected. It is only shown by default for
# univariate models.
# **First table: summary of the model and datasets**
#
# The first table, above, shows:
#
# - The type of model from which the forecasts were made. Here this is an
# ARIMA model, since an AR(1) is a special case of an ARIMA(p,d,q) model.
# - The date and time at which the analysis was computed.
# - The original sample period, which here corresponds to `y_pre`
# - The endpoint of the updated sample period, which here is the last date
# in `y_post`
# **Second table: the news from updated data**
#
# This table simply shows the forecasts from the previous results for
# observations that were updated in the updated sample.
#
# *Notes*:
#
# - Our updated dataset `y_post` did not contain any *revisions* to
# previously observed datapoints. If it had, there would be an additional
# table showing the previous and updated values of each such revision.
# **Third table: summary of the impacts of the new information**
#
# *Columns*:
#
# The third table, above, shows:
#
# - The previous forecast for each of the impact dates, in the "estimate
# (prev)" column
# - The impact that the new information (the "news") had on the forecasts
# for each of the impact dates, in the "impact of news" column
# - The updated forecast for each of the impact dates, in the "estimate
# (new)" column
#
# *Notes*:
#
# - In multivariate models, this table contains additional columns
# describing the relevant impacted variable for each row.
# - Our updated dataset `y_post` did not contain any *revisions* to
# previously observed datapoints. If it had, there would be additional
# columns in this table showing the impact of those revisions on the
# forecasts for the impact dates.
# - Note that `estimate (new) = estimate (prev) + impact of news`
# - This table can be accessed independently using the `summary_impacts`
# method.
#
# *In our example*:
#
# Notice that in our example, the table shows the values that we computed
# earlier:
#
# - The "estimate (prev)" column is identical to the forecasts from our
# previous model, contained in the `forecasts_pre` variable.
# - The "estimate (new)" column is identical to our `forecasts_post`
# variable, which contains the observed value for 2008Q3 and the forecasts
# from the updated model for 2008Q4 - 2009Q2.
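# As noted above, the impacts table can also be pulled out on its own (a
# brief aside; `summary_impacts` is the method referenced in the notes):
print(news.summary_impacts())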
# **Fourth table: details of updates and their impacts**
#
# The fourth table, above, shows how each new observation translated into
# specific impacts at each impact date.
#
# *Columns*:
#
# The first three columns of the table describe the relevant **update** (an
# "update" is a new observation):
#
# - The first column ("update date") shows the date of the variable that
# was updated.
# - The second column ("forecast (prev)") shows the value that would have
# been forecasted for the update variable at the update date based on the
# previous results / dataset.
# - The third column ("observed") shows the actual observed value of that
# updated variable / update date in the updated results / dataset.
#
# The last four columns describe the **impact** of a given update (an
# impact is a changed forecast within the "impact periods").
#
# - The fourth column ("impact date") gives the date at which the given
# update made an impact.
# - The fifth column ("news") shows the "news" associated with the given
# update (this is the same for each impact of a given update, but is just
# not sparsified by default)
# - The sixth column ("weight") describes the weight that the "news" from
# the given update has on the impacted variable at the impact date. In
# general, weights will be different between each "updated variable" /
# "update date" / "impacted variable" / "impact date" combination.
# - The seventh column ("impact") shows the impact that the given update
# had on the given "impacted variable" / "impact date".
#
# *Notes*:
#
# - In multivariate models, this table contains additional columns to show
# the relevant variable that was updated and variable that was impacted for
# each row. Here, there is only one variable ("infl"), so those columns are
# suppressed to save space.
# - By default, the updates in this table are "sparsified" with blanks, to
# avoid repeating the same values for "update date", "forecast (prev)", and
# "observed" for each row of the table. This behavior can be overridden
# using the `sparsify` argument.
# - Note that `impact = news * weight`.
# - This table can be accessed independently using the `summary_details`
# method.
#
# *In our example*:
#
# - For the update to 2008Q3 and impact date 2008Q3, the weight is equal
# to 1. This is because we only have one variable, and once we have
# incorporated the data for 2008Q3, there is no remaining ambiguity about
# the "forecast" for this date. Thus all of the "news" about this variable
# at 2008Q3 passes through to the "forecast" directly.
# #### Addendum: manually computing the news, weights, and impacts
#
# For this simple example with a univariate model, it is straightforward
# to compute all of the values shown above by hand. First, recall the
# formula for forecasting $y_{T+h|T} = \phi^h y_T$, and note that it follows
# that we also have $y_{T+h|T+1} = \phi^h y_{T+1}$. Finally, note that
# $y_{T|T+1} = y_T$, because if we know the value of the observations
# through $T+1$, we know the value of $y_T$.
#
# **News**: The "news" is nothing more than the forecast error associated
# with one of the new observations. So the news associated with observation
# $T+1$ is:
#
# $$n_{T+1} = y_{T+1} - y_{T+1|T} = y_{T+1} - \phi y_T$$
#
# **Impacts**: The impact of the news is the difference between the
# updated and previous forecasts, $i_h \equiv y_{T+h|T+1} - y_{T+h|T}$.
#
# - The previous forecasts for $h=1, \dots, 4$ are: $\begin{pmatrix} \phi
# y_T & \phi^2 y_T & \phi^3 y_T & \phi^4 y_T \end{pmatrix}'$.
# - The updated forecasts for $h=1, \dots, 4$ are: $\begin{pmatrix}
# y_{T+1} & \phi y_{T+1} & \phi^2 y_{T+1} & \phi^3 y_{T+1} \end{pmatrix}'$.
#
# The impacts are therefore:
#
# $$\{ i_h \}_{h=1}^4 = \begin{pmatrix} y_{T+1} - \phi y_T \\ \phi
# (y_{T+1} - \phi y_T) \\ \phi^2 (y_{T+1} - \phi y_T) \\ \phi^3 (y_{T+1} -
# \phi y_T) \end{pmatrix}$$
#
# **Weights**: To compute the weights, we just need to note that it is
# immediate that we can rewrite the impacts in terms of the forecast errors,
# $n_{T+1}$.
#
# $$\{ i_h \}_{h=1}^4 = \begin{pmatrix} 1 \\ \phi \\ \phi^2 \\ \phi^3
# \end{pmatrix} n_{T+1}$$
#
# The weights are then simply $w = \begin{pmatrix} 1 \\ \phi \\ \phi^2 \\
# \phi^3 \end{pmatrix}$
# We can check that this is what the `news` method has computed.
# Print the news, computed by the `news` method
print(news.news)
# Manually compute the news
print()
print((y_update.iloc[0] - phi_hat * y_pre.iloc[-1]).round(6))
# Print the total impacts, computed by the `news` method
# (Note: news.total_impacts = news.revision_impacts + news.update_impacts,
# but here there are no data revisions, so total and update impacts are the
# same)
print(news.total_impacts)
# Manually compute the impacts
print()
print(forecasts_post - forecasts_pre)
# Print the weights, computed by the `news` method
print(news.weights)
# Manually compute the weights
print()
print(np.array([1, phi_hat, phi_hat**2, phi_hat**3]).round(6))
# ### Multivariate example: dynamic factor
#
# In this example, we'll consider forecasting monthly core price inflation
# based on the Personal Consumption Expenditures (PCE) price index and the
# Consumer Price Index (CPI), using a Dynamic Factor model. Both of these
# measures track prices in the US economy and are based on similar source
# data, but they have a number of definitional differences. Nonetheless,
# they track each other relatively well, so modeling them jointly using a
# single dynamic factor seems reasonable.
#
# One reason that this kind of approach can be useful is that the CPI is
# released earlier in the month than the PCE. Once the CPI is released,
# therefore, we can update our dynamic factor model with that additional
# datapoint, and obtain an improved forecast for that month's PCE release. A
# more involved version of this kind of analysis is available in Knotek and
# Zaman (2017).
# We start by downloading the core CPI and PCE price index data from
# [FRED](https://fred.stlouisfed.org/), converting them to annualized
# monthly inflation rates, and removing two outliers. We do not de-mean the
# series here; instead, the model below handles the non-zero mean with a
# constant exogenous regressor (the dynamic factor model does not include a
# mean term of its own).
import pandas_datareader as pdr
levels = pdr.get_data_fred(['PCEPILFE', 'CPILFESL'], start='1999',
end='2019').to_period('M')
infl = np.log(levels).diff().iloc[1:] * 1200
infl.columns = ['PCE', 'CPI']
# Remove two outliers (the Sep and Oct 2001 PCE readings)
infl['PCE'].loc['2001-09':'2001-10'] = np.nan
# To show how this works, we'll imagine that it is April 14, 2017, which
# is the date of the March 2017 CPI release. So that we can show the effect
# of multiple updates at once, we'll assume that we haven't updated our data
# since the end of January, so that:
#
# - Our **previous dataset** will consist of all values for the PCE and
# CPI through January 2017
# - Our **updated dataset** will additionally incorporate the CPI for
# February and March 2017 and the PCE data for February 2017. But it will
# not yet include the March PCE (the March 2017 PCE price index was not
# released until May 1, 2017).
# Previous dataset runs through 2017-01
y_pre = infl.loc[:'2017-01'].copy()
const_pre = np.ones(len(y_pre))
print(y_pre.tail())
# For the updated dataset, we'll just add in the
# CPI value for 2017-03
y_post = infl.loc[:'2017-03'].copy()
y_post.loc['2017-03', 'PCE'] = np.nan
const_post = np.ones(len(y_post))
# Notice the missing value for PCE in 2017-03
print(y_post.tail())
# We chose this particular example because in March 2017, core CPI prices
# fell for the first time since 2010, and this information may be useful in
# forecast core PCE prices for that month. The graph below shows the CPI and
# PCE price data as it would have been observed on April 14th$^\dagger$.
#
# -----
#
# $\dagger$ This statement is not entirely true, because both the CPI and
# PCE price indexes can be revised to a certain extent after the fact. As a
# result, the series that we're pulling are not exactly like those observed
# on April 14, 2017. This could be fixed by pulling the archived data from
# [ALFRED](https://alfred.stlouisfed.org/) instead of
# [FRED](https://fred.stlouisfed.org/), but the data we have is good enough
# for this tutorial.
# Plot the updated dataset
fig, ax = plt.subplots(figsize=(15, 3))
y_post.plot(ax=ax)
ax.hlines(0, '2009', '2017-06', linewidth=1.0)
ax.set_xlim('2009', '2017-06')
# To perform the exercise, we first construct and fit a `DynamicFactor`
# model. Specifically:
#
# - We are using a single dynamic factor (`k_factors=1`)
# - We are modeling the factor's dynamics with an AR(6) model
# (`factor_order=6`)
# - We have included a vector of ones as an exogenous variable
# (`exog=const_pre`), because the inflation series we are working with are
# not mean-zero. The implied specification is sketched just below.
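# Schematically, the model being fit is
#
#     y_t = beta + lambda * f_t + e_t
#     f_t = a_1 * f_{t-1} + ... + a_6 * f_{t-6} + u_t
#
# where y_t is the 2x1 vector of PCE and CPI inflation, f_t is the scalar
# common factor, and e_t collects the idiosyncratic errors. (This is only a
# sketch of the default `DynamicFactor` setup, not an exact transcription of
# every option.)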
mod_pre = sm.tsa.DynamicFactor(y_pre,
exog=const_pre,
k_factors=1,
factor_order=6)
res_pre = mod_pre.fit()
print(res_pre.summary())
# With the fitted model in hand, we now construct the news and impacts
# associated with observing the CPI for March 2017. The updated data is for
# February 2017 and part of March 2017, and we'll examine the impacts on
# both March and April.
#
# In the univariate example, we first created an updated results object,
# and then passed that to the `news` method. Here, we're creating the news
# by directly passing the updated dataset.
#
# Notice that:
#
# 1. `y_post` contains the entire updated dataset (not just the new
# datapoints)
# 2. We also had to pass an updated `exog` array. This array must cover
# **both**:
# - The entire period associated with `y_post`
# - Any additional datapoints after the end of `y_post` through the
# last impact date, specified by `end`
#
# Here, `y_post` ends in March 2017, so we needed our `exog` to extend
# one more period, to April 2017.
# Create the news results
# Note: the `exog` array must extend one period beyond the end of `y_post`,
# through the last impact date (April 2017)
const_post_plus1 = np.ones(len(y_post) + 1)
news = res_pre.news(y_post,
exog=const_post_plus1,
start='2017-03',
end='2017-04')
# > **Note**:
# >
# > In the univariate example, above, we first constructed a new results
# object, and then passed that to the `news` method. We could have done that
# here too, although there is an extra step required. Since we are
# requesting an impact for a period beyond the end of `y_post`, we would
# still need to pass the additional value for the `exog` variable during
# that period to `news`:
# >
# > ```python
# > res_post = res_pre.apply(y_post, exog=const_post)
# > news = res_pre.news(res_post, exog=[1.], start='2017-03', end='2017-04')
# > ```
# Now that we have computed the `news`, printing `summary` is a convenient
# way to see the results.
# Show the summary of the news results
print(news.summary())
# Because we have multiple variables, by default the summary only shows
# the news from updated data along with the total impacts.
#
# From the first table, we can see that our updated dataset contains three
# new data points, with most of the "news" from these data coming from the
# very low reading in March 2017.
#
# The second table shows that these three datapoints substantially
# impacted the estimate for PCE in March 2017 (which was not yet observed).
# This estimate was revised down by nearly 1.5 percentage points.
#
# The updated data also impacted the forecasts in the first out-of-sample
# month, April 2017. After incorporating the new data, the model's forecasts
# for CPI and PCE inflation in that month were revised down by 0.29 and 0.17
# percentage points, respectively.
# While these tables show the "news" and the total impacts, they do not
# show how much of each impact was caused by each updated datapoint. To see
# that information, we need to look at the details tables.
#
# One way to see the details tables is to pass `include_details=True` to
# the `summary` method. To avoid repeating the tables above, however, we'll
# just call the `summary_details` method directly.
print(news.summary_details())
# This table shows that most of the revisions to the estimate of PCE in
# April 2017, described above, came from the news associated with the CPI
# release in March 2017. By contrast, the CPI release in February had only a
# little effect on the April forecast, and the PCE release in February had
# essentially no effect.
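# Beyond the formatted summary tables, the underlying numbers are available as
# DataFrames on the results object (attribute names here follow the statsmodels
# `NewsResults` API; check `dir(news)` if your installed version differs).
print(news.total_impacts)      # total impact of the news on each impacted variable and date
print(news.details_by_impact)  # contribution of each individual updated datapoint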
# ### Bibliography
#
# Bańbura, Marta, <NAME>, and <NAME>. "Nowcasting."
# The Oxford Handbook of Economic Forecasting. July 8, 2011.
#
# Bańbura, Marta, <NAME>, <NAME>, and Lucrezia
# Reichlin. "Now-casting and the real-time data flow." In Handbook of
# economic forecasting, vol. 2, pp. 195-237. Elsevier, 2013.
#
# <NAME>, and <NAME>. "Maximum likelihood estimation of
# factor models on datasets with arbitrary pattern of missing data." Journal
# of Applied Econometrics 29, no. 1 (2014): 133-160.
#
# Knotek, <NAME>., and <NAME>. "Nowcasting US headline and core
# inflation." Journal of Money, Credit and Banking 49, no. 5 (2017):
# 931-968.
|
data/task_scripts/main/task00018.py | k101w/phyre_ODE | 432 | 11176638 | <filename>data/task_scripts/main/task00018.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import phyre.creator as creator_lib
@creator_lib.define_task_template(
bar_y=np.linspace(0.4, 0.65, 10),
bottom_jar_scale=np.linspace(0.15, 0.20, 3),
bottom_jar_x=np.linspace(0.25, 0.50, 5),
right_diag_angle=np.linspace(30, 70, 3),
bar_offset=np.linspace(0.1, 0.2, 3),
max_tasks=100,
search_params=dict(
required_flags=['BALL:GOOD_STABLE'],
excluded_flags=['BALL:TRIVIAL'],
diversify_tier='ball',
),
version='2',
)
def build_task(C, bar_y, bottom_jar_scale, bottom_jar_x, right_diag_angle,
bar_offset):
scene_width = C.scene.width
scene_height = C.scene.height
if bar_y >= 0.6 and bottom_jar_x > 0.4:
# Hard tasks.
raise creator_lib.SkipTemplateParams
# Add jar on ground.
jar = C.add(
'dynamic jar',
scale=bottom_jar_scale,
center_x=scene_width * bottom_jar_x,
bottom=0.)
ball_in_jar = C.add(
'dynamic ball',
scale=0.05 + bottom_jar_scale / 8,
center_x=jar.center_x,
bottom=10)
# Add top bar.
bar = C.add(
'static bar',
scale=1.0,
left=jar.left + bar_offset * C.scene.width,
bottom=scene_height * bar_y)
# Add jar on top of bar.
cover = C.add(
'dynamic jar', scale=0.12, angle=180.0, left=bar.left, bottom=bar.top)
ball = C.add(
'dynamic ball',
scale=0.05,
center_x=cover.left + cover.width * 0.5,
bottom=bar.top)
if bar.left < 3 * ball.width:
raise creator_lib.SkipTemplateParams
C.add(
'static bar',
scale=1,
angle=right_diag_angle,
bottom=bar.bottom,
left=max(0.7 * C.scene.width, cover.right + 10))
# create assignment:
C.update_task(
body1=ball,
body2=ball_in_jar,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.BALL)
|
tests/integration/cattletest/core/test_machine_driver.py | lifecontrol/cattle | 482 | 11176683 | <reponame>lifecontrol/cattle
from common_fixtures import * # NOQA
def test_machine_driver_name(admin_user_client):
n = random_str().replace('-', 'F')
url = 'http://foo/bar/docker-machine-driver-{}-v1.0-tar.gz'.format(n)
md = admin_user_client.create_machine_driver(url=url)
assert md.name == n
|
challenges/TAINTEDLOVE/poller/for-release/machine.py | pingjuiliao/cb-multios | 473 | 11176703 | #!/usr/bin/env python
#
# Copyright (C) 2014
# <NAME> <<EMAIL>>
# Narf Industries <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
from random import choice, randint
import string
import struct
import os
# NOTE: this is super inefficient, don't care.
def random_bytes(min, max, mod=255):
return ''.join(chr(choice(range(mod))) for _ in range(randint(min,max)))
def round_up_to_align(sz_alloc):
SZ_PAGE = 0x1000
return (sz_alloc + SZ_PAGE-1) & ~(SZ_PAGE-1)
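# e.g. round_up_to_align(0x1234) == 0x2000, while already page-aligned sizes
# such as 0x1000 are returned unchanged.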
class TaintedLove(Actions):
GLOBAL_DEBUG = False
GLOBAL_POV = False
MAX_RX_BYTES = 1400 # 65535 is just unwieldy in testing...
def entanglement_razzmatazz(self):
"""
This calculation implements the challenge #1 from README.md:
Ignore / Throw Out Most Input
"""
DEBUG = self.GLOBAL_DEBUG and True
POV = self.GLOBAL_POV and True
if DEBUG:
print "entanglement_razzmatazz()"
ID = "\x01"
MIN_RX_BYTES = 1+1337
GATE_MAGIC = 0xCAFEBABE
# Half of the time, we will satisfy the gate. For these times, the
# requested mapping size must be aligned so we don't trigger the vuln.
if POV:
gate_satisfy = 1
else:
gate_satisfy = self.chance(0.5)
if gate_satisfy:
gate = GATE_MAGIC
else:
while True:
gate = randint(0, 0xFFFFFFFF)
if GATE_MAGIC != gate:
break
GATE = struct.pack("<L", gate)
# Pick a random-sized allocation.
# NOTE: We don't test 0-sized allocs. A zero-sized allocation request
# will return EINVAL and terminate the calculation anyway.
sz_alloc = randint(1, 0x2000) # Play nice for testing
if gate_satisfy:
sz_alloc = round_up_to_align(sz_alloc)
if POV:
sz_alloc -= 1
if DEBUG:
print "sz_alloc (after rounding): %d" % sz_alloc
ALLOC = struct.pack("<H", sz_alloc)
# Pick a random 2-byte OOB loop initializer.
# This only comes into play if we satisfy the gate.
oob = randint(0, 0xFFFF)
OOB = struct.pack("<H", oob)
# Construct input.
buf = [ID] # next index: 1
buf += random_bytes(200-1, 200-1) # next index: 200
buf += GATE # size_t GATE_OFF = 200; (4 bytes); next index: 204
buf += random_bytes(100-4, 100-4) # next index: 300
buf += ALLOC # size_t ALLOC_OFF = 300; (2 bytes); next index: 302
buf += random_bytes(100-2, 100-2) # next index: 400
buf += OOB # size_t OOB_OFF = 400; (2 bytes); next index: 402
buf += random_bytes(MIN_RX_BYTES-len(buf), self.MAX_RX_BYTES-len(buf))
# Compute output.
output = "\x00" # Default if gate not satisfied.
if gate_satisfy:
mapping = {}
mapping[0] = ord(OOB[0])
mapping[1] = ord(OOB[1])
if DEBUG:
print "OOB inits: mapping[0] = 0x%02x, mapping[1] = 0x%02x" % \
(mapping[0], mapping[1])
for i in range(0, sz_alloc-2):
mapping[i+2] = (mapping[i+1] + mapping[i] + i) & 0xFF
if DEBUG:
print "OOB calc #%d: mapping[i+2] = 0x%02x" % \
(i, mapping[i+2])
output = struct.pack("B", mapping[sz_alloc-1])
# Send the request, check the answer.
self.write(buf)
self.read(length=1, expect=output)
def causality_poppycock(self):
"""
This calculation implements the challenge #2 from README.md:
Sanitize Input
"""
DEBUG = self.GLOBAL_DEBUG and True
POV = self.GLOBAL_POV and False
if DEBUG:
print "causality_poppycock()"
ID = "\x07"
MIN_RX_BYTES = 1+1337
SZ_VULN_BUF = 20
SKIP_PRIME = 13
GATE_PRIME = 37
MAGIC_OFF = SKIP_PRIME*GATE_PRIME
GATE_MAGIC_PASS = 0xAA
GATE_MAGIC_FAIL = 0xBB
TOTAL_BYTES = randint(MIN_RX_BYTES, self.MAX_RX_BYTES)
# Half of the time, we will satisfy the gate. We use this opportunity to
# directly overwrite a byte in the output, deterring cheeseball defense.
# NOTE: to make PoV, this must be true
if POV:
gate_satisfy = 1
else:
gate_satisfy = self.chance(0.5)
if DEBUG:
print "gate_satisfy: %d" % gate_satisfy
if gate_satisfy:
GATE_MAGIC = GATE_MAGIC_PASS
else:
GATE_MAGIC = GATE_MAGIC_FAIL
# First, we generate a candidate random buffer, then we fix up
# according to whether we want to satisfy the gate.
# We keep all the multiples of SKIP_PRIME modulo SZ_VULN_BUF so that
# behavior is identical to expected PATCHED version (the RELEASE
# version will not "sanitize" these modulos).
buf = [ID]
for _ in range(0, TOTAL_BYTES-SKIP_PRIME, SKIP_PRIME):
buf += random_bytes(SKIP_PRIME-1, SKIP_PRIME-1)
buf += random_bytes(1, 1, SZ_VULN_BUF-1)
if DEBUG:
print "poppycock | TOTAL_BYTES = %d, len(buf) = %d, TOTAL_BYTES-len(buf) = %d" % \
(TOTAL_BYTES, len(buf), TOTAL_BYTES-len(buf))
buf += random_bytes(TOTAL_BYTES-len(buf), TOTAL_BYTES-len(buf))
if POV:
buf[SKIP_PRIME*GATE_PRIME] = chr(255)
# Solve for gate FIXUP.
GATE_CURR = 0
for i in range(0, TOTAL_BYTES, GATE_PRIME):
if POV and i == SKIP_PRIME*GATE_PRIME:
GATE_CURR = (GATE_CURR + (ord(buf[i]))) & 0xFF
else:
GATE_CURR = (GATE_CURR + (ord(buf[i]) % SZ_VULN_BUF)) & 0xFF
if DEBUG:
print "poppycock | ord(buf[%04d]) = 0x%02x, ord(buf[%04d]) %% SZ_VULN_BUF = 0x%02x" % (i, ord(buf[i]), i, ord(buf[i]) % SZ_VULN_BUF)
print "poppycock | #%08d: after adding buf[%04d] = 0x%02x, GATE_CURR = 0x%02x" % (i, i, ord(buf[i]), GATE_CURR)
FIXUP = (GATE_MAGIC - GATE_CURR) & 0xFF
if DEBUG:
print "poppycock | final GATE_CURR = 0x%02x, would need to add 0x%02x (%d) of FIXUP to match 0x%02x" % (GATE_CURR, FIXUP, FIXUP, GATE_MAGIC)
# Apply FIXUP. We need to spread the fixup over several GATE_PRIME
# offsets because a proper patch will mod them SZ_VULN_BUF.
# There is a (slim) possibility, however, that there are insufficient
# deltas to apply the FIXUP. In this case, we take the nuclear option:
# 1) zero out of all of the GATE_PRIME multiples
# 2) try again
# Yes, this is very messy.
i = GATE_PRIME
ORIG_FIXUP = FIXUP
while (FIXUP):
# This happens if there's insufficient delta to apply FIXUP
if i > len(buf)-1:
# Go nuclear
if DEBUG:
print "poppycock | deltas insufficient for FIXUP; nuclear option..."
buf[0] = ID
for j in range(GATE_PRIME, TOTAL_BYTES-GATE_PRIME, GATE_PRIME):
buf[j] = chr(0)
# Try again
i = GATE_PRIME
FIXUP = ORIG_FIXUP
continue
if DEBUG:
print "poppycock | applying FIXUP; i = %d" % i
curr = ord(buf[i]) % SZ_VULN_BUF
delta = min((SZ_VULN_BUF-1) - curr, FIXUP)
if DEBUG:
print "poppycock | adding delta = %d to buf[%04d]" % (delta, i)
buf[i] = chr(curr + delta)
FIXUP -= delta
i += GATE_PRIME
# Compute vuln_buf[]
vuln_buf = [0] * SZ_VULN_BUF
for i in range(1, TOTAL_BYTES):
tmp1 = ord(buf[i]) % SZ_VULN_BUF
tmp2 = ( tmp1 ^ vuln_buf[ (tmp1+1) % SZ_VULN_BUF ] ) % SZ_VULN_BUF
# if DEBUG:
# print "tmp2 = ( tmp1{0x%02x} ^ vuln_buf[ (tmp1+1) %% SZ_VULN_BUF ]{0x%02x} ) %% SZ_VULN_BUF" % (tmp1, vuln_buf[ (tmp2+1) % SZ_VULN_BUF ])
# print "poppycock | #%08d: vuln_buf[%02d] = 0x%02x" % (i, tmp1, tmp2)
vuln_buf[tmp1] = tmp2
if gate_satisfy:
# If we satisfy the gate, we have one final vuln_buf write (the
# vulnerable one). This will be reflected in the output.
vuln_buf[ord(buf[MAGIC_OFF]) % SZ_VULN_BUF] = 0xAA
output = 0
for i in range(0, SZ_VULN_BUF-3, 4):
output += vuln_buf[i+0] << 0 | \
vuln_buf[i+1] << 8 | \
vuln_buf[i+2] << 16 | \
vuln_buf[i+3] << 24
if DEBUG:
print "poppycock | #%08d: output = 0x%08x" % (i, output)
if DEBUG:
print "poppycock | final output: 0x%08x" % output
self.write(buf)
self.read(length=4, expect=struct.pack("<L", output))
def heisenberg_hooey(self):
"""
This calculation implements the challenge #3 from README.md:
Use "Unusual" Means to Terminate or Propagate Taint
From the poller perspective, this is a relatively simple function.
Much of the input is thrown out, but in ways that may trip up a CRS.
Really all we care about are the PROPGATE items.
The CONSTIFY items are irrelevant.
"""
DEBUG = self.GLOBAL_DEBUG and True
POV = self.GLOBAL_POV and True
if DEBUG:
print "heisenberg_hooey()"
ID = "\x03"
MIN_RX_BYTES = 1+1337
if POV:
gate_satisfy = 1
else:
gate_satisfy = self.chance(0.5)
if DEBUG:
print "gate_satisfy: %d" % gate_satisfy
buf = [ID]
buf += random_bytes(MIN_RX_BYTES-len(buf), self.MAX_RX_BYTES-len(buf))
# The following are interesting offsets; the others are ignored:
# size_t SYSCALL_PROP_OFF = 1+600;
# - vuln: 0xDE after arithmetic
# - arith: val = val*2 + 1
# - vuln: 0x6F before arithmetic
SYSCALL_PROP_OFF = 1+600
# size_t FLOAT_PROP_OFF = 1+400;
# - vuln: 0xAD after arithmetic.
# - arith: val = val*2
# - vuln: 0x56 before arithmetic
FLOAT_PROP_OFF = 1+400
# size_t BE_OFF = 1+800;
# gate: must be 0xBE
BE_OFF = 1+800
# size_t EF_OFF = 1+900;
# gate: must be 0xEF
EF_OFF = 1+900
# Another offset that factors into the output:
# size_t VULN_OFF = 1+1000;
# - factors into output
VULN_OFF = 1+1000
# If we pass the gate, we will do an assignment.
# If we pass the 'vuln', this assignment will cause a crash.
if gate_satisfy:
buf[BE_OFF] = "\xBE"
buf[EF_OFF] = "\xEF"
else:
buf[BE_OFF] = "\x42"
buf[EF_OFF] = "\x42"
if POV:
buf[SYSCALL_PROP_OFF] = "\x56"
buf[FLOAT_PROP_OFF] = "\x6F"
buf[BE_OFF] = "\xBE"
buf[EF_OFF] = "\xEF"
else:
# There's a (very small) possibility (2^32) we tripped the gate and
# will exercise vuln.
# We need to negate this possibility in the poller.
# if 0x56 == ord(buf[SYSCALL_PROP_OFF]) and \
# 0x6F == ord(buf[FLOAT_PROP_OFF]) and \
if 0xBE == ord(buf[BE_OFF]) and \
0xEF == ord(buf[EF_OFF]):
buf[SYSCALL_PROP_OFF] = "\xAA"
# If we tripped the gate, we must do the assignment:
if 0xBE == ord(buf[BE_OFF]) and \
0xEF == ord(buf[EF_OFF]):
buf[VULN_OFF] = "\x42"
self.write(buf)
# Apply transformations to offsets as CB does:
buf[SYSCALL_PROP_OFF] = chr((ord(buf[SYSCALL_PROP_OFF]) * 2 + 1) & 0xFF)
buf[FLOAT_PROP_OFF] = chr(((ord(buf[FLOAT_PROP_OFF]) * 2)) & 0xFF)
# Calculate anticipated output.
output = ( ord(buf[FLOAT_PROP_OFF]) ^ \
(ord(buf[SYSCALL_PROP_OFF]) + ord(buf[VULN_OFF])) \
) & 0xFFFFFFFF
if DEBUG:
print "buf[SYSCALL_PROP_OFF] = 0x%02x, buf[FLOAT_PROP_OFF] = 0x%02x, buf[VULN_OFF] = 0x%02x, output = 0x%08x" % \
( ord(buf[SYSCALL_PROP_OFF]), \
ord(buf[FLOAT_PROP_OFF]), \
ord(buf[VULN_OFF]), \
output)
# If we pass the gate, it sends extra data:
if gate_satisfy:
self.read(length=4, expect=struct.pack("<L", 0xDEADBEEF))
self.read(length=4, expect=struct.pack("<L", output))
def relativistic_jabberwock(self):
"""
This calculation implements the challenge #4 from README.md:
Symbolic Memory Accesses
"""
DEBUG = self.GLOBAL_DEBUG and True
POV = self.GLOBAL_POV and True
if DEBUG:
print "relativistic_jabberwock()"
ID = "\x04"
MIN_RX_BYTES = 1+1337
if POV:
gate_satisfy = 1
else:
gate_satisfy = self.chance(0.5)
if DEBUG:
print "gate_satisfy: %d" % gate_satisfy
# First, we get a random buffer, then we fix it up.
buf = [ID]
buf += random_bytes(MIN_RX_BYTES-len(buf), self.MAX_RX_BYTES-len(buf))
# Relevant offsets:
SYM_PROP_PTR_1 = 1+100 # size_t SYM_PROP_PTR_1 = 1+100;
SYM_PROP_PTR_2 = 1+200 # size_t SYM_PROP_PTR_2 = 1+200;
SYM_PROP_VAL = 1+70 # size_t SYM_PROP_VAL = 1+70;
SYM_CONST_PTR_1 = 1+50 # size_t SYM_CONST_PTR_1 = 1+50;
SYM_CONST_PTR_2 = 1+60 # size_t SYM_CONST_PTR_2 = 1+60;
VULN_OFF = 1+400 # size_t VULN_OFF = 1+400;
GATE_MAGIC_OFF = 1+10
# In order to satisfy the gate, we need to:
# 1) propagate to rx_buf[SYM_PROP_PTR_1] & point to GATE_MAGIC
# 2) re-constify at rx_buf[rx_buf[SYM_CONST_PTR_1]]
if gate_satisfy:
# 1) propagate to rx_buf[SYM_PROP_PTR_1] & point to GATE_MAGIC
buf[SYM_PROP_PTR_1] = chr(1+40) # unused
buf[SYM_PROP_PTR_2] = chr(SYM_PROP_PTR_1)
# write GATE_MAGIC_OFF into buf[buf[SYM_PROP_PTR_1]]
buf[SYM_PROP_VAL] = chr(GATE_MAGIC_OFF)
# writing backing GATE_MAGIC values
buf[GATE_MAGIC_OFF+0] = "\xd5"
buf[GATE_MAGIC_OFF+1] = "\x00"
buf[GATE_MAGIC_OFF+2] = "\x00"
buf[GATE_MAGIC_OFF+3] = "\xd0"
############################################################
# 2) re-constify at rx_buf[rx_buf[SYM_CONST_PTR_1]]
# - rx_buf[rx_buf[SYM_CONST_PTR_1]] is constified to 0
# - rx_buf[rx_buf[SYM_CONST_PTR_2]] is constified to 42
# By having them alias the same memory, we satisfy.
buf[SYM_CONST_PTR_1] = chr(1+20)
buf[SYM_CONST_PTR_2] = chr(1+20)
# Finally, we gotta make sure we do/don't trip the vuln.
if POV:
buf[VULN_OFF] = chr(255)
else:
buf[VULN_OFF] = chr(1)
# NOTE: we never try making this 0... let's cut them some slack.
else:
# Make sure #1 doesn't happen by just pointing them both to some
# place that isn't used. Throw CRSs a bone.
buf[SYM_PROP_PTR_1] = chr(1+30)
buf[SYM_PROP_PTR_2] = chr(1+31)
# Make sure #2 doesn't happen.
buf[SYM_CONST_PTR_1] = chr(1+32)
# Write out our buffer.
self.write(buf)
# Apply same transformations as CB.
buf[SYM_PROP_PTR_1] = chr(0);
buf[ord(buf[SYM_PROP_PTR_1])+0] = chr(0);
buf[ord(buf[SYM_PROP_PTR_1])+1] = chr(0);
buf[ord(buf[SYM_PROP_PTR_1])+2] = chr(0);
buf[ord(buf[SYM_PROP_PTR_1])+3] = chr(0);
buf[ord(buf[SYM_PROP_PTR_2])] = buf[SYM_PROP_VAL];
buf[ord(buf[SYM_CONST_PTR_1])] = chr(0);
buf[ord(buf[SYM_CONST_PTR_2])] = chr(0x42);
if gate_satisfy:
buf[1000] = chr(0x42);
# Calculate expected output
if gate_satisfy:
# NOTE: the first item will always be index 1000 because we don't
# want to trip vuln.
output = ( ord(buf[1000]) << 0 | \
ord(buf[SYM_CONST_PTR_1]) << 8 | \
ord(buf[ord(buf[SYM_CONST_PTR_1])]) << 16 | \
ord(buf[SYM_PROP_VAL]) << 24) & 0xFFFFFFFF
else:
output = ~(( 0 << 0 | \
ord(buf[SYM_CONST_PTR_1]) << 8 | \
ord(buf[ord(buf[SYM_CONST_PTR_1])]) << 16 | \
ord(buf[SYM_PROP_VAL]) << 24) & 0xFFFFFFFF) & 0xFFFFFFFF
self.read(length=4, expect=struct.pack("<L", output))
def exit(self):
"""
        Invoke exit functionality, terminating test.
"""
DEBUG = True and self.GLOBAL_DEBUG
if DEBUG:
print "exit()"
ID = ord("\x00")
EXIT_CODE = randint(1, 255)
self.write(struct.pack("BB", ID, EXIT_CODE))
self.read(length=1, expect=struct.pack("B", EXIT_CODE))
# This node is terminal
return -1
def start(self):
"""
        Initialize state; happens once, then goes to dispatch.
"""
pass
|
tools/sts-job-manager/constants/status.py | ruchirjain86/professional-services | 2,116 | 11176719 | <reponame>ruchirjain86/professional-services
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class STATUS():
DONE = 'done'
RUNNING = 'running'
WAITING = 'waiting'
PAUSED = 'paused'
ERROR = 'error'
KNOWN_STATUSES = frozenset([
STATUS.DONE,
STATUS.RUNNING,
STATUS.WAITING,
STATUS.PAUSED,
STATUS.ERROR
])
def sts_operation_status_to_table_status(s: str):
switch = {
'IN_PROGRESS': STATUS.RUNNING,
'PAUSED': STATUS.PAUSED,
'SUCCESS': STATUS.DONE,
'FAILED': STATUS.ERROR,
'ABORTED': STATUS.WAITING
}
status = switch.get(s.upper())
if not status:
raise Exception('Unknown status', s)
return status
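# A minimal usage sketch (illustrative, not part of the module): normalize raw
# STS operation statuses before persisting them to the job table.
if __name__ == '__main__':
    assert sts_operation_status_to_table_status('SUCCESS') == STATUS.DONE
    assert sts_operation_status_to_table_status('in_progress') == STATUS.RUNNING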
|
leetcode/4.median-of-two-sorted-arrays.py | geemaple/algorithm | 177 | 11176778 | #
# @lc app=leetcode id=4 lang=python
#
# [4] Median of Two Sorted Arrays
#
# https://leetcode.com/problems/median-of-two-sorted-arrays/description/
#
# algorithms
# Hard (28.77%)
# Total Accepted: 644.1K
# Total Submissions: 2.2M
# Testcase Example: '[1,3]\n[2]'
#
# There are two sorted arrays nums1 and nums2 of size m and n respectively.
#
# Find the median of the two sorted arrays. The overall run time complexity
# should be O(log (m+n)).
#
# You may assume nums1 and nums2 cannot be both empty.
#
# Example 1:
#
#
# nums1 = [1, 3]
# nums2 = [2]
#
# The median is 2.0
#
#
# Example 2:
#
#
# nums1 = [1, 2]
# nums2 = [3, 4]
#
# The median is (2 + 3)/2 = 2.5
#
#
#
# log(min(M, N))
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if len(nums1) > len(nums2):
return self.findMedianSortedArrays(nums2, nums1)
m = len(nums1)
n = len(nums2)
start = 0
end = m
while (start <= end):
mid = start + (end - start) // 2
leftNums1 = nums1[mid - 1] if mid >= 1 else float('-inf')
rightNums1 = nums1[mid] if mid < len(nums1) else float('inf')
anchor = (m + n) // 2 - mid
leftNums2 = nums2[anchor - 1] if anchor >= 1 else float('-inf')
rightNums2 = nums2[anchor] if anchor < len(nums2) else float('inf')
if leftNums1 <= rightNums2 and leftNums2 <= rightNums1:
if (m + n) % 2 == 1:
return min(rightNums1, rightNums2)
else:
return (max(leftNums1, leftNums2) + min(rightNums1, rightNums2)) / 2.0
if leftNums1 > rightNums2:
end = mid - 1
if leftNums2 > rightNums1:
start = mid + 1
class Solution2(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
count = len(nums1) + len(nums2)
if (count % 2 == 1):
return self.find_kth_num(nums1, 0, nums2, 0, (count + 1) // 2)
else:
mid = count // 2
return (self.find_kth_num(nums1, 0, nums2, 0, mid) + self.find_kth_num(nums1, 0, nums2, 0, mid + 1)) / 2.0
def find_kth_num(self, nums1, start1, nums2, start2, k):
if start1 >= len(nums1):
return nums2[start2 + k - 1]
if start2 >= len(nums2):
return nums1[start1 + k - 1]
if k == 1:
return min(nums1[start1], nums2[start2])
half = k // 2
left_half = nums1[start1 + half - 1] if start1 + half - 1 < len(nums1) else float('inf')
right_half = nums2[start2 + half - 1] if start2 + half - 1 < len(nums2) else float('inf')
if left_half <= right_half:
return self.find_kth_num(nums1, start1 + half, nums2, start2, k - half)
else:
return self.find_kth_num(nums1, start1, nums2, start2 + half, k - half)
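# Quick self-check (not part of a LeetCode submission): exercise both
# implementations on the examples from the problem statement.
if __name__ == '__main__':
    for solver in (Solution(), Solution2()):
        assert solver.findMedianSortedArrays([1, 3], [2]) == 2.0
        assert solver.findMedianSortedArrays([1, 2], [3, 4]) == 2.5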
|
pulsar/cmds/__init__.py | PyCN/pulsar | 1,410 | 11176827 | """Useful distutils commands for continuous integration and deployment
These commands work in python 2 too
"""
from .test import Bench, Test
from .linux_wheels import ManyLinux
from .pypi_version import PyPi
from .s3data import S3Data
__all__ = [
'Bench',
'Test',
'ManyLinux',
'PyPi',
'S3Data'
]
|
tests/file/test_handle.py | Ross1503/azure-storage-python | 348 | 11176834 | <gh_stars>100-1000
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
from azure.storage.file import (
FileService,
)
from tests.testcase import (
StorageTestCase,
record,
TestMode,
)
# ------------------------------------------------------------------------------
TEST_SHARE_NAME = 'test'
# ------------------------------------------------------------------------------
class StorageHandleTest(StorageTestCase):
def setUp(self):
super(StorageHandleTest, self).setUp()
self.fs = self._create_storage_service(FileService, self.settings)
def tearDown(self):
return super(StorageHandleTest, self).tearDown()
def _validate_handles(self, handles):
# Assert
self.assertIsNotNone(handles)
self.assertGreaterEqual(len(handles), 1)
self.assertIsNotNone(handles[0])
# verify basic fields
# path may or may not be present
# last_connect_time_string has been missing in the test
self.assertIsNotNone(handles[0].handle_id)
self.assertIsNotNone(handles[0].file_id)
self.assertIsNotNone(handles[0].parent_id)
self.assertIsNotNone(handles[0].session_id)
self.assertIsNotNone(handles[0].client_ip)
self.assertIsNotNone(handles[0].open_time)
@record
def test_list_handles_on_share(self):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if not TestMode.need_recording_file(self.test_mode):
return
# Act
handles = list(self.fs.list_handles(TEST_SHARE_NAME, recursive=True))
# Assert
self._validate_handles(handles)
#
@record
def test_list_handles_on_share_snapshot(self):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if not TestMode.need_recording_file(self.test_mode):
return
# Act
handles = list(self.fs.list_handles(TEST_SHARE_NAME, recursive=True,
snapshot="2019-05-08T23:27:24.0000000Z"))
# Assert
self._validate_handles(handles)
@record
def test_list_handles_with_marker(self):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if not TestMode.need_recording_file(self.test_mode):
return
# Act
handle_generator = self.fs.list_handles(TEST_SHARE_NAME, recursive=True, max_results=1)
# Assert
self.assertIsNotNone(handle_generator.next_marker)
handles = list(handle_generator)
self._validate_handles(handles)
# Note down a handle that we saw
old_handle = handles[0]
# Continue listing
remaining_handles = list(
self.fs.list_handles(TEST_SHARE_NAME, recursive=True, marker=handle_generator.next_marker))
self._validate_handles(handles)
# Make sure the old handle did not appear
# In other words, the marker worked
old_handle_not_present = all([old_handle.handle_id != handle.handle_id for handle in remaining_handles])
self.assertTrue(old_handle_not_present)
@record
def test_list_handles_on_directory(self):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if not TestMode.need_recording_file(self.test_mode):
return
# Act
handles = list(self.fs.list_handles(TEST_SHARE_NAME, directory_name='wut', recursive=True))
# Assert
self._validate_handles(handles)
# Act
handles = list(self.fs.list_handles(TEST_SHARE_NAME, directory_name='wut', recursive=False))
# Assert recursive option is functioning when disabled
self.assertTrue(len(handles) == 0)
@record
def test_list_handles_on_file(self):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if not TestMode.need_recording_file(self.test_mode):
return
# Act
handles = list(self.fs.list_handles(TEST_SHARE_NAME, directory_name='wut', file_name='bla.txt'))
# Assert
self._validate_handles(handles)
@record
def test_close_single_handle(self):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if not TestMode.need_recording_file(self.test_mode):
return
# Arrange
handles = list(self.fs.list_handles(TEST_SHARE_NAME, recursive=True))
self._validate_handles(handles)
handle_id = handles[0].handle_id
# Act
num_closed = list(self.fs.close_handles(TEST_SHARE_NAME, handle_id=handle_id))
# Assert 1 handle has been closed
self.assertEqual(1, num_closed[0])
@record
def test_close_all_handle(self):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if not TestMode.need_recording_file(self.test_mode):
return
# Arrange
handles = list(self.fs.list_handles(TEST_SHARE_NAME, recursive=True))
self._validate_handles(handles)
# Act
total_num_handle_closed = 0
for num_closed in self.fs.close_handles(TEST_SHARE_NAME, handle_id="*", recursive=True):
total_num_handle_closed += num_closed
# Assert at least 1 handle has been closed
self.assertTrue(total_num_handle_closed > 1)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
src/genie/libs/parser/iosxe/tests/ShowCtsRoleBasedCounters/cli/equal/golden_output1_expected.py | balmasea/genieparser | 204 | 11176842 | <filename>src/genie/libs/parser/iosxe/tests/ShowCtsRoleBasedCounters/cli/equal/golden_output1_expected.py
expected_output = {
"cts_rb_count": {
1: {
"src_group": "*",
"dst_group": "*",
"sw_denied_count": 0,
"hw_denied_count": 0,
"sw_permit_count": 2,
"hw_permit_count": 30802626587,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
2: {
"src_group": "2",
"dst_group": "0",
"sw_denied_count": 0,
"hw_denied_count": 4794060,
"sw_permit_count": 0,
"hw_permit_count": 0,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
3: {
"src_group": "7",
"dst_group": "0",
"sw_denied_count": 0,
"hw_denied_count": 0,
"sw_permit_count": 0,
"hw_permit_count": 0,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
4: {
"src_group": "99",
"dst_group": "0",
"sw_denied_count": 0,
"hw_denied_count": 0,
"sw_permit_count": 0,
"hw_permit_count": 0,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
5: {
"src_group": "100",
"dst_group": "0",
"sw_denied_count": 0,
"hw_denied_count": 0,
"sw_permit_count": 0,
"hw_permit_count": 0,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
6: {
"src_group": "103",
"dst_group": "0",
"sw_denied_count": 0,
"hw_denied_count": 0,
"sw_permit_count": 0,
"hw_permit_count": 0,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
7: {
"src_group": "104",
"dst_group": "0",
"sw_denied_count": 0,
"hw_denied_count": 0,
"sw_permit_count": 0,
"hw_permit_count": 0,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
8: {
"src_group": "2",
"dst_group": "2",
"sw_denied_count": 0,
"hw_denied_count": 4,
"sw_permit_count": 0,
"hw_permit_count": 0,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
9: {
"src_group": "7",
"dst_group": "2",
"sw_denied_count": 0,
"hw_denied_count": 0,
"sw_permit_count": 0,
"hw_permit_count": 0,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
10: {
"src_group": "99",
"dst_group": "2",
"sw_denied_count": 0,
"hw_denied_count": 0,
"sw_permit_count": 0,
"hw_permit_count": 0,
"sw_monitor_count": 0,
"hw_monitor_count": 0,
},
}
}
|
sentence-encoding/src/sentence_encoder.py | dumpmemory/serverless-transformers-on-aws-lambda | 103 | 11176843 | import warnings
warnings.filterwarnings("ignore")
from functools import lru_cache
from sentence_transformers import SentenceTransformer
from src import config, utils
logger = utils.create_logger(project_name=config.PREDICTION_TYPE, level="INFO")
class SentenceEncoder:
def __init__(self):
_ = self.get_sent_encoder(model_name=config.DEFAULT_MODEL_NAME) #warm up
@staticmethod
@lru_cache(maxsize=config.CACHE_MAXSIZE)
def get_sent_encoder(model_name: str) -> SentenceTransformer:
"""loads and returns a SentenceTransformer model specified by model_name argument
Args:
model_name (str): Indicating the name of the model
Returns:
SentenceTransformer model
"""
logger.info(f"Loading model: {model_name}")
model = SentenceTransformer(model_name)
return model
def get_clean_text(self, text: str) -> str:
"""Clean the text
Args:
text (str): text
Returns:
str: clean text
"""
return text.strip().lower()
    def __call__(self, request: dict) -> dict:
        """Compute embeddings for the given list of sentences
        Args:
            request (dict): request containing the list of sentences to encode
        Returns:
            dict: the list of embedding vectors, one per sentence (dimension (384,) for the default model)
"""
texts = [self.get_clean_text(text) for text in request["texts"]]
logger.info(f"Generating embeddings for {len(texts)} sentences")
model_name = request.get('model_name', config.DEFAULT_MODEL_NAME)
sentence_encoder = self.get_sent_encoder(model_name)
embeddings = sentence_encoder.encode(texts)
return {
"vectors": embeddings
}
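# A minimal local usage sketch (illustrative only; assumes the `src.config`
# constants imported above are available and that the default model can be
# downloaded by sentence-transformers):
if __name__ == "__main__":
    encoder = SentenceEncoder()
    response = encoder({"texts": ["Serverless transformers on AWS Lambda."]})
    print(response["vectors"].shape)  # e.g. (1, 384) for a MiniLM-style default model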
|
vlogging/tests/basic.py | emrahyldrm/visual-logging | 108 | 11176849 | import unittest
import sys
if sys.path[0].endswith("dummies"):
sys.path = sys.path[1:]
import vlogging
class BasicTestCase(unittest.TestCase):
def test_nothing(self):
s = str(vlogging.VisualRecord())
self.assertTrue("<hr/>" in s)
def test_text_only(self):
s = str(vlogging.VisualRecord(title="title", footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
def test_all_renderers(self):
self.assertEqual(len(vlogging.renderers), 3)
def test_invalid_images(self):
s = str(vlogging.VisualRecord(
title="title",
imgs="foobar",
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 0)
s = str(vlogging.VisualRecord(
title="title",
imgs=["foobar", 1, 2, dict()],
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 0)
def test_pil(self):
from PIL import Image
pil_image = Image.open('vlogging/tests/lenna.jpg')
s = str(vlogging.VisualRecord(
title="title",
imgs=pil_image,
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertTrue("image/png" in s)
self.assertEqual(s.count("<img"), 1)
s = str(vlogging.VisualRecord(
title="title",
imgs=[pil_image],
footnotes="footnotes"))
self.assertEqual(s.count("<img"), 1)
s = str(vlogging.VisualRecord(
title="title",
imgs=[pil_image, pil_image],
footnotes="footnotes",
fmt="jpeg"))
self.assertTrue("image/jpeg" in s)
self.assertEqual(s.count("<img"), 2)
def test_opencv(self):
import cv2
cv_image = cv2.imread('vlogging/tests/lenna.jpg')
s = str(vlogging.VisualRecord(
title="title",
imgs=cv_image,
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 1)
s = str(vlogging.VisualRecord(
title="title",
imgs=[cv_image],
footnotes="footnotes"))
self.assertEqual(s.count("<img"), 1)
s = str(vlogging.VisualRecord(
title="title",
imgs=[cv_image, cv_image],
footnotes="footnotes"))
self.assertEqual(s.count("<img"), 2)
def test_pylab_basic(self):
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0., 5., 0.2)
plt.plot(t, t, 'r--', t, t ** 2, 'bs', t, t ** 3, 'g^')
s = str(vlogging.VisualRecord(
title="title",
imgs=plt,
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 1)
def test_pylab_figure(self):
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0., 5., 0.2)
fig = plt.figure()
plt.plot(t, t, 'r--', t, t ** 2, 'bs', t, t ** 3, 'g^')
s = str(vlogging.VisualRecord(
title="title",
imgs=fig,
footnotes="footnotes"))
self.assertTrue("title" in s)
self.assertTrue("footnotes" in s)
self.assertTrue("<pre>" in s)
self.assertEqual(s.count("<img"), 1)
|
test/test_ig_interface.py | stungkit/TradingBot | 218 | 11176850 | <reponame>stungkit/TradingBot
import pytest
import toml
from common.MockRequests import (
ig_request_account_details,
ig_request_confirm_trade,
ig_request_login,
ig_request_market_info,
ig_request_navigate_market,
ig_request_open_positions,
ig_request_prices,
ig_request_search_market,
ig_request_set_account,
ig_request_trade,
ig_request_watchlist,
)
from tradingbot.components import Configuration, Interval, TradeDirection
from tradingbot.components.broker import IGInterface, InterfaceNames
from tradingbot.interfaces import Market, MarketHistory, Position
@pytest.fixture
def config():
with open("test/test_data/trading_bot.toml", "r") as f:
config = toml.load(f)
# Inject ig_interface as active interface in the config file
config["stocks_interface"]["active"] = InterfaceNames.IG_INDEX.value
config["account_interface"]["active"] = InterfaceNames.IG_INDEX.value
return Configuration(config)
@pytest.fixture
def ig(requests_mock, config):
"""
Returns a instance of IGInterface
"""
ig_request_login(requests_mock)
ig_request_set_account(requests_mock)
return IGInterface(config)
# No need to use requests_mock as "ig" fixture already does that
def test_authenticate(ig):
# Call function to test
result = ig.authenticate()
# Assert results
assert ig.authenticated_headers["CST"] == "mock"
assert ig.authenticated_headers["X-SECURITY-TOKEN"] == "mock"
assert result is True
def test_authenticate_fail(requests_mock, ig):
ig_request_login(requests_mock, fail=True)
ig_request_set_account(requests_mock, fail=True)
# Call function to test
result = ig.authenticate()
# Assert results
assert result is False
# No need to use requests_mock fixture
def test_set_default_account(ig):
result = ig.set_default_account("mock")
assert result is True
def test_set_default_account_fail(requests_mock, ig):
ig_request_set_account(requests_mock, fail=True)
result = ig.set_default_account("mock")
assert result is False
def test_get_account_balances(requests_mock, ig):
ig_request_account_details(requests_mock)
balance, deposit = ig.get_account_balances()
assert balance is not None
assert deposit is not None
assert balance == 16093.12
assert deposit == 10000.0
def test_get_account_balances_fail(requests_mock, ig):
ig_request_account_details(requests_mock, fail=True)
with pytest.raises(RuntimeError):
balance, deposit = ig.get_account_balances()
def test_get_open_positions(ig, requests_mock):
ig_request_open_positions(requests_mock)
positions = ig.get_open_positions()
assert positions is not None
assert isinstance(positions, list)
assert len(positions) > 0
assert isinstance(positions[0], Position)
def test_get_open_positions_fail(ig, requests_mock):
ig_request_open_positions(requests_mock, fail=True)
with pytest.raises(RuntimeError):
_ = ig.get_open_positions()
def test_get_market_info(ig, requests_mock):
ig_request_market_info(requests_mock)
info = ig.get_market_info("mock")
assert info is not None
assert isinstance(info, Market)
def test_get_market_info_fail(ig, requests_mock):
ig_request_market_info(requests_mock, fail=True)
with pytest.raises(RuntimeError):
_ = ig.get_market_info("mock")
def test_search_market(ig, requests_mock):
ig_request_market_info(requests_mock)
ig_request_search_market(requests_mock)
markets = ig.search_market("mock")
assert markets is not None
assert isinstance(markets, list)
assert len(markets) == 8
assert isinstance(markets[0], Market)
def test_search_market_fail(ig, requests_mock):
ig_request_search_market(requests_mock, fail=True)
with pytest.raises(RuntimeError):
_ = ig.search_market("mock")
def test_get_prices(ig, requests_mock):
ig_request_market_info(requests_mock)
ig_request_prices(requests_mock)
p = ig.get_prices(ig.get_market_info("mock"), Interval.HOUR, 10)
assert p is not None
assert isinstance(p, MarketHistory)
assert len(p.dataframe) > 0
def test_get_prices_fail(ig, requests_mock):
ig_request_market_info(requests_mock)
ig_request_prices(requests_mock, fail=True)
with pytest.raises(RuntimeError):
_ = ig.get_prices(ig.get_market_info("mock"), Interval.HOUR, 10)
def test_trade(ig, requests_mock):
ig_request_trade(requests_mock)
ig_request_confirm_trade(requests_mock)
result = ig.trade("mock", TradeDirection.BUY, 0, 0)
assert result
def test_trade_fail(ig, requests_mock):
ig_request_trade(requests_mock, fail=True)
ig_request_confirm_trade(requests_mock, fail=True)
result = ig.trade("mock", TradeDirection.BUY, 0, 0)
assert result is False
def test_confirm_order(ig, requests_mock):
ig_request_confirm_trade(requests_mock)
result = ig.confirm_order("123456789")
assert result
def test_confirm_order_fail(ig, requests_mock):
ig_request_confirm_trade(
requests_mock,
data={"dealId": "123456789", "dealStatus": "REJECTED", "reason": "FAIL"},
)
result = ig.confirm_order("123456789")
assert result is False
ig_request_confirm_trade(requests_mock, fail=True)
with pytest.raises(RuntimeError):
result = ig.confirm_order("123456789")
def test_close_position(ig, requests_mock):
ig_request_trade(requests_mock)
ig_request_confirm_trade(requests_mock)
pos = Position(
deal_id="123456789",
size=1,
create_date="mock",
direction=TradeDirection.BUY,
level=100,
limit=110,
stop=90,
currency="GBP",
epic="mock",
market_id=None,
)
result = ig.close_position(pos)
assert result
def test_close_position_fail(ig, requests_mock):
ig_request_trade(requests_mock, fail=True)
ig_request_confirm_trade(requests_mock, fail=True)
pos = Position(
deal_id="123456789",
size=1,
create_date="mock",
direction=TradeDirection.BUY,
level=100,
limit=110,
stop=90,
currency="GBP",
epic="mock",
market_id=None,
)
result = ig.close_position(pos)
assert result is False
def test_close_all_positions(ig, requests_mock):
ig_request_open_positions(requests_mock)
ig_request_trade(requests_mock)
ig_request_confirm_trade(requests_mock)
result = ig.close_all_positions()
assert result
def test_close_all_positions_fail(ig, requests_mock):
ig_request_open_positions(requests_mock)
ig_request_trade(requests_mock, fail=True)
ig_request_confirm_trade(requests_mock)
result = ig.close_all_positions()
assert result is False
ig_request_open_positions(requests_mock)
ig_request_trade(requests_mock)
ig_request_confirm_trade(
requests_mock,
data={"dealId": "123456789", "dealStatus": "FAIL", "reason": "FAIL"},
)
result = ig.close_all_positions()
assert result is False
def test_get_account_used_perc(ig, requests_mock):
ig_request_account_details(requests_mock)
perc = ig.get_account_used_perc()
assert perc is not None
assert perc == 62.138354775208285
def test_get_account_used_perc_fail(ig, requests_mock):
ig_request_account_details(requests_mock, fail=True)
with pytest.raises(RuntimeError):
_ = ig.get_account_used_perc()
def test_navigate_market_node_nodes(ig, requests_mock):
ig_request_navigate_market(requests_mock)
data = ig.navigate_market_node("")
assert "nodes" in data
assert len(data["nodes"]) == 3
assert data["nodes"][0]["id"] == "668394"
assert data["nodes"][0]["name"] == "Cryptocurrency"
assert data["nodes"][1]["id"] == "77976799"
assert data["nodes"][1]["name"] == "Options (Australia 200)"
assert data["nodes"][2]["id"] == "89291253"
assert data["nodes"][2]["name"] == "Options (US Tech 100)"
assert len(data["markets"]) == 0
ig_request_navigate_market(requests_mock, fail=True)
with pytest.raises(RuntimeError):
data = ig.navigate_market_node("")
def test_navigate_market_node_markets(ig, requests_mock):
ig_request_navigate_market(
requests_mock, data="mock_navigate_markets_markets.json", args="12345678"
)
data = ig.navigate_market_node("12345678")
assert "nodes" in data
assert len(data["nodes"]) == 0
assert "markets" in data
assert len(data["markets"]) == 3
assert data["markets"][0]["epic"] == "KC.D.AVLN8875P.DEC.IP"
assert data["markets"][1]["epic"] == "KC.D.AVLN8875P.MAR.IP"
assert data["markets"][2]["epic"] == "KC.D.AVLN8875P.JUN.IP"
def test_get_watchlist_markets(ig, requests_mock):
ig_request_market_info(requests_mock)
ig_request_watchlist(requests_mock, data="mock_watchlist_list.json")
ig_request_watchlist(requests_mock, args="12345678", data="mock_watchlist.json")
data = ig.get_markets_from_watchlist("My Watchlist")
assert isinstance(data, list)
assert len(data) == 3
assert isinstance(data[0], Market)
data = ig.get_markets_from_watchlist("wrong_name")
assert len(data) == 0
ig_request_watchlist(
requests_mock, args="12345678", data="mock_watchlist.json", fail=True
)
data = ig.get_markets_from_watchlist("wrong_name")
assert len(data) == 0
|
tests/repository/test_common.py | azadoks/aiida-core | 180 | 11176854 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name
"""Tests for the :mod:`aiida.repository.common` module."""
import pytest
from aiida.repository import File, FileType
@pytest.fixture
def file_object() -> File:
"""Test fixture to create and return a ``File`` instance."""
name = 'relative'
file_type = FileType.DIRECTORY
key = None
objects = {'sub': File('sub', file_type=FileType.FILE, key='abcdef')}
return File(name, file_type, key, objects)
def test_constructor():
"""Test the constructor defaults."""
file_object = File()
assert file_object.name == ''
assert file_object.file_type == FileType.DIRECTORY
assert file_object.key is None
assert file_object.objects == {}
def test_constructor_kwargs(file_object: File):
"""Test the constructor specifying specific keyword arguments."""
name = 'relative'
file_type = FileType.DIRECTORY
key = None
objects = {'sub': File()}
file_object = File(name, file_type, key, objects)
assert file_object.name == name
assert file_object.file_type == file_type
assert file_object.key == key
assert file_object.objects == objects
name = 'relative'
file_type = FileType.FILE
key = 'abcdef'
objects = None
file_object = File(name, file_type, key, objects)
assert file_object.name == name
assert file_object.file_type == file_type
assert file_object.key == key
assert file_object.objects == {}
def test_constructor_kwargs_invalid():
"""Test the constructor specifying invalid keyword arguments."""
name = 'relative'
file_type = FileType.FILE
key = 'abcdef'
objects = {'sub': File()}
with pytest.raises(TypeError):
File(None, file_type, key, objects)
with pytest.raises(TypeError):
File(name, None, key, objects)
with pytest.raises(TypeError):
File(name, file_type, 123, objects)
with pytest.raises(ValueError, match=r'an object of type `FileType.FILE` cannot define any objects.'):
File(name, FileType.FILE, key, {})
with pytest.raises(ValueError, match=r'an object of type `FileType.DIRECTORY` cannot define a key.'):
File(name, FileType.DIRECTORY, key, {})
def test_serialize():
"""Test the ``File.serialize`` method."""
objects = {
'empty': File('empty', file_type=FileType.DIRECTORY),
'file.txt': File('file.txt', file_type=FileType.FILE, key='abcdef'),
}
file_object = File(file_type=FileType.DIRECTORY, objects=objects)
expected = {
'o': {
'empty': {},
'file.txt': {
'k': 'abcdef',
}
}
}
assert file_object.serialize() == expected
def test_serialize_roundtrip(file_object: File):
"""Test the serialization round trip."""
serialized = file_object.serialize()
reconstructed = File.from_serialized(serialized, file_object.name)
assert isinstance(reconstructed, File)
assert file_object == reconstructed
def test_eq():
"""Test the ``File.__eq__`` method."""
file_object = File()
# Identity operation
assert file_object == file_object # pylint: disable=comparison-with-itself
# Identical default copy
assert file_object == File()
# Identical copy with different arguments
assert File(name='custom', file_type=FileType.FILE) == File(name='custom', file_type=FileType.FILE)
# Identical copies with nested objects
assert File(objects={'sub': File()}) == File(objects={'sub': File()})
assert file_object != File(name='custom')
assert file_object != File(file_type=FileType.FILE)
assert file_object != File(key='123456', file_type=FileType.FILE)
assert file_object != File(objects={'sub': File()})
# Test ordering of nested files:
objects = {
'empty': File('empty', file_type=FileType.DIRECTORY),
'file.txt': File('file.txt', file_type=FileType.FILE, key='abcdef'),
}
file_object_a = File(file_type=FileType.DIRECTORY, objects=objects)
objects = {
'file.txt': File('file.txt', file_type=FileType.FILE, key='abcdef'),
'empty': File('empty', file_type=FileType.DIRECTORY),
}
file_object_b = File(file_type=FileType.DIRECTORY, objects=objects)
assert file_object_a == file_object_b
|
test/terra/backends/test_compatibility.py | garrison/qiskit-aer | 313 | 11176859 | <filename>test/terra/backends/test_compatibility.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Tests if quantum info result compatibility classes.
These can be removed when this deprecation period is finished.
"""
import copy
from test.terra.common import QiskitAerTestCase
import numpy as np
import qiskit.quantum_info as qi
import qiskit.providers.aer.backends.compatibility as cqi
class TestResultCompatibility(QiskitAerTestCase):
"""Result compatiblity class tests."""
def test_statevector_eq(self):
orig = qi.random_statevector(4, seed=10)
compat = cqi.Statevector(orig.data)
self.assertEqual(compat, orig)
self.assertEqual(orig, compat)
def test_statevector_getattr(self):
compat = cqi.Statevector([1, 1e-10])
with self.assertWarns(DeprecationWarning):
value = compat.round(5)
self.assertEqual(type(value), np.ndarray)
self.assertTrue(np.all(value == np.array([1, 0])))
def test_statevector_copy(self):
compat = cqi.Statevector([1, 1e-10])
cpy = copy.copy(compat)
self.assertEqual(cpy, compat)
def test_statevector_linop(self):
orig = qi.random_statevector(4, seed=10)
compat = cqi.Statevector(orig.data)
self.assertEqual(2 * compat - orig, orig)
self.assertEqual(2 * orig - compat, orig)
def test_statevector_tensor(self):
orig = qi.random_statevector(2, seed=10)
compat = cqi.Statevector(orig.data)
target = orig.tensor(orig)
self.assertEqual(compat.tensor(orig), target)
self.assertEqual(orig.tensor(compat), target)
def test_statevector_evolve(self):
orig = qi.random_statevector(2, seed=10)
compat = cqi.Statevector(orig.data)
orig_op = qi.random_unitary(2, seed=10)
compat_op = cqi.Operator(orig_op.data)
target = orig.evolve(orig_op)
self.assertEqual(orig.evolve(compat_op), target)
self.assertEqual(compat.evolve(orig_op), target)
self.assertEqual(compat.evolve(compat_op), target)
def test_statevector_iterable_methods(self):
"""Test that the iterable magic methods and related Numpy properties
work on the compatibility classes."""
compat = cqi.Statevector([0.5, 0.5j, -0.5, 0.5j])
compat_data = compat.data
with self.assertWarns(DeprecationWarning):
compat_len = len(compat)
self.assertEqual(compat_len, len(compat_data))
with self.assertWarns(DeprecationWarning):
compat_shape = compat.shape
self.assertEqual(compat_shape, compat_data.shape)
with self.assertWarns(DeprecationWarning):
compat_iter = tuple(compat)
self.assertEqual(compat_iter, tuple(compat.data))
def test_density_matrix_eq(self):
orig = qi.random_density_matrix(4, seed=10)
compat = cqi.DensityMatrix(orig.data)
self.assertEqual(compat, orig)
self.assertEqual(orig, compat)
def test_density_matrix_getattr(self):
compat = cqi.DensityMatrix([[1, 0], [0, 1e-10]])
with self.assertWarns(DeprecationWarning):
value = compat.round(5)
self.assertEqual(type(value), np.ndarray)
self.assertTrue(np.all(value == np.array([[1, 0], [0, 0]])))
def test_density_matrix_copy(self):
compat = cqi.DensityMatrix([[1, 0], [0, 1e-10]])
cpy = copy.copy(compat)
self.assertEqual(cpy, compat)
def test_density_matrix_linop(self):
orig = qi.random_density_matrix(4, seed=10)
compat = cqi.DensityMatrix(orig.data)
self.assertEqual(2 * compat - orig, orig)
self.assertEqual(2 * orig - compat, orig)
def test_density_matrix_tensor(self):
orig = qi.random_density_matrix(2, seed=10)
compat = cqi.DensityMatrix(orig.data)
target = orig.tensor(orig)
self.assertEqual(compat.tensor(orig), target)
self.assertEqual(orig.tensor(compat), target)
def test_density_matrix_evolve(self):
orig = qi.random_density_matrix(2, seed=10)
compat = cqi.DensityMatrix(orig.data)
orig_op = qi.random_unitary(2, seed=10)
compat_op = cqi.Operator(orig_op.data)
target = orig.evolve(orig_op)
self.assertEqual(orig.evolve(compat_op), target)
self.assertEqual(compat.evolve(orig_op), target)
self.assertEqual(compat.evolve(compat_op), target)
def test_density_matrix_iterable_methods(self):
"""Test that the iterable magic methods and related Numpy properties
work on the compatibility classes."""
compat = cqi.DensityMatrix([[0.5, 0.5j], [-0.5j, 0.5]])
compat_data = compat.data
with self.assertWarns(DeprecationWarning):
compat_len = len(compat)
self.assertEqual(compat_len, len(compat_data))
with self.assertWarns(DeprecationWarning):
compat_shape = compat.shape
self.assertEqual(compat_shape, compat_data.shape)
with self.assertWarns(DeprecationWarning):
compat_iter = tuple(compat)
np.testing.assert_array_equal(compat_iter, compat.data)
def test_unitary_eq(self):
orig = qi.random_unitary(4, seed=10)
compat = cqi.Operator(orig.data)
self.assertEqual(compat, orig)
self.assertEqual(orig, compat)
def test_unitary_getattr(self):
compat = cqi.Operator([[1, 0], [1e-10, 1]])
with self.assertWarns(DeprecationWarning):
value = compat.round(5)
self.assertEqual(type(value), np.ndarray)
self.assertTrue(np.all(value == np.eye(2)))
def test_unitary_copy(self):
compat = cqi.Operator([[1, 0], [1e-10, 1]])
cpy = copy.copy(compat)
self.assertEqual(cpy, compat)
def test_unitary_linop(self):
orig = qi.random_unitary(4, seed=10)
compat = cqi.Operator(orig.data)
self.assertEqual(2 * compat - orig, orig)
self.assertEqual(2 * orig - compat, orig)
def test_unitary_tensor(self):
orig = qi.random_unitary(2, seed=10)
compat = cqi.Operator(orig.data)
target = orig.tensor(orig)
self.assertEqual(compat.tensor(orig), target)
self.assertEqual(orig.tensor(compat), target)
def test_unitary_compose(self):
orig = qi.random_unitary(2, seed=10)
compat = cqi.Operator(orig.data)
target = orig.compose(orig)
self.assertEqual(compat.compose(orig), target)
self.assertEqual(orig.compose(compat), target)
def test_unitary_evolve(self):
orig = qi.random_unitary(2, seed=10)
compat = cqi.Operator(orig.data)
state = qi.random_statevector(2, seed=10)
target = state.evolve(orig)
self.assertEqual(state.evolve(compat), target)
def test_unitary_iterable_methods(self):
"""Test that the iterable magic methods and related Numpy properties
work on the compatibility classes."""
compat = cqi.Operator(qi.random_unitary(2, seed=10))
compat_data = compat.data
with self.assertWarns(DeprecationWarning):
compat_len = len(compat)
self.assertEqual(compat_len, len(compat_data))
with self.assertWarns(DeprecationWarning):
compat_shape = compat.shape
self.assertEqual(compat_shape, compat_data.shape)
with self.assertWarns(DeprecationWarning):
compat_iter = tuple(compat)
np.testing.assert_array_equal(compat_iter, compat.data)
def test_superop_eq(self):
orig = qi.SuperOp(qi.random_quantum_channel(4, seed=10))
compat = cqi.SuperOp(orig.data)
self.assertEqual(compat, orig)
self.assertEqual(orig, compat)
def test_superop_getattr(self):
compat = cqi.SuperOp(np.eye(4))
with self.assertWarns(DeprecationWarning):
value = compat.round(5)
self.assertEqual(type(value), np.ndarray)
self.assertTrue(np.all(value == np.eye(4)))
def test_superop_copy(self):
compat = cqi.SuperOp(np.eye(4))
cpy = copy.copy(compat)
self.assertEqual(cpy, compat)
def test_superop_linop(self):
orig = qi.SuperOp(qi.random_quantum_channel(4, seed=10))
compat = cqi.SuperOp(orig.data)
self.assertEqual(2 * compat - orig, orig)
self.assertEqual(2 * orig - compat, orig)
def test_superop_iterable_methods(self):
"""Test that the iterable magic methods and related Numpy properties
work on the compatibility classes."""
compat = cqi.SuperOp(np.eye(4))
compat_data = compat.data
with self.assertWarns(DeprecationWarning):
compat_len = len(compat)
self.assertEqual(compat_len, len(compat_data))
with self.assertWarns(DeprecationWarning):
compat_shape = compat.shape
self.assertEqual(compat_shape, compat_data.shape)
with self.assertWarns(DeprecationWarning):
compat_iter = tuple(compat)
np.testing.assert_array_equal(compat_iter, compat.data)
def test_stabilizer_eq(self):
orig = qi.StabilizerState(qi.random_clifford(4, seed=10))
compat = cqi.StabilizerState(orig.clifford)
self.assertEqual(compat, orig)
self.assertEqual(orig, compat)
def test_stabilizer_getattr(self):
clifford = qi.random_clifford(4, seed=10)
compat = cqi.StabilizerState(clifford)
with self.assertWarns(DeprecationWarning):
value = compat.keys()
self.assertEqual(value, clifford.to_dict().keys())
def test_stabilizer_getitem(self):
clifford = qi.random_clifford(4, seed=10)
cliff_dict = clifford.to_dict()
compat = cqi.StabilizerState(clifford)
with self.assertWarns(DeprecationWarning):
stabs = compat['stabilizer']
self.assertEqual(stabs, cliff_dict['stabilizer'])
with self.assertWarns(DeprecationWarning):
destabs = compat['destabilizer']
self.assertEqual(destabs, cliff_dict['destabilizer'])
def test_stabilizer_copy(self):
clifford = qi.random_clifford(4, seed=10)
compat = cqi.StabilizerState(clifford)
cpy = copy.copy(compat)
self.assertEqual(cpy, compat)
def test_stabilizer_iterable_methods(self):
"""Test that the iterable magic methods and related dict properties
work on the compatibility classes."""
clifford = qi.random_clifford(4, seed=10)
cliff_dict = clifford.to_dict()
compat = cqi.StabilizerState(clifford)
with self.assertWarns(DeprecationWarning):
compat_keys = compat.keys()
self.assertEqual(compat_keys, cliff_dict.keys())
with self.assertWarns(DeprecationWarning):
compat_iter = set(compat)
self.assertEqual(compat_iter, set(cliff_dict))
with self.assertWarns(DeprecationWarning):
compat_items = compat.items()
self.assertEqual(sorted(compat_items), sorted(cliff_dict.items()))
with self.assertWarns(DeprecationWarning):
compat_len = len(compat)
self.assertEqual(compat_len, len(cliff_dict))
|
core/notification.py | 0x20Man/Watcher3 | 320 | 11176873 | <filename>core/notification.py
import core
import logging
logging = logging.getLogger(__name__)
def add(data, type_='success'):
''' Adds notification to core.NOTIFICATIONS
data (dict): notification information
type_ (str): style of notification, see javascript docs for available styles <optional - default 'success'>
Merges supplied 'data' with 'options' dict to ensure no fields are missing
Appends notif to core.NOTIFICATIONS
    Notif structure is a list of two dicts: [0] is the 'options' dict and [1] is the 'settings' dict
Does not return
'''
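    # Illustrative example (hypothetical values): calling
    #   add({'title': 'Update', 'message': 'v3.0 available', 'type': 'update'})
    # fills any missing 'options' fields and stores
    #   [{'title': 'Update', 'message': 'v3.0 available', 'type': 'update'},
    #    {'type': 'success', 'delay': 0, 'index': <position in core.NOTIFICATIONS>}]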
logging.info('Adding notification to queue.')
options = {'title': '',
'message': '',
'type': None
}
settings = {'type': type_,
'delay': 0
}
options.update(data)
logging.debug(options)
# if it already exists, ignore it
if options in core.NOTIFICATIONS:
return
# if this is an update notif, remove other update notifs first
if options['type'] == 'update':
for i, v in enumerate(core.NOTIFICATIONS):
if v[0]['type'] == 'update':
core.NOTIFICATIONS[i] = None
new_notif = [options, settings]
# if there is a None in the list, overwrite it. If not, just append
for i, v in enumerate(core.NOTIFICATIONS):
if v is None:
new_notif[1]['index'] = i
core.NOTIFICATIONS[i] = new_notif
return
new_notif[1]['index'] = len(core.NOTIFICATIONS)
core.NOTIFICATIONS.append(new_notif)
return
def remove(index):
''' Removes notification from core.NOTIFICATIONS
index (int): index of notification to remove
    Replaces the list item with None so as not to affect other indexes.
    When adding new notifs through core.notification, any None values
    are overwritten before appending to the end of the list.
    Removes all trailing 'None' entries from the list.
    This keeps the list as small as possible without changing existing
    indexes.
Does not return
'''
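    # Illustrative: with core.NOTIFICATIONS == [n0, n1, n2], remove(2) sets index 2 to
    # None and then pops it, leaving [n0, n1]; remove(0) only sets index 0 to None, so
    # n1 and n2 keep their indexes.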
logging.info('Remove notification #{}.'.format(index))
try:
core.NOTIFICATIONS[int(index)] = None
except Exception as e:
pass
logging.debug('Cleaning notification queue.')
while len(core.NOTIFICATIONS) > 0 and core.NOTIFICATIONS[-1] is None:
core.NOTIFICATIONS.pop()
return
|
benchexec/tools/cmaesfuzz.py | MartinSpiessl/benchexec | 137 | 11176880 | # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 <NAME> <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Fuzzing with stochastic optimization guided by CMA-ES
<NAME>, <NAME>
https://github.com/lazygrey/fuzzing_with_cmaes
"""
REQUIRED_PATHS = [
"fuzzer",
"fuzzer.py",
"cma",
"verifiers_bytes",
"verifiers_real",
]
def executable(self, tool_locator):
return tool_locator.find_executable("fuzzer")
def cmdline(self, executable, options, task, rlimits):
# add a time limit if not given
# that is hopefully sufficient to write all tests
if "-t" not in options and rlimits.cputime:
            # leave a margin of at least 10 seconds + 1% of the overall time
timeout = int(rlimits.cputime * 0.99 - 10)
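            # e.g. (illustrative) rlimits.cputime == 900 -> timeout = int(900 * 0.99 - 10) = 881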
# but don't add negative timeout
if timeout > 0:
options = options + ["-t", str(timeout)]
else:
options = options + ["-t", str(rlimits.cputime)]
return [executable] + options + [task.single_input_file]
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return "CMA-ES Fuzz"
def get_value_from_output(self, output, identifier):
for line in reversed(output):
if line.startswith(identifier):
return line[len(identifier) :]
return None
|
src/ralph/lib/transitions/__init__.py | DoNnMyTh/ralph | 1,668 | 11176917 | <gh_stars>1000+
# -*- coding: utf-8 -*-
default_app_config = 'ralph.lib.transitions.apps.TransitionAppConfig'
|
CircleciScripts/prepare_xcframework_archives.py | hardikdevios/aws-sdk-ios | 1,026 | 11176935 | <gh_stars>1000+
import os
import sys
import shutil
import re
import logging
from semver_util import validate_version
from framework_list import xcframeworks
from functions import run_command, setup_logging
def create_archives(xcframework_path, archive_path, version):
os.makedirs(archive_path, exist_ok=True)
for framework in xcframeworks:
xcframework_sdk = f"{framework}.xcframework"
xcframework_sdk_path = os.path.join(xcframework_path, xcframework_sdk)
archive_name = f"{framework}-{version}"
final_archive_name_with_ext = f"{archive_name}.zip"
logging.info(f"Creating zip file for {archive_name}")
temp_folder = os.path.join(xcframework_path, framework)
        logging.info(f"Copying the xcframework to a temp location {temp_folder}")
shutil.copytree(xcframework_sdk_path, os.path.join(temp_folder, xcframework_sdk))
logging.info(f"Generate the archive and move it to the archive directory")
shutil.make_archive(archive_name, "zip", root_dir=temp_folder, base_dir=xcframework_sdk)
final_archived_path = os.path.join(archive_path, final_archive_name_with_ext)
shutil.move(final_archive_name_with_ext, final_archived_path)
logging.info(f"Remove the temp folder")
shutil.rmtree(temp_folder)
def create_checksum(archive_path, spm_manifest_repo, version):
framework_to_checksum = {}
for framework in xcframeworks:
final_archive_name_with_ext = f"{framework}-{version}.zip"
zipfile_path = os.path.join(archive_path, final_archive_name_with_ext)
cmd = [
"swift",
"package",
"--package-path",
spm_manifest_repo,
"compute-checksum",
zipfile_path
]
(exit_code, out, err) = run_command(cmd, keepalive_interval=300, timeout=7200)
if exit_code == 0:
logging.info(f"Created check sum for archive {framework} {out}")
else:
logging.error(f"Could not create checksum for archive: {framework} output: {out}; error: {err}")
sys.exit(exit_code)
framework_to_checksum[framework] = out.decode("utf-8").rstrip()
return framework_to_checksum
def update_spm_manifest(framework_to_checksum, spm_manifest_repo, version):
with open (f"{spm_manifest_repo}/Package.swift", 'r+') as package_manifest_file:
content = package_manifest_file.read()
# Update the checksum
for framework in xcframeworks:
checksum = framework_to_checksum[framework]
content = re.sub('(^ +"'+framework+'"\: ")([\w.]+)', r'\g<1>' + checksum, content, flags=re.M)
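            # Illustrative, inferred from the regex above (framework name hypothetical):
            #   a manifest line like `    "AWSCore": "<old checksum>",` has its value
            #   replaced with the newly computed checksum.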
# Update the version
content = re.sub('(^let latestVersion = \")([\w.]+)', r'\g<1>' + version, content, flags=re.M)
package_manifest_file.seek(0)
package_manifest_file.write(content)
package_manifest_file.truncate()
setup_logging()
version = str(sys.argv[1])
if not validate_version(version):
logging.error("Version is invalid, exiting")
sys.exit(1)
project_dir = os.getcwd()
xcframework_path = os.path.join(project_dir, "xcframeworks", "output", "XCF")
archive_path = os.path.join(project_dir, "xcframeworks", "output", "archives")
spm_manifest_repo = './aws-sdk-ios-spm'
logging.info(f"Creating archives from {xcframework_path}")
create_archives(xcframework_path, archive_path, version)
logging.info(f"Calculating checksum from {archive_path}")
framework_to_checksum = create_checksum(archive_path, spm_manifest_repo, version)
logging.info(f"Updating checksum to {spm_manifest_repo}")
update_spm_manifest(framework_to_checksum, spm_manifest_repo, version)
|
tests/test_part_units.py | vkleen/skidl | 700 | 11176961 | # -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
import pytest
from skidl import Part
from .setup_teardown import setup_function, teardown_function
def test_part_unit_1():
vreg = Part("xess.lib", "1117")
vreg.match_pin_regex = True
vreg.make_unit("A", 1, 2)
vreg.make_unit("B", 3)
assert len(vreg.unit["A"][".*"]) == 2
assert len((vreg.unit["B"][".*"],)) == 1
def test_part_unit_2():
vreg = Part("xess.lib", "1117")
vreg.match_pin_regex = True
vreg.make_unit("A", 1, 2)
vreg.make_unit("A", 3)
assert len((vreg.unit["A"][".*"],)) == 1
def test_part_unit_3():
vreg = Part("xess.lib", "1117")
vreg.make_unit("1", 1, 2)
def test_part_unit_4():
mem = Part("xess", "SDRAM_16Mx16_TSOPII-54")
mem.match_pin_regex = True
data_pin_names = [p.name for p in mem[".*DQ[0:15].*"]]
mem.make_unit("A", data_pin_names)
# Wildcard pin matching OFF globally.
mem.match_pin_regex = False
assert mem[".*"] == None
assert mem.A[".*"] == None
# Wildcard pin matching ON globally.
mem.match_pin_regex = True
assert len(mem[".*"]) != 0
assert len(mem.A[".*"]) == 16
# Wildcard matching OFF for part unit, but ON globally.
mem.A.match_pin_regex = False
assert len(mem[".*"]) != 0
assert mem.A[".*"] == None
|
Vecihi/Backend/vecihi/ask/serializers.py | developertqw2017/migrationDjango | 220 | 11176983 | <gh_stars>100-1000
from rest_framework import serializers
from rest_framework.serializers import HyperlinkedRelatedField
from vecihi.users.serializers import UserSerializer
from vecihi.users.models import User
from .models import Question, Answer
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = ('id', 'whom', 'question', 'created_at')
def to_representation(self, obj):
return_obj = super(QuestionSerializer, self).to_representation(obj)
new_obj = {}
try:
new_obj["answer"] = {
"content": obj.answer.answer,
"created_at": obj.answer.created_at
}
except: # RelatedObjectDoesNotExist
new_obj["answer"] = None
return_obj.update(new_obj)
return return_obj
class AnswerSerializer(serializers.ModelSerializer):
question = QuestionSerializer(read_only=True)
class Meta:
model = Answer
fields = ('id', 'question', 'answer', 'created_at') |
desktop/core/ext-py/nose-1.3.7/functional_tests/test_issue_649.py | kokosing/hue | 5,079 | 11177001 | # -*- coding: utf-8 -*-
import os
import sys
import unittest
from nose.plugins.capture import Capture
from nose.plugins import PluginTester
support = os.path.join(os.path.dirname(__file__), 'support')
class TestIssue649(PluginTester, unittest.TestCase):
activate = ''
args = ['-v']
plugins = [Capture()]
suitepath = os.path.join(support, 'issue649')
def runTest(self):
print str(self.output)
assert 'UnicodeDecodeError' not in self.output
|
qf_lib/backtesting/events/time_flow_controller.py | webclinic017/qf-lib | 198 | 11177007 | <gh_stars>100-1000
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from qf_lib.backtesting.events.empty_queue_event.empty_queue_event import EmptyQueueEvent
from qf_lib.backtesting.events.empty_queue_event.empty_queue_event_listener import EmptyQueueEventListener
from qf_lib.backtesting.events.empty_queue_event.empty_queue_event_notifier import EmptyQueueEventNotifier
from qf_lib.backtesting.events.end_trading_event.end_trading_event import EndTradingEvent
from qf_lib.backtesting.events.event_manager import EventManager
from qf_lib.backtesting.events.time_event.scheduler import Scheduler
from qf_lib.common.utils.dateutils.timer import SettableTimer, RealTimer
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
class TimeFlowController(EmptyQueueEventListener, metaclass=ABCMeta):
"""
Abstract class for all TimeFlowControllers. TimeFlowController is an object which is responsible for controlling
time flow of a backtest or live trading session, i.e. it's responsible for producing TimeEvents and putting
them in the EventManager's event queue.
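    Illustrative flow: an EmptyQueueEvent triggers on_empty_queue_event(), which calls
    generate_time_event() to publish the next TimeEvent(s) (or, for a backtest, an
    EndTradingEvent once the end date has passed).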
"""
def __init__(self, event_manager: EventManager, empty_queue_event_notifier: EmptyQueueEventNotifier):
self.event_manager = event_manager
empty_queue_event_notifier.subscribe(self)
def on_empty_queue_event(self, event: EmptyQueueEvent):
self.generate_time_event()
@abstractmethod
def generate_time_event(self):
"""
Checks when the next planned TimeEvent should occur, lets the time pass until that moment (or fast-forwards
the time if it's a backtest) and then publishes the TimeEvent.
"""
pass
class BacktestTimeFlowController(TimeFlowController):
def __init__(self, scheduler: Scheduler, event_manager: EventManager, settable_timer: SettableTimer,
empty_queue_event_notifier: EmptyQueueEventNotifier, backtest_end_date: datetime):
super().__init__(event_manager, empty_queue_event_notifier)
self.scheduler = scheduler
self.settable_timer = settable_timer
self.backtest_end_datetime = self._end_of_the_day(backtest_end_date)
def generate_time_event(self):
time_events_list, next_time_of_event = self.scheduler.get_next_time_events()
if next_time_of_event > self.backtest_end_datetime:
self.event_manager.publish(EndTradingEvent())
else:
# because it's a backtest we don't really need to wait until the time of next TimeEvent; we can simply
# fast-forward the timer to that time
self.settable_timer.set_current_time(next_time_of_event)
for time_event in time_events_list:
self.event_manager.publish(time_event)
def _end_of_the_day(self, end_date: datetime):
return datetime(end_date.year, end_date.month, end_date.day, 23, 59, 59, 999999)
class LiveSessionTimeFlowController(TimeFlowController):
def __init__(self, scheduler: Scheduler, event_manager: EventManager, real_timer: RealTimer,
empty_queue_event_notifier: EmptyQueueEventNotifier):
super().__init__(event_manager, empty_queue_event_notifier)
self.scheduler = scheduler
self.real_timer = real_timer
self.logger = qf_logger.getChild(self.__class__.__name__)
def generate_time_event(self):
time_events_list, next_time_of_event = self.scheduler.get_next_time_events()
for time_event in time_events_list:
self.logger.info("Next time event: {}".format(time_event))
self.sleep_until(next_time_of_event)
for time_event in time_events_list:
self.logger.info("Wake up! Current event: {}, Next event: {}".format(
time_event.__class__.__name__, next_time_of_event))
self.event_manager.publish(time_event)
def sleep_until(self, time_of_next_time_event: datetime):
# if we're in the live session we need to put the program to sleep until the next TimeEvent
now = self.real_timer.now()
waiting_time = time_of_next_time_event - now
self.logger.info("Going to sleep for {} ".format(waiting_time))
time.sleep(waiting_time.total_seconds())
|
data_management/databases/classification/convert_json_detections_to_pickle.py | dnarqq/WildHack | 402 | 11177009 | <reponame>dnarqq/WildHack<gh_stars>100-1000
# This file converts the JSON output of the batch processing API to a pickle file, which can be used by the
# script ./make_classification_dataset.py
import argparse
import os
import json
import pickle
import pandas
import numpy as np
import tqdm
parser = argparse.ArgumentParser('This file converts the JSON output of the batch processing API to a pickle file, ' + \
'which can be used by the script ./make_classification_dataset.py')
parser.add_argument("input_json", type=str, help='Path to the JSON file that contains the API output')
parser.add_argument("output_pkl", type=str, help='Path to the desired output pickle file')
parser.add_argument("--detection_category_whitelist", nargs='+', default=['1'], metavar='CAT_ID',
help='List of detection categories to use. Default: ["1"]')
args = parser.parse_args()
assert os.path.isfile(args.input_json), 'ERROR: The input JSON file could not be found!'
assert not os.path.isfile(args.output_pkl), 'ERROR: The output file exists already!'
assert isinstance(args.detection_category_whitelist, list)
assert len(args.detection_category_whitelist) > 0
with open(args.input_json, 'rt') as fi:
j = json.load(fi)
detection_dict = dict()
for row in tqdm.tqdm(list(range(len(j['images'])))):
cur_image = j['images'][row]
key = cur_image['file']
max_conf = 0
conf = []
boxes = []
for det in cur_image['detections']:
if det['category'] in args.detection_category_whitelist:
max_conf = max(max_conf, float(det['conf']))
conf.append(float(det['conf']))
# Convert boxes from JSON [x_min, y_min, width_of_box, height_of_box]
# to PICKLE [ymin, xmin, ymax, xmax]
box = det['bbox']
boxes.append([box[1], box[0], box[1] + box[3], box[0]+ box[2]])
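            # e.g. (illustrative) bbox [0.1, 0.2, 0.3, 0.4] -> [0.2, 0.1, 0.6, 0.4]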
detection_dict[key] = dict(detection_scores=conf, detection_boxes=boxes)
# Write detections to file with pickle
with open(args.output_pkl, 'wb') as f:
pickle.dump(detection_dict, f, pickle.HIGHEST_PROTOCOL)
|
staintools/preprocessing/read_image.py | BostonMeditechGroup/StainTools | 197 | 11177020 | import cv2 as cv
import os
def read_image(path):
"""
Read an image to RGB uint8.
    Read with opencv (cv) and convert from BGR colorspace to RGB.
:param path: The path to the image.
:return: RGB uint8 image.
"""
assert os.path.isfile(path), "File not found"
im = cv.imread(path)
# Convert from cv2 standard of BGR to our convention of RGB.
im = cv.cvtColor(im, cv.COLOR_BGR2RGB)
return im |
Lib/test/bugs/pr171.py | jeff5/jython-whinchat | 577 | 11177056 | # PR#171. pdb's "c" command fails. The underlying problem is that frame
# objects didn't have a writeable f_trace attribute.
import sys
try: 1/0
except: frame = sys.exc_info()[2].tb_frame
del frame.f_trace
|
Tests/test_strptime_stdlib.py | aisk/ironpython3 | 1,872 | 11177063 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
##
## Run selected tests from test_strptime from StdLib
##
import unittest
import sys
from iptest import run_test
import test.test_strptime
def load_tests(loader, standard_tests, pattern):
if sys.implementation.name == 'ironpython':
suite = unittest.TestSuite()
suite.addTest(test.test_strptime.CacheTests('test_TimeRE_recreation_locale'))
suite.addTest(test.test_strptime.CacheTests('test_TimeRE_recreation_timezone'))
suite.addTest(test.test_strptime.CacheTests('test_new_localetime'))
suite.addTest(test.test_strptime.CacheTests('test_regex_cleanup'))
suite.addTest(test.test_strptime.CacheTests('test_time_re_recreation'))
suite.addTest(unittest.expectedFailure(test.test_strptime.CalculationTests('test_day_of_week_calculation'))) # https://github.com/IronLanguages/ironpython3/issues/1121
suite.addTest(unittest.expectedFailure(test.test_strptime.CalculationTests('test_gregorian_calculation'))) # https://github.com/IronLanguages/ironpython3/issues/1121
suite.addTest(unittest.expectedFailure(test.test_strptime.CalculationTests('test_julian_calculation'))) # https://github.com/IronLanguages/ironpython3/issues/1121
suite.addTest(test.test_strptime.CalculationTests('test_week_0'))
suite.addTest(test.test_strptime.CalculationTests('test_week_of_year_and_day_of_week_calculation'))
suite.addTest(test.test_strptime.JulianTests('test_all_julian_days'))
suite.addTest(test.test_strptime.LocaleTime_Tests('test_am_pm'))
suite.addTest(test.test_strptime.LocaleTime_Tests('test_date_time'))
suite.addTest(test.test_strptime.LocaleTime_Tests('test_lang'))
suite.addTest(test.test_strptime.LocaleTime_Tests('test_month'))
suite.addTest(test.test_strptime.LocaleTime_Tests('test_timezone'))
suite.addTest(test.test_strptime.LocaleTime_Tests('test_weekday'))
suite.addTest(test.test_strptime.Strptime12AMPMTests('test_twelve_noon_midnight'))
suite.addTest(test.test_strptime.StrptimeTests('test_ValueError'))
suite.addTest(test.test_strptime.StrptimeTests('test_bad_timezone'))
suite.addTest(test.test_strptime.StrptimeTests('test_caseinsensitive'))
suite.addTest(test.test_strptime.StrptimeTests('test_date'))
suite.addTest(test.test_strptime.StrptimeTests('test_date_time'))
suite.addTest(test.test_strptime.StrptimeTests('test_day'))
suite.addTest(test.test_strptime.StrptimeTests('test_defaults'))
suite.addTest(test.test_strptime.StrptimeTests('test_escaping'))
suite.addTest(unittest.expectedFailure(test.test_strptime.StrptimeTests('test_feb29_on_leap_year_without_year')))
suite.addTest(test.test_strptime.StrptimeTests('test_fraction'))
suite.addTest(test.test_strptime.StrptimeTests('test_hour'))
suite.addTest(test.test_strptime.StrptimeTests('test_julian'))
suite.addTest(unittest.expectedFailure(test.test_strptime.StrptimeTests('test_mar1_comes_after_feb29_even_when_omitting_the_year')))
suite.addTest(test.test_strptime.StrptimeTests('test_minute'))
suite.addTest(test.test_strptime.StrptimeTests('test_month'))
suite.addTest(test.test_strptime.StrptimeTests('test_percent'))
suite.addTest(test.test_strptime.StrptimeTests('test_second'))
suite.addTest(test.test_strptime.StrptimeTests('test_strptime_exception_context'))
suite.addTest(test.test_strptime.StrptimeTests('test_time'))
suite.addTest(unittest.expectedFailure(test.test_strptime.StrptimeTests('test_timezone'))) # https://github.com/IronLanguages/ironpython3/issues/1121
suite.addTest(test.test_strptime.StrptimeTests('test_unconverteddata'))
suite.addTest(test.test_strptime.StrptimeTests('test_weekday'))
suite.addTest(test.test_strptime.StrptimeTests('test_year'))
suite.addTest(test.test_strptime.TimeRETests('test_blankpattern'))
suite.addTest(unittest.expectedFailure(test.test_strptime.TimeRETests('test_compile')))
suite.addTest(test.test_strptime.TimeRETests('test_locale_data_w_regex_metacharacters'))
suite.addTest(test.test_strptime.TimeRETests('test_matching_with_escapes'))
suite.addTest(test.test_strptime.TimeRETests('test_pattern'))
suite.addTest(test.test_strptime.TimeRETests('test_pattern_escaping'))
suite.addTest(test.test_strptime.TimeRETests('test_whitespace_substitution'))
suite.addTest(test.test_strptime.getlang_Tests('test_basic'))
return suite
else:
return loader.loadTestsFromModule(test.test_strptime, pattern)
run_test(__name__)
|
discord/auth/helpers/__init__.py | telugu-boy/discord.py-self | 175 | 11177070 | from .captcha import *
from .email import *
|
tests/warp_drive/test_env_reset.py | salesforce/warp-drive | 255 | 11177099 | <reponame>salesforce/warp-drive
# Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import unittest
import numpy as np
import torch
from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
CUDAEnvironmentReset,
CUDAFunctionManager,
)
from warp_drive.utils.common import get_project_root
from warp_drive.utils.data_feed import DataFeed
pytorch_cuda_init_success = torch.cuda.FloatTensor(8)
_CUBIN_FILEPATH = f"{get_project_root()}/warp_drive/cuda_bin"
class TestEnvironmentReset(unittest.TestCase):
"""
Unit tests for the CUDA environment resetter
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dm = CUDADataManager(num_agents=5, num_envs=2, episode_length=2)
self.fm = CUDAFunctionManager(
num_agents=int(self.dm.meta_info("n_agents")),
num_envs=int(self.dm.meta_info("n_envs")),
)
self.fm.load_cuda_from_binary_file(f"{_CUBIN_FILEPATH}/test_build.fatbin")
self.resetter = CUDAEnvironmentReset(function_manager=self.fm)
def test_reset_for_different_dim(self):
self.dm.data_on_device_via_torch("_done_")[:] = torch.from_numpy(
np.array([1, 0])
).cuda()
done = self.dm.pull_data_from_device("_done_")
self.assertSequenceEqual(list(done), [1, 0])
data_feed = DataFeed()
data_feed.add_data(
name="a", data=np.random.randn(2, 10, 3), save_copy_and_apply_at_reset=True
)
data_feed.add_data(
name="b", data=np.random.randn(2, 10), save_copy_and_apply_at_reset=True
)
data_feed.add_data(
name="c", data=np.random.randn(2), save_copy_and_apply_at_reset=True
)
data_feed.add_data(
name="d",
data=np.random.randint(10, size=(2, 10, 3), dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
data_feed.add_data(
name="e",
data=np.random.randint(10, size=(2, 10), dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
data_feed.add_data(
name="f",
data=np.random.randint(10, size=2, dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
self.dm.push_data_to_device(data_feed)
torch_data_feed = DataFeed()
torch_data_feed.add_data(
name="at", data=np.random.randn(2, 10, 3), save_copy_and_apply_at_reset=True
)
torch_data_feed.add_data(
name="bt", data=np.random.randn(2, 10), save_copy_and_apply_at_reset=True
)
torch_data_feed.add_data(
name="ct", data=np.random.randn(2), save_copy_and_apply_at_reset=True
)
torch_data_feed.add_data(
name="dt",
data=np.random.randint(10, size=(2, 10, 3), dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
torch_data_feed.add_data(
name="et",
data=np.random.randint(10, size=(2, 10), dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
torch_data_feed.add_data(
name="ft",
data=np.random.randint(10, size=2, dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
self.dm.push_data_to_device(torch_data_feed, torch_accessible=True)
a = self.dm.pull_data_from_device("a")
b = self.dm.pull_data_from_device("b")
c = self.dm.pull_data_from_device("c")
d = self.dm.pull_data_from_device("d")
e = self.dm.pull_data_from_device("e")
f = self.dm.pull_data_from_device("f")
at = self.dm.pull_data_from_device("at")
bt = self.dm.pull_data_from_device("bt")
ct = self.dm.pull_data_from_device("ct")
dt = self.dm.pull_data_from_device("dt")
et = self.dm.pull_data_from_device("et")
ft = self.dm.pull_data_from_device("ft")
# change the value in place
self.dm.data_on_device_via_torch("at")[:] = torch.rand(2, 10, 3).cuda()
self.dm.data_on_device_via_torch("bt")[:] = torch.rand(2, 10).cuda()
self.dm.data_on_device_via_torch("ct")[:] = torch.rand(2).cuda()
self.dm.data_on_device_via_torch("dt")[:] = torch.randint(
10, size=(2, 10, 3)
).cuda()
self.dm.data_on_device_via_torch("et")[:] = torch.randint(
10, size=(2, 10)
).cuda()
self.dm.data_on_device_via_torch("ft")[:] = torch.randint(10, size=(2,)).cuda()
self.resetter.reset_when_done(self.dm)
a_after_reset = self.dm.pull_data_from_device("a")
b_after_reset = self.dm.pull_data_from_device("b")
c_after_reset = self.dm.pull_data_from_device("c")
d_after_reset = self.dm.pull_data_from_device("d")
e_after_reset = self.dm.pull_data_from_device("e")
f_after_reset = self.dm.pull_data_from_device("f")
at_after_reset = self.dm.pull_data_from_device("at")
bt_after_reset = self.dm.pull_data_from_device("bt")
ct_after_reset = self.dm.pull_data_from_device("ct")
dt_after_reset = self.dm.pull_data_from_device("dt")
et_after_reset = self.dm.pull_data_from_device("et")
ft_after_reset = self.dm.pull_data_from_device("ft")
self.assertTrue(np.absolute((a - a_after_reset).mean()) < 1e-5)
self.assertTrue(np.absolute((b - b_after_reset).mean()) < 1e-5)
self.assertTrue(np.absolute((c - c_after_reset).mean()) < 1e-5)
self.assertTrue(np.count_nonzero(d - d_after_reset) == 0)
self.assertTrue(np.count_nonzero(e - e_after_reset) == 0)
self.assertTrue(np.count_nonzero(f - f_after_reset) == 0)
        # so after the soft reset, only env_0 got reset because its done flag was on
self.assertTrue(np.absolute((at - at_after_reset)[0].mean()) < 1e-5)
self.assertTrue(np.absolute((bt - bt_after_reset)[0].mean()) < 1e-5)
self.assertTrue(np.absolute((ct - ct_after_reset)[0].mean()) < 1e-5)
self.assertTrue(np.absolute((at - at_after_reset)[1].mean()) > 1e-5)
self.assertTrue(np.absolute((bt - bt_after_reset)[1].mean()) > 1e-5)
self.assertTrue(np.absolute((ct - ct_after_reset)[1].mean()) > 1e-5)
self.assertTrue(np.count_nonzero((dt - dt_after_reset)[0]) == 0)
self.assertTrue(np.count_nonzero((et - et_after_reset)[0]) == 0)
self.assertTrue(np.count_nonzero((ft - ft_after_reset)[0]) == 0)
self.assertTrue(np.count_nonzero((dt - dt_after_reset)[1]) > 0)
self.assertTrue(np.count_nonzero((et - et_after_reset)[1]) > 0)
self.assertTrue(np.count_nonzero((ft - ft_after_reset)[1]) >= 0)
done = self.dm.pull_data_from_device("_done_")
self.assertSequenceEqual(list(done), [0, 0])
# Now test if mode="force_reset" works
torch_data_feed2 = DataFeed()
torch_data_feed2.add_data(
name="af", data=np.random.randn(2, 10, 3), save_copy_and_apply_at_reset=True
)
torch_data_feed2.add_data(
name="bf", data=np.random.randn(2, 10), save_copy_and_apply_at_reset=True
)
torch_data_feed2.add_data(
name="cf", data=np.random.randn(2), save_copy_and_apply_at_reset=True
)
torch_data_feed2.add_data(
name="df",
data=np.random.randint(10, size=(2, 10, 3), dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
torch_data_feed2.add_data(
name="ef",
data=np.random.randint(10, size=(2, 10), dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
torch_data_feed2.add_data(
name="ff",
data=np.random.randint(10, size=2, dtype=np.int32),
save_copy_and_apply_at_reset=True,
)
self.dm.push_data_to_device(torch_data_feed2, torch_accessible=True)
af = self.dm.pull_data_from_device("af")
bf = self.dm.pull_data_from_device("bf")
cf = self.dm.pull_data_from_device("cf")
df = self.dm.pull_data_from_device("df")
ef = self.dm.pull_data_from_device("ef")
ff = self.dm.pull_data_from_device("ff")
# change the value in place
self.dm.data_on_device_via_torch("af")[:] = torch.rand(2, 10, 3).cuda()
self.dm.data_on_device_via_torch("bf")[:] = torch.rand(2, 10).cuda()
self.dm.data_on_device_via_torch("cf")[:] = torch.rand(2).cuda()
self.dm.data_on_device_via_torch("df")[:] = torch.randint(
10, size=(2, 10, 3)
).cuda()
self.dm.data_on_device_via_torch("ef")[:] = torch.randint(
10, size=(2, 10)
).cuda()
self.dm.data_on_device_via_torch("ff")[:] = torch.randint(10, size=(2,)).cuda()
self.resetter.reset_when_done(self.dm)
af_after_soft_reset = self.dm.pull_data_from_device("af")
bf_after_soft_reset = self.dm.pull_data_from_device("bf")
cf_after_soft_reset = self.dm.pull_data_from_device("cf")
df_after_soft_reset = self.dm.pull_data_from_device("df")
ef_after_soft_reset = self.dm.pull_data_from_device("ef")
ff_after_soft_reset = self.dm.pull_data_from_device("ff")
self.assertTrue(np.absolute((af - af_after_soft_reset).mean()) > 1e-5)
self.assertTrue(np.absolute((bf - bf_after_soft_reset).mean()) > 1e-5)
self.assertTrue(np.absolute((cf - cf_after_soft_reset).mean()) > 1e-5)
self.assertTrue(np.count_nonzero(df - df_after_soft_reset) > 0)
self.assertTrue(np.count_nonzero(ef - ef_after_soft_reset) > 0)
self.assertTrue(np.count_nonzero(ff - ff_after_soft_reset) > 0)
self.resetter.reset_when_done(self.dm, mode="force_reset")
af_after_hard_reset = self.dm.pull_data_from_device("af")
bf_after_hard_reset = self.dm.pull_data_from_device("bf")
cf_after_hard_reset = self.dm.pull_data_from_device("cf")
df_after_hard_reset = self.dm.pull_data_from_device("df")
ef_after_hard_reset = self.dm.pull_data_from_device("ef")
ff_after_hard_reset = self.dm.pull_data_from_device("ff")
self.assertTrue(np.absolute((af - af_after_hard_reset).mean()) < 1e-5)
self.assertTrue(np.absolute((bf - bf_after_hard_reset).mean()) < 1e-5)
self.assertTrue(np.absolute((cf - cf_after_hard_reset).mean()) < 1e-5)
self.assertTrue(np.count_nonzero(df - df_after_hard_reset) == 0)
self.assertTrue(np.count_nonzero(ef - ef_after_hard_reset) == 0)
self.assertTrue(np.count_nonzero(ff - ff_after_hard_reset) == 0)
|
tests/nnapi/specs/Ex/batch_matmul_ex_dynamic_nnfw.mod.py | periannath/ONE | 255 | 11177109 | import dynamic_tensor
model = Model()
input1_shape = [2, 2, 3]
dynamic_layer = dynamic_tensor.DynamicInputGenerator(model, input1_shape, "TENSOR_FLOAT32")
input1 = dynamic_layer.getTestNodeInput()
input2 = Input("op2", "TENSOR_FLOAT32", "{3, 4}")
adj_x = False
adj_y = False
model_output = Output("output", "TENSOR_FLOAT32", "{2, 2, 4}")
model = model.Operation("BATCH_MATMUL_EX", input1, input2, adj_x, adj_y).To(model_output)
input1_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
input2_data = [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
model_output_data = [74, 80, 86, 92, 173, 188, 203, 218, 272, 296,
320, 344, 371, 404, 437, 470]
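# Illustrative check of the first output row:
#   [1, 2, 3] . [[7, 8, 9, 10], [11, 12, 13, 14], [15, 16, 17, 18]] = [74, 80, 86, 92]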
input_list = {
dynamic_layer.getModelInput(): input1_data,
dynamic_layer.getShapeInput() : input1_shape,
input2 : input2_data,
}
output_list= {
model_output: model_output_data
}
Example((input_list, output_list))
|
src/lib/modulefinder.py | DTenore/skulpt | 2,671 | 11177123 | import _sk_fail; _sk_fail._("modulefinder")
|
tests/python/twitter/common/lang/test_lockable.py | zhouyijiaren/commons | 1,143 | 11177132 | <filename>tests/python/twitter/common/lang/test_lockable.py
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import threading
from twitter.common.lang import Lockable
def test_basic_mutual_exclusion():
class Foo(Lockable):
def __init__(self):
self.counter = 0
self.start_event = threading.Event()
self.finish_event = threading.Event()
Lockable.__init__(self)
@Lockable.sync
def pooping(self):
self.counter += 1
self.start_event.set()
self.finish_event.wait()
f = Foo()
class FooSetter(threading.Thread):
def run(self):
f.pooping()
fs1 = FooSetter()
fs2 = FooSetter()
fs1.start()
fs2.start()
# yield threads
f.start_event.wait(timeout=1.0)
assert f.start_event.is_set()
# assert mutual exclusion
assert f.counter == 1
# unblock ==> other wakes up
f.start_event.clear()
f.finish_event.set()
f.start_event.wait(timeout=1.0)
assert f.start_event.is_set()
assert f.counter == 2
|
pystock_crawler/exporters.py | breakhearts/pystock-crawler | 320 | 11177138 | <filename>pystock_crawler/exporters.py
from scrapy.conf import settings
from scrapy.contrib.exporter import BaseItemExporter, CsvItemExporter
class CsvItemExporter2(CsvItemExporter):
'''
The standard CsvItemExporter class does not pass the kwargs through to the
CSV writer, resulting in EXPORT_FIELDS and EXPORT_ENCODING being ignored
(EXPORT_EMPTY is not used by CSV).
http://stackoverflow.com/questions/6943778/python-scrapy-how-to-get-csvitemexporter-to-write-columns-in-a-specific-order
'''
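    # Illustrative settings this exporter honours (values are hypothetical):
    #   EXPORT_FIELDS = ['symbol', 'name', 'close']
    #   EXPORT_ENCODING = 'utf-8'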
def __init__(self, *args, **kwargs):
kwargs['fields_to_export'] = settings.getlist('EXPORT_FIELDS') or None
kwargs['encoding'] = settings.get('EXPORT_ENCODING', 'utf-8')
super(CsvItemExporter2, self).__init__(*args, **kwargs)
def _write_headers_and_set_fields_to_export(self, item):
# HACK: Override this private method to filter fields that are in
# fields_to_export but not in item
if self.include_headers_line:
item_fields = item.fields.keys()
if self.fields_to_export:
self.fields_to_export = filter(lambda a: a in item_fields, self.fields_to_export)
else:
self.fields_to_export = item_fields
self.csv_writer.writerow(self.fields_to_export)
class SymbolListExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs, dont_fail=True)
self.file = file
def export_item(self, item):
self.file.write('%s\t%s\n' % (item['symbol'], item['name']))
|
tests/test_who.py | listuser/jc | 3,215 | 11177140 | <filename>tests/test_who.py
import os
import sys
import time
import json
import unittest
import jc.parsers.who
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Set the timezone on POSIX systems. Need to manually set for Windows tests
if not sys.platform.startswith('win32'):
os.environ['TZ'] = 'America/Los_Angeles'
time.tzset()
class MyTests(unittest.TestCase):
def setUp(self):
# input
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/who.out'), 'r', encoding='utf-8') as f:
self.centos_7_7_who = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/who.out'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_who = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/who.out'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_who = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/who-a.out'), 'r', encoding='utf-8') as f:
self.centos_7_7_who_a = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/who-a.out'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_who_a = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/who-a.out'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_who_a = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/who.json'), 'r', encoding='utf-8') as f:
self.centos_7_7_who_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/who.json'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_who_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/who.json'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_who_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/who-a.json'), 'r', encoding='utf-8') as f:
self.centos_7_7_who_a_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/who-a.json'), 'r', encoding='utf-8') as f:
self.ubuntu_18_4_who_a_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/who-a.json'), 'r', encoding='utf-8') as f:
self.osx_10_14_6_who_a_json = json.loads(f.read())
def test_who_nodata(self):
"""
Test 'who' with no data
"""
self.assertEqual(jc.parsers.who.parse('', quiet=True), [])
def test_who_centos_7_7(self):
"""
Test 'who' on Centos 7.7
"""
self.assertEqual(jc.parsers.who.parse(self.centos_7_7_who, quiet=True), self.centos_7_7_who_json)
def test_who_ubuntu_18_4(self):
"""
Test 'who' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.who.parse(self.ubuntu_18_4_who, quiet=True), self.ubuntu_18_4_who_json)
def test_who_osx_10_14_6(self):
"""
Test 'who' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.who.parse(self.osx_10_14_6_who, quiet=True), self.osx_10_14_6_who_json)
def test_who_a_centos_7_7(self):
"""
Test 'who -a' on Centos 7.7
"""
self.assertEqual(jc.parsers.who.parse(self.centos_7_7_who_a, quiet=True), self.centos_7_7_who_a_json)
def test_who_a_ubuntu_18_4(self):
"""
Test 'who -a' on Ubuntu 18.4
"""
self.assertEqual(jc.parsers.who.parse(self.ubuntu_18_4_who_a, quiet=True), self.ubuntu_18_4_who_a_json)
def test_who_a_osx_10_14_6(self):
"""
Test 'who -a' on OSX 10.14.6
"""
self.assertEqual(jc.parsers.who.parse(self.osx_10_14_6_who_a, quiet=True), self.osx_10_14_6_who_a_json)
if __name__ == '__main__':
unittest.main()
|
babi_runner.py | parakrama1995/mem | 645 | 11177146 | import glob
import os
import random
import sys
import argparse
import numpy as np
from config import BabiConfig, BabiConfigJoint
from train_test import train, train_linear_start, test
from util import parse_babi_task, build_model
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val) # for reproducing
def run_task(data_dir, task_id):
"""
Train and test for each task
"""
print("Train and test for task %d ..." % task_id)
# Parse data
train_files = glob.glob('%s/qa%d_*_train.txt' % (data_dir, task_id))
test_files = glob.glob('%s/qa%d_*_test.txt' % (data_dir, task_id))
dictionary = {"nil": 0}
train_story, train_questions, train_qstory = parse_babi_task(train_files, dictionary, False)
test_story, test_questions, test_qstory = parse_babi_task(test_files, dictionary, False)
general_config = BabiConfig(train_story, train_questions, dictionary)
memory, model, loss = build_model(general_config)
if general_config.linear_start:
train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)
else:
train(train_story, train_questions, train_qstory, memory, model, loss, general_config)
test(test_story, test_questions, test_qstory, memory, model, loss, general_config)
def run_all_tasks(data_dir):
"""
Train and test for all tasks
"""
print("Training and testing for all tasks ...")
for t in range(20):
run_task(data_dir, task_id=t + 1)
def run_joint_tasks(data_dir):
"""
Train and test for all tasks but the trained model is built using training data from all tasks.
"""
print("Jointly train and test for all tasks ...")
tasks = range(20)
# Parse training data
train_data_path = []
for t in tasks:
train_data_path += glob.glob('%s/qa%d_*_train.txt' % (data_dir, t + 1))
dictionary = {"nil": 0}
train_story, train_questions, train_qstory = parse_babi_task(train_data_path, dictionary, False)
# Parse test data for each task so that the dictionary covers all words before training
for t in tasks:
test_data_path = glob.glob('%s/qa%d_*_test.txt' % (data_dir, t + 1))
parse_babi_task(test_data_path, dictionary, False) # ignore output for now
general_config = BabiConfigJoint(train_story, train_questions, dictionary)
memory, model, loss = build_model(general_config)
if general_config.linear_start:
train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)
else:
train(train_story, train_questions, train_qstory, memory, model, loss, general_config)
# Test on each task
for t in tasks:
print("Testing for task %d ..." % (t + 1))
test_data_path = glob.glob('%s/qa%d_*_test.txt' % (data_dir, t + 1))
dc = len(dictionary)
test_story, test_questions, test_qstory = parse_babi_task(test_data_path, dictionary, False)
assert dc == len(dictionary) # make sure that the dictionary already covers all words
test(test_story, test_questions, test_qstory, memory, model, loss, general_config)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data-dir", default="data/tasks_1-20_v1-2/en",
help="path to dataset directory (default: %(default)s)")
group = parser.add_mutually_exclusive_group()
group.add_argument("-t", "--task", default="1", type=int,
help="train and test for a single task (default: %(default)s)")
group.add_argument("-a", "--all-tasks", action="store_true",
help="train and test for all tasks (one by one) (default: %(default)s)")
group.add_argument("-j", "--joint-tasks", action="store_true",
help="train and test for all tasks (all together) (default: %(default)s)")
args = parser.parse_args()
# Check if data is available
data_dir = args.data_dir
if not os.path.exists(data_dir):
print("The data directory '%s' does not exist. Please download it first." % data_dir)
sys.exit(1)
print("Using data from %s" % args.data_dir)
if args.all_tasks:
run_all_tasks(data_dir)
elif args.joint_tasks:
run_joint_tasks(data_dir)
else:
run_task(data_dir, task_id=args.task)
|
recipes/Python/577252_lreplace_rreplace_Replace_beginning_ends/recipe-577252.py | tdiprima/code | 2,023 | 11177155 | <gh_stars>1000+
import re
def lreplace(pattern, sub, string):
"""
    Replaces 'pattern' (a regular expression) with 'sub' if it matches at the start of 'string'.
"""
return re.sub('^%s' % pattern, sub, string)
def rreplace(pattern, sub, string):
"""
    Replaces 'pattern' (a regular expression) with 'sub' if it matches at the end of 'string'.
"""
return re.sub('%s$' % pattern, sub, string)
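# Illustrative usage (remember that 'pattern' is a regex, so escape special characters):
#   lreplace('abc', 'X', 'abcdef')  # -> 'Xdef'
#   rreplace('abc', 'X', 'zzzabc')  # -> 'zzzX'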
|
src/python/nimbusml/tests/test_data_stream.py | michaelgsharp/NimbusML | 134 | 11177162 | <gh_stars>100-1000
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import os
import tempfile
import unittest
import numpy
import pandas
from nimbusml import DataSchema
from nimbusml import FileDataStream
try:
from pandas.testing import assert_frame_equal
except ImportError:
# earlier versions
from pandas.util.testing import assert_frame_equal
class TestDataStream(unittest.TestCase):
def test_data_stream(self):
df = pandas.DataFrame(dict(a=[0, 1], b=[0.1, 0.2]))
with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
df.to_csv(f, sep=',', index=False)
fi = FileDataStream.read_csv(f.name, sep=',')
fi2 = fi.clone()
assert repr(fi) == repr(fi2)
os.remove(f.name)
def test_data_header_no_dataframe(self):
li = [1.0, 1.0, 2.0]
df = pandas.DataFrame(li)
schema0 = DataSchema.read_schema(df)
assert str(schema0) == 'col=c0:R8:0 quote+ header=+'
li = [[1.0, 1.0, 2.0], [3.0, 5.0, 6.0]]
schema1 = DataSchema.read_schema(li)
assert str(schema1) == 'col=c0:R8:0 col=c1:R8:1 col=c2:R8:2 quote+ header=+'
df = pandas.DataFrame([[1.0, 1.0, 2.0], [3.0, 5.0, 6.0]])
schema2 = DataSchema.read_schema(df)
assert str(schema2) == 'col=c0:R8:0 col=c1:R8:1 col=c2:R8:2 quote+ header=+'
mat = numpy.array([[1.0, 1.0, 2.0], [3.0, 5.0, 6.0]])
schema3 = DataSchema.read_schema(mat)
assert str(schema3) == 'col=Data:R8:0-2 quote+ header=+'
li = [1.0, 1.0, 2.0]
df = pandas.DataFrame(li)
schema0 = DataSchema.read_schema(df, header=False)
assert str(schema0) == 'col=c0:R8:0 quote+ header=-'
def test_data_stream_head_file(self):
df = pandas.DataFrame(dict(a=[0, 1], b=[0.1, 0.2]))
with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
df.to_csv(f, sep=',', index=False)
df1 = df.head(1)
df2 = df[1:].reset_index(drop=True)
fi = FileDataStream.read_csv(f.name, sep=',')
head = fi.head(1)
head2 = fi.head(1, 1)
assert_frame_equal(head, df1)
assert_frame_equal(head2, df2)
head3 = fi.head(1, 1, collect=False).transform(fi, verbose=0)
assert_frame_equal(head3, df2)
dff = fi.to_df()
assert_frame_equal(df, dff)
os.remove(f.name)
if __name__ == "__main__":
unittest.main()
|
chapter-04/recipe-09/example/test/benchmark-a.py | istupsm/cmake-cookbook | 1,600 | 11177188 | import sys
import time
# wait for 0.5 seconds
time.sleep(0.5)
# finally report success
sys.exit(0)
|
flask__webservers/show_lunch_menu_from_email/config.py | DazEB2/SimplePyScripts | 117 | 11177199 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# For example: <<EMAIL>
username = "<username>"
password = "<password>"
# For example: smtp.mail.ru
smtp_server = "<smtp_server>"
# email of the sender of the messages with attached lunch menu .docx files
lunch_email = "<lunch_email>"
header_date_format = "%d/%m/%Y %H:%M:%S"
debug = True
|
musicautobot/multitask_transformer/transform.py | HalleyYoung/musicautobot | 402 | 11177207 | from ..music_transformer.transform import *
class MultitrackItem():
def __init__(self, melody:MusicItem, chords:MusicItem, stream=None):
self.melody,self.chords = melody, chords
self.vocab = melody.vocab
self._stream = stream
@classmethod
def from_file(cls, midi_file, vocab):
return cls.from_stream(file2stream(midi_file), vocab)
@classmethod
def from_stream(cls, stream, vocab):
if not isinstance(stream, music21.stream.Score): stream = stream.voicesToParts()
num_parts = len(stream.parts)
sort_pitch = False
if num_parts > 2:
raise ValueError('Could not extract melody and chords from midi file. Please make sure file contains exactly 2 tracks')
elif num_parts == 1:
print('Warning: only 1 track found. Inferring melody/chords')
stream = separate_melody_chord(stream)
sort_pitch = False
mpart, cpart = stream2npenc_parts(stream, sort_pitch=sort_pitch)
return cls.from_npenc_parts(mpart, cpart, vocab, stream)
@classmethod
def from_npenc_parts(cls, mpart, cpart, vocab, stream=None):
mpart = npenc2idxenc(mpart, seq_type=SEQType.Melody, vocab=vocab, add_eos=False)
cpart = npenc2idxenc(cpart, seq_type=SEQType.Chords, vocab=vocab, add_eos=False)
return MultitrackItem(MusicItem(mpart, vocab), MusicItem(cpart, vocab), stream)
@classmethod
def from_idx(cls, item, vocab):
m, c = item
return MultitrackItem(MusicItem.from_idx(m, vocab), MusicItem.from_idx(c, vocab))
def to_idx(self): return np.array((self.melody.to_idx(), self.chords.to_idx()))
@property
def stream(self):
self._stream = self.to_stream() if self._stream is None else self._stream
return self._stream
def to_stream(self, bpm=120):
ps = self.melody.to_npenc(), self.chords.to_npenc()
ps = [npenc2chordarr(p) for p in ps]
chordarr = chordarr_combine_parts(ps)
return chordarr2stream(chordarr, bpm=bpm)
def show(self, format:str=None):
return self.stream.show(format)
def play(self): self.stream.show('midi')
def transpose(self, val):
return MultitrackItem(self.melody.transpose(val), self.chords.transpose(val))
def pad_to(self, val):
return MultitrackItem(self.melody.pad_to(val), self.chords.pad_to(val))
def trim_to_beat(self, beat):
return MultitrackItem(self.melody.trim_to_beat(beat), self.chords.trim_to_beat(beat))
def combine2chordarr(np1, np2, vocab):
if len(np1.shape) == 1: np1 = idxenc2npenc(np1, vocab)
if len(np2.shape) == 1: np2 = idxenc2npenc(np2, vocab)
p1 = npenc2chordarr(np1)
p2 = npenc2chordarr(np2)
return chordarr_combine_parts((p1, p2))
|
tests/python/twitter/checkstyle/test_common.py | zhouyijiaren/commons | 1,143 | 11177208 | <reponame>zhouyijiaren/commons<gh_stars>1000+
import ast
import textwrap
from twitter.checkstyle.common import (
CheckstylePlugin,
Nit,
OffByOneList,
PythonFile
)
import pytest
def make_statement(statement):
return '\n'.join(textwrap.dedent(statement).splitlines()[1:])
PYTHON_STATEMENT = make_statement("""
import ast
from os.path import (
join,
split,
)
import zookeeper
class Keeper(object):
def __init__(self):
self._session = None
def session(self):
return self._session
""")
def test_python_file():
pf = PythonFile(PYTHON_STATEMENT, 'keeper.py')
assert pf.filename == 'keeper.py'
assert pf.logical_lines == {
1: (1, 2, 0),
2: (2, 6, 0),
7: (7, 8, 0),
10: (10, 11, 0),
11: (11, 12, 2),
12: (12, 13, 4),
14: (14, 15, 2),
15: (15, 16, 4)
}
with pytest.raises(IndexError):
pf[0]
with pytest.raises(IndexError):
pf[len(PYTHON_STATEMENT.splitlines()) + 1]
assert pf[1] == ["import ast"]
assert pf[2] == ["from os.path import (", " join,", " split,", ")"]
assert pf[3] == [" join,"]
assert '\n'.join(pf) == PYTHON_STATEMENT
assert list(pf.enumerate()) == list(enumerate(PYTHON_STATEMENT.splitlines(), 1))
def test_style_error():
pf = PythonFile(PYTHON_STATEMENT, 'keeper.py')
class ActualCheckstylePlugin(CheckstylePlugin):
def nits(self):
return []
cp = ActualCheckstylePlugin(pf)
se = cp.error('A123', 'You have a terrible taste in libraries.')
assert se.line_number is None
assert se.code == 'A123'
str(se)
se = cp.error('A123', 'You have a terrible taste in libraries.', 7)
assert se.line_number == '007'
str(se)
se = cp.error('A123', 'You have a terrible taste in libraries.', 2)
assert se.line_number == '002-005'
assert se.severity == Nit.ERROR
str(se)
sw = cp.warning('A321', 'You have a terrible taste in libraries.', 2)
assert sw.severity == Nit.WARNING
assert sw.code == 'A321'
str(sw)
import_from = None
for node in ast.walk(pf.tree):
if isinstance(node, ast.ImportFrom):
import_from = node
assert import_from is not None
ase = cp.error('B380', "I don't like your from import!", import_from)
assert ase.severity == Nit.ERROR
se = cp.error('B380', "I don't like your from import!", 2)
assert str(se) == str(ase)
def test_off_by_one():
obl = OffByOneList([])
for index in (-1, 0, 1):
with pytest.raises(IndexError):
obl[index]
for s in (slice(1, 1), slice(1, 2), slice(-2, -1)):
assert obl[s] == []
for s in (slice(-1, 0), slice(0, 1)):
with pytest.raises(IndexError):
obl[s]
obl = OffByOneList([1, 2, 3])
for k in (1, 2, 3):
assert obl[k] == k
assert obl[k:k + 1] == [k]
assert obl.index(k) == k
assert obl.count(k) == 1
assert list(reversed(obl)) == [3, 2, 1]
for k in (0, 4):
with pytest.raises(IndexError):
obl[k]
for value in (None, 2.0, type):
with pytest.raises(TypeError):
obl[value]
|
code_examples/miner.py | pavlovdog/bitcoin_in_a_nutshell | 121 | 11177210 | <filename>code_examples/miner.py
import hashlib
import struct
import time
import sys
# ======= Header =======
ver = 2
prev_block = "000000000000000000e5fb3654e0ae9a2b7d7390e37ee0a7c818ca09fde435f0"
mrkl_root = "6f3ef687979a1f4866cd8842dcbcebd2e47171e54d1cc76c540faecafe133c39"
bits = 0x10004379
time_ = 0x58777e25
# Calculate current time with this code:
# hex(int(time.mktime(time.strptime('2017-01-12 13:01:25', '%Y-%m-%d %H:%M:%S'))) - time.timezone)
exp = bits >> 24
mant = bits & 0xffffff
target_hexstr = '%064x' % (mant * (1 << (8 * (exp - 3))))
target_str = target_hexstr.decode('hex')
# ======== Header =========
nonce = 0
while nonce < 0x100000000:
header = ( struct.pack("<L", ver) + prev_block.decode('hex')[::-1] +
mrkl_root.decode('hex')[::-1] + struct.pack("<LLL", time_, bits, nonce))
hash = hashlib.sha256(hashlib.sha256(header).digest()).digest()
sys.stdout.write("\rNonce: {}, hash: {}".format(nonce, hash[::-1].encode('hex')))
sys.stdout.flush()
if hash[::-1] < target_str:
print 'Success!'
break
nonce += 1
|
src/odb/test/unitTestsPython/TestITerm.py | erictaur/OpenROAD | 525 | 11177213 | import opendbpy as odb
import helper
import odbUnitTest
class TestITerm(odbUnitTest.TestCase):
def setUp(self):
self.db, self.lib = helper.createSimpleDB()
blockName = '1LevelBlock'
self.block = odb.dbBlock_create(self.db.getChip(), blockName)
self.and2 = self.lib.findMaster('and2')
self.inst = odb.dbInst.create(self.block, self.and2, "inst")
self.iterm_a = self.inst.findITerm('a')
def tearDown(self):
self.db.destroy(self.db)
def test_idle(self):
self.assertIsNone(self.iterm_a.getNet())
def test_connection_from_iterm(self):
#Create net and Connect
n = odb.dbNet_create(self.block, 'n1')
self.assertEqual(n.getITermCount(), 0)
self.assertEqual(n.getITerms(), [])
self.iterm_a.connect(self.iterm_a, n)
self.iterm_a.setConnected()
self.assertEqual(self.iterm_a.getNet().getName(), 'n1')
self.assertEqual(n.getITermCount(), 1)
self.assertEqual(n.getITerms()[0].getMTerm().getName(), 'a')
self.assertTrue(self.iterm_a.isConnected())
#disconnect
self.iterm_a.disconnect(self.iterm_a)
self.iterm_a.clearConnected()
self.assertEqual(n.getITermCount(), 0)
self.assertEqual(n.getITerms(), [])
self.assertIsNone(self.iterm_a.getNet())
self.assertFalse(self.iterm_a.isConnected())
def test_connection_from_inst(self):
#Create net and Connect
n = odb.dbNet_create(self.block, 'n1')
self.assertEqual(n.getITermCount(), 0)
self.assertEqual(n.getITerms(), [])
self.iterm_a.connect(self.inst, n, self.inst.getMaster().findMTerm('a'))
self.iterm_a.setConnected()
self.assertEqual(self.iterm_a.getNet().getName(), 'n1')
self.assertEqual(n.getITermCount(), 1)
self.assertEqual(n.getITerms()[0].getMTerm().getName(), 'a')
self.assertTrue(self.iterm_a.isConnected())
#disconnect
self.iterm_a.disconnect(self.iterm_a)
self.iterm_a.clearConnected()
self.assertEqual(n.getITermCount(), 0)
self.assertEqual(n.getITerms(), [])
self.assertIsNone(self.iterm_a.getNet())
self.assertFalse(self.iterm_a.isConnected())
def test_avgxy_R0(self):
x = odb.new_int(0)
y = odb.new_int(0)
self.assertFalse(self.iterm_a.getAvgXY(x, y)) #no mpin to work on
mterm_a = self.and2.findMTerm('a')
mpin_a = odb.dbMPin_create(mterm_a)
self.assertFalse(self.iterm_a.getAvgXY(x, y)) #no boxes to work on
geo_box_a_1 = odb.dbBox_create(mpin_a, self.lib.getTech().getLayers()[0], 0, 0, 50, 50)
self.assertTrue(self.iterm_a.getAvgXY(x, y))
self.assertEqual(odb.get_int(x), int((0+50)/2))
self.assertEqual(odb.get_int(y), int((0+50)/2))
geo_box_a_2 = odb.dbBox_create(mpin_a, self.lib.getTech().getLayers()[0], 5, 10, 100, 100)
self.assertTrue(self.iterm_a.getAvgXY(x, y))
self.assertEqual(odb.get_int(x), int( ((0+50)+(5+100))/4 ) )
self.assertEqual(odb.get_int(y), int( ((0+50)+(10+100))/4 ) )
def test_avgxy_R90(self):
x = odb.new_int(0)
y = odb.new_int(0)
mterm_a = self.and2.findMTerm('a')
mpin_a = odb.dbMPin_create(mterm_a)
geo_box_a_1 = odb.dbBox_create(mpin_a, self.lib.getTech().getLayers()[0], 0, 0, 50, 50)
geo_box_a_2 = odb.dbBox_create(mpin_a, self.lib.getTech().getLayers()[0], 0, 0, 100, 100)
self.inst.setOrient('R90')
self.assertTrue(self.iterm_a.getAvgXY(x, y))
self.assertEqual(odb.get_int(x), int( ((0+50)+(0+100))/4 )*-1 )
self.assertEqual(odb.get_int(y), int( ((0+50)+(0+100))/4 ) )
if __name__=='__main__':
odbUnitTest.mainParallel(TestITerm)
# odbUnitTest.main()
|
ote/ote/modules/compression/nncf_reid_config_transformer.py | dqawami/openvino_training_extensions | 256 | 11177227 | """
Copyright (c) 2020-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import yaml
from ..registry import COMPRESSION
from .nncf_config_transformer import (save_config,
generate_config_path,
NNCFConfigTransformer)
@COMPRESSION.register_module()
class NNCFReidConfigTransformer:
CONFIG_ARG_TO_SUBSTITUTE = 'config'
NAME_FIELD_TO_EXTRACT_TO_NNCF_CONFIG = 'nncf_config'
NAME_FIELD_FOR_EXTRACTED_NNCF_CONFIG_PATH = 'nncf_config_path'
NAME_FIELD_TO_EXTRACT_TO_AUX_CONFIG_CHANGE = 'changes_aux_config'
NAME_FIELD_FOR_EXTRACTED_AUX_CONFIG_CHANGE_PATH = 'changes_in_aux_train_config'
def process_args(self, template_path, kwargs):
assert self.CONFIG_ARG_TO_SUBSTITUTE in kwargs, (
f'Error: kwargs does not contain {self.CONFIG_ARG_TO_SUBSTITUTE}, kwargs={kwargs}')
kwargs, is_optimisation_enabled = NNCFConfigTransformer().process_args(template_path, kwargs)
if not is_optimisation_enabled:
return kwargs, is_optimisation_enabled
assert self.CONFIG_ARG_TO_SUBSTITUTE in kwargs, (
f'Error: kwargs after NNCFConfigTransformer does not contain {self.CONFIG_ARG_TO_SUBSTITUTE}, '
f'kwargs={kwargs}')
cur_config_path = kwargs[self.CONFIG_ARG_TO_SUBSTITUTE]
with open(cur_config_path) as f:
cur_config = yaml.safe_load(f)
if ( (self.NAME_FIELD_TO_EXTRACT_TO_NNCF_CONFIG not in cur_config) or
(self.NAME_FIELD_TO_EXTRACT_TO_AUX_CONFIG_CHANGE not in cur_config) ):
raise RuntimeError(f'The fields {self.NAME_FIELD_TO_EXTRACT_TO_NNCF_CONFIG} '
f'and {self.NAME_FIELD_TO_EXTRACT_TO_AUX_CONFIG_CHANGE} may be absent '
f'in generated config file {cur_config_path} -- but they are required for '
f'{type(self).__name__} -- check the original config file '
f'and nncf config')
nncf_part_to_extract = cur_config[self.NAME_FIELD_TO_EXTRACT_TO_NNCF_CONFIG]
del cur_config[self.NAME_FIELD_TO_EXTRACT_TO_NNCF_CONFIG]
aux_changes_part_to_extract = cur_config[self.NAME_FIELD_TO_EXTRACT_TO_AUX_CONFIG_CHANGE]
del cur_config[self.NAME_FIELD_TO_EXTRACT_TO_AUX_CONFIG_CHANGE]
new_config_path = generate_config_path(cur_config_path, 'yml')
extracted_nncf_cfg_path = generate_config_path(cur_config_path, 'nncf_part.json')
changes_aux_config_path = generate_config_path(cur_config_path, 'aux_changes.yml')
save_config(nncf_part_to_extract, extracted_nncf_cfg_path)
logging.debug(f'Extracted NNCF part of config saved to the file {extracted_nncf_cfg_path}')
save_config(aux_changes_part_to_extract, changes_aux_config_path)
logging.debug(f'Extracted aux changes of config saved to the file {changes_aux_config_path}')
cur_config['nncf'][self.NAME_FIELD_FOR_EXTRACTED_NNCF_CONFIG_PATH] = extracted_nncf_cfg_path
cur_config['nncf'][self.NAME_FIELD_FOR_EXTRACTED_AUX_CONFIG_CHANGE_PATH] = changes_aux_config_path
save_config(cur_config, new_config_path)
logging.debug(f'After extracting NNCF part: saved new config to the file {new_config_path}')
kwargs[self.CONFIG_ARG_TO_SUBSTITUTE] = new_config_path
return kwargs, is_optimisation_enabled
|
hardware/demo_i2c.py | 708yamaguchi/MaixPy_scripts | 485 | 11177232 | from machine import I2C
from fpioa_manager import fm
# i2c = I2C(I2C.I2C0, freq=100000, scl=28, sda=29) # hardware i2c
i2c = I2C(I2C.I2C3, freq=100000, scl=28, sda=29) # software i2c
#i2c = I2C(I2C.I2C_SOFT, freq=100000, scl=28, sda=29,
#gscl = fm.fpioa.GPIOHS1, gsda = fm.fpioa.GPIOHS2) # software i2c for the latest firmware
devices = i2c.scan()
print(devices)
for device in devices:
i2c.writeto(device, b'123')
i2c.readfrom(device, 3)
# tmp = bytearray(6)
# i2c.readfrom_into(device, tmp, True)
|
tempest/lib/services/image/v1/image_members_client.py | rishabh20111990/tempest | 254 | 11177246 | <gh_stars>100-1000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class ImageMembersClient(rest_client.RestClient):
api_version = "v1"
def list_image_members(self, image_id):
"""List all members of an image."""
url = 'images/%s/members' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_shared_images(self, tenant_id):
"""List image memberships for the given tenant.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/image/v1/#list-shared-images
"""
url = 'shared-images/%s' % tenant_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def create_image_member(self, image_id, member_id, **kwargs):
"""Add a member to an image.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/image/v1/#add-member-to-image
"""
url = 'images/%s/members/%s' % (image_id, member_id)
body = json.dumps({'member': kwargs})
resp, __ = self.put(url, body)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def delete_image_member(self, image_id, member_id):
"""Removes a membership from the image.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/image/v1/#remove-member
"""
url = 'images/%s/members/%s' % (image_id, member_id)
resp, __ = self.delete(url)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
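# Illustrative usage sketch (not part of tempest): the constructor arguments are
# inherited from rest_client.RestClient, and the auth provider, region and IDs
# below are assumed placeholders rather than documented values.
# client = ImageMembersClient(auth_provider, 'image', 'RegionOne')
# client.create_image_member(image_id, member_id, can_share=True)
# members = client.list_image_members(image_id)
# client.delete_image_member(image_id, member_id)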
|
tests/test_test_utils.py | decentral1se/purerpc | 143 | 11177274 | import os
import sys
import time
import pytest
import traceback
import multiprocessing
from purerpc.test_utils import run_tests_in_workers, _run_context_manager_generator_in_process
def test_run_tests_in_workers_error():
def target_fn():
def inner_2():
def inner_1():
raise ValueError("42")
inner_1()
inner_2()
with pytest.raises(ValueError, match="42"):
run_tests_in_workers(target=target_fn, num_workers=1)
def test_run_tests_in_workers_error_traceback():
def target_fn():
def inner_2():
def inner_1():
raise ValueError("42")
inner_1()
inner_2()
try:
run_tests_in_workers(target=target_fn, num_workers=1)
except ValueError:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_tb(exc_traceback)
expected_traceback = ("target_fn", "inner_2", "inner_1")
for expected_fname, line in zip(expected_traceback[::-1], lines[::-1]):
assert expected_fname in line
def test_run_tests_in_workers():
num_workers = 10
queue = multiprocessing.Queue()
def target_fn():
queue.put(os.getpid())
run_tests_in_workers(target=target_fn, num_workers=num_workers)
pids = set()
for _ in range(num_workers):
pid = queue.get_nowait()
pids.add(pid)
assert len(pids) == num_workers
def test_run_context_manager_generator_in_process():
def gen():
yield 42
with _run_context_manager_generator_in_process(gen) as result:
assert result == 42
def test_run_context_manager_generator_in_process_error_before():
def gen():
raise ValueError("42")
with pytest.raises(ValueError, match="42"):
with _run_context_manager_generator_in_process(gen) as result:
assert result == 42
def test_run_context_manager_generator_in_process_error_after():
def gen():
yield 42
raise ValueError("42")
with pytest.raises(ValueError, match="42"):
with _run_context_manager_generator_in_process(gen) as result:
assert result == 42
time.sleep(0.1)
def test_run_context_manager_generator_in_process_error_traceback():
def gen():
def inner_2():
def inner_1():
raise ValueError("42")
inner_1()
inner_2()
try:
with _run_context_manager_generator_in_process(gen):
pass
except ValueError:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_tb(exc_traceback)
expected_traceback = ("gen", "inner_2", "inner_1")
for expected_fname, line in zip(expected_traceback[::-1], lines[::-1]):
assert expected_fname in line
|
streamalert/shared/lookup_tables/errors.py | cninja1/streamalert | 2,770 | 11177276 |
class LookupTablesError(RuntimeError):
"""Generic class for errors raised from LookupTables systems"""
class LookupTablesInitializationError(LookupTablesError):
"""Any error raised when a specific table/driver is attempting to initialize"""
class LookupTablesCommitError(LookupTablesError):
"""Any error raised when a LookupTable or driver fails to successfully commit changes"""
class LookupTablesConfigurationError(LookupTablesError):
"""Errors raised that detect a misconfiguration for any LookupTables system"""
|
office365/onedrive/analytics/item_activity.py | rikeshtailor/Office365-REST-Python-Client | 544 | 11177277 | <reponame>rikeshtailor/Office365-REST-Python-Client
from office365.entity import Entity
class ItemActivity(Entity):
"""
The itemActivity resource provides information about activities that took place on an item or within a container.
Currently only available on SharePoint and OneDrive for Business.
"""
pass
|
Src/StdLib/Lib/test/bad_coding3.py | cwensley/ironpython2 | 2,209 | 11177284 | <gh_stars>1000+
# coding: string-escape
\x70\x72\x69\x6e\x74\x20\x32\x2b\x32\x0a
|
tests/nnapi/specs/Ex/range_ex_float_1_all_constant_inputs.mod.py | periannath/ONE | 255 | 11177310 | <reponame>periannath/ONE
# model
model = Model()
shape = Input("lhs", "TENSOR_INT32", "{2}")
start = Parameter("start", "TENSOR_FLOAT32", "{}", [1])
limit = Parameter("limit", "TENSOR_FLOAT32", "{}", [5])
delta = Parameter("delta", "TENSOR_FLOAT32", "{}", [0.5])
range_out = Internal("range_out", "TENSOR_FLOAT32", "{8}")
out = Output("output", "TENSOR_FLOAT32", "{1, 8}")
model = model.Operation("RANGE_EX", start, limit, delta).To(range_out)
model = model.Operation("RESHAPE", range_out, shape).To(out)
input0 = {shape: [1, 8]}
output0 = {out: # output 0
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5]}
# Instantiate an example
Example((input0,output0))
|
recnn/data/dataset_functions.py | XinruLiu/RecNN | 495 | 11177313 | <filename>recnn/data/dataset_functions.py
from .pandas_backend import pd
import numpy as np
from typing import List, Dict, Callable
"""
What?
+++++
RecNN is designed to work with your data flow.
Set kwargs at the beginning of the prepare_dataset function.
The kwargs you set are immutable.
args_mut holds the mutable arguments; you can access the following:
base: data.EnvBase, df: DataFrame, users: List[int],
user_dict: Dict[int, Dict[str, np.ndarray]]
Access and modify args_mut in the functions you define.
It is best to use function chaining with build_data_pipeline.
recnn.data.prepare_dataset is the function used by default in Env.__init__,
but sometimes you want something extra. truncate_dataset is also predefined;
it truncates the number of items to the specified count.
In the reinforce example it is modified to look like::
def prepare_dataset(args_mut, kwargs):
kwargs.set('reduce_items_to', num_items) # set kwargs for your functions here!
pipeline = [recnn.data.truncate_dataset, recnn.data.prepare_dataset]
recnn.data.build_data_pipeline(pipeline, kwargs, args_mut)
        # embeddings: https://drive.google.com/open?id=1EQ_zXBR3DKpmJR3jBgLvt-xoOvArGMsL
env = recnn.data.env.FrameEnv('..',
'...', frame_size, batch_size,
embed_batch=embed_batch, prepare_dataset=prepare_dataset,
num_workers=0)
"""
def try_progress_apply(dataframe, function):
try:
return dataframe.progress_apply(function)
except AttributeError:
return dataframe.apply(function)
# Plain args. Shouldn't be mutated
class DataFuncKwargs:
def __init__(self, **kwargs):
self.kwargs = kwargs
def keys(self):
return self.kwargs.keys()
def get(self, name: str):
if name not in self.kwargs:
example = """
# example on how to use kwargs:
def prepare_dataset(args, args_mut):
args.set_kwarg('{}', your_value) # set kwargs for your functions here!
pipeline = [recnn.data.truncate_dataset, recnn.data.prepare_dataset]
recnn.data.build_data_pipeline(pipeline, args, args_mut)
"""
raise AttributeError(
"No kwarg with name {} found!\n{}".format(name, example.format(example))
)
return self.kwargs[name]
def set(self, name: str, value):
self.kwargs[name] = value
# Used for returning, arguments are mutable
class DataFuncArgsMut:
def __init__(
self, df, base, users: List[int], user_dict: Dict[int, Dict[str, np.ndarray]]
):
self.base = base
self.users = users
self.user_dict = user_dict
self.df = df
def prepare_dataset(args_mut: DataFuncArgsMut, kwargs: DataFuncKwargs):
"""
Basic prepare dataset function. Automatically makes index linear, in ml20 movie indices look like:
[1, 34, 123, 2000], recnn makes it look like [0,1,2,3] for you.
"""
# get args
frame_size = kwargs.get("frame_size")
key_to_id = args_mut.base.key_to_id
df = args_mut.df
# rating range mapped from [0, 5] to [-5, 5]
df["rating"] = try_progress_apply(df["rating"], lambda i: 2 * (i - 2.5))
# id's tend to be inconsistent and sparse so they are remapped here
df["movieId"] = try_progress_apply(df["movieId"], key_to_id.get)
users = df[["userId", "movieId"]].groupby(["userId"]).size()
users = users[users > frame_size].sort_values(ascending=False).index
if pd.get_type() == "modin":
        df = df._to_pandas()  # pandas groupby is sync and doesn't affect performance
ratings = (
df.sort_values(by="timestamp")
.set_index("userId")
.drop("timestamp", axis=1)
.groupby("userId")
)
# Groupby user
user_dict = {}
def app(x):
userid = x.index[0]
user_dict[userid] = {}
user_dict[userid]["items"] = x["movieId"].values
user_dict[userid]["ratings"] = x["rating"].values
try_progress_apply(ratings, app)
args_mut.user_dict = user_dict
args_mut.users = users
return args_mut, kwargs
def truncate_dataset(args_mut: DataFuncArgsMut, kwargs: DataFuncKwargs):
"""
Truncate #items to reduce_items_to provided in kwargs
"""
# here are adjusted n items to keep
num_items = kwargs.get("reduce_items_to")
df = args_mut.df
counts = df["movieId"].value_counts().sort_values()
to_remove = counts[:-num_items].index
to_keep = counts[-num_items:].index
to_keep_id = pd.get().Series(to_keep).apply(args_mut.base.key_to_id.get).values
to_keep_mask = np.zeros(len(counts))
to_keep_mask[to_keep_id] = 1
args_mut.df = df.drop(df[df["movieId"].isin(to_remove)].index)
key_to_id_new = {}
id_to_key_new = {}
count = 0
for idx, i in enumerate(list(args_mut.base.key_to_id.keys())):
if i in to_keep:
key_to_id_new[i] = count
id_to_key_new[idx] = i
count += 1
args_mut.base.embeddings = args_mut.base.embeddings[to_keep_mask]
args_mut.base.key_to_id = key_to_id_new
args_mut.base.id_to_key = id_to_key_new
print(
"action space is reduced to {} - {} = {}".format(
num_items + len(to_remove), len(to_remove), num_items
)
)
return args_mut, kwargs
def build_data_pipeline(
chain: List[Callable], kwargs: DataFuncKwargs, args_mut: DataFuncArgsMut
):
"""
Higher order function
:param chain: array of callable
:param **kwargs: any kwargs you like
"""
for call in chain:
# note: returned kwargs are not utilized to guarantee immutability
args_mut, _ = call(args_mut, kwargs)
return args_mut, kwargs
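# Minimal usage sketch (assumptions: an EnvBase-like `env_base` and a ratings
# DataFrame `df` already exist; the names are illustrative, not part of recnn):
# kwargs = DataFuncKwargs(frame_size=10, reduce_items_to=5000)
# args_mut = DataFuncArgsMut(df=df, base=env_base, users=[], user_dict={})
# build_data_pipeline([truncate_dataset, prepare_dataset], kwargs, args_mut)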
|
OnePy/sys_module/base_cleaner.py | Chandlercjy/OnePyfx | 321 | 11177338 | import abc
from collections import defaultdict, deque
from functools import partial
from itertools import count
import arrow
from OnePy.sys_module.components.market_maker import MarketMaker
from OnePy.sys_module.metabase_env import OnePyEnvBase
class CleanerBase(OnePyEnvBase):
counter = count(1)
def __init__(self, rolling_window: int, buffer_day: int,
frequency: str = None) -> None:
self.name = f'{self.__class__.__name__}_{next(self.counter)}'
self.env.cleaners.update({self.name: self})
self.rolling_window = rolling_window
self.buffer_day = buffer_day
self.frequency = frequency
self.data = defaultdict(dict) # type:defaultdict
assert buffer_day <= 500, 'buffer_day should not bigger than 500!'
def _check_length(self, key: str, buffer_day: int):
ticker = key.replace(f'_{self.frequency}', '')
        if buffer_day > 500 or not self.data[key]:  # exceeding the limit means there is no more data
del self.data[key]
self.env.logger.warning(
f'{ticker}_{self.frequency} is not enough for cleaners. Deleted!!!!')
if ticker in self.env.tickers:
self.env.tickers.remove(ticker)
return
elif len(self.data[key]['close']) < self.data[key]['close'].maxlen:
buffer_day += 2
self.buffer_day = buffer_day
self.initialize_buffer_data(ticker, buffer_day)
self.env.logger.warning(
f'Retry {self.name}, perfect buffer_day = {buffer_day}')
def initialize_buffer_data(self, ticker: str, buffer_day: int):
self._settle_frequency(ticker)
reader = self.env.readers[ticker]
buffer_start_date = arrow.get(self.env.sys_date).shift(
days=-buffer_day).format('YYYY-MM-DD HH:mm:ss')
buffer_end_date = arrow.get(self.env.sys_date).shift(
seconds=-1).format('YYYY-MM-DD HH:mm:ss')
key = f'{ticker}_{self.frequency}'
single_data = defaultdict(partial(deque, maxlen=self.rolling_window))
buffer_data = reader.load_by_cleaner(fromdate=buffer_start_date,
todate=buffer_end_date,
frequency=self.frequency)
for value in buffer_data:
single_data['open'].append(value['open'])
single_data['high'].append(value['high'])
single_data['low'].append(value['low'])
single_data['close'].append(value['close'])
single_data['volume'].append(value['volume'])
single_data['date'].append(value['date'])
self.data[key].update(single_data)
self._check_length(key, buffer_day)
def _append_data_to_buffer(self):
for key in list(self.data):
ticker = key.replace(f'_{self.frequency}', '')
            if ticker not in self.env.cur_suspended_tickers:  # only update when the ticker is not suspended
if self.frequency == self.env.sys_frequency:
cleaners_ohlc = self.env.feeds[ticker]
self._save_data(key, cleaners_ohlc)
else:
cleaners_ohlc = self.env.cleaners_feeds[f'{key}_{self.name}']
next_datetime = arrow.get(cleaners_ohlc.next_ohlc['date'])
sys_date = arrow.get(self.env.sys_date)
while next_datetime <= sys_date:
try:
cleaners_ohlc.next_directly()
self._save_data(key, cleaners_ohlc)
except StopIteration:
                            # an exception here probably means the cleaner frequency is larger than the system frequency
cur_datetime = arrow.get(
cleaners_ohlc.current_ohlc['date'])
                            if cur_datetime > sys_date:  # this should never trigger
self.env.logger.warning(
"框架回测逻辑出现错误!!")
break
next_datetime = arrow.get(
cleaners_ohlc.next_ohlc['date'])
sys_date = arrow.get(self.env.sys_date)
def _settle_frequency(self, ticker):
if self.frequency:
self._save_cleaners_feeds(ticker)
else:
self.frequency = self.env.sys_frequency
def _save_cleaners_feeds(self, ticker: str):
key = f'{ticker}_{self.frequency}_{self.name}'
value = MarketMaker.get_bar(ticker, self.frequency)
if value.initialize(7):
self.env.cleaners_feeds.update({key: value})
def _save_data(self, key, cleaners_ohlc):
self.data[key]['date'].append(cleaners_ohlc.date)
self.data[key]['open'].append(cleaners_ohlc.open)
self.data[key]['high'].append(cleaners_ohlc.high)
self.data[key]['low'].append(cleaners_ohlc.low)
self.data[key]['close'].append(cleaners_ohlc.close)
self.data[key]['volume'].append(cleaners_ohlc.volume)
def run(self):
self._append_data_to_buffer()
@abc.abstractmethod
def calculate(self, ticker: str):
raise NotImplementedError
|
tests/ex03_company/models.py | RodrigoDeRosa/related | 190 | 11177355 | import related
@related.mutable
class Company(object):
name = related.StringField()
uuid = related.UUIDField()
email = related.RegexField("[^@]+@[^@]+", required=False)
is_active = related.BooleanField(required=False)
url = related.URLField(required=False)
meta = related.ChildField(dict, required=False)
nicknames = related.SequenceField(str, required=False)
temperature = related.FloatField(required=False)
guess = related.SetField(int, required=False)
established = related.DateField('%m/%d/%Y', required=False)
closed = related.DateField(required=False) # default formatter (%Y-%m-%d)
|
tests/config/test_config.py | kargaranamir/emerge | 142 | 11177364 | """
All unit tests that are related to configuration.
"""
# Authors: <NAME> <<EMAIL>>
# License: MIT
import unittest
from emerge.config import Configuration
from emerge.config import Analysis, YamlLoader
import coloredlogs
import logging
LOGGER = logging.getLogger('TESTS')
coloredlogs.install(level='INFO', logger=LOGGER, fmt='\n%(asctime)s %(name)s %(levelname)s %(message)s')
# pylint: disable=protected-access
class ConfigurationTestCase(unittest.TestCase):
def setUp(self):
self.version = "1.0.0"
self.configuration = Configuration(self.version)
self.analysis = Analysis()
def tearDown(self):
pass
def test_config_init(self):
self.assertIsNotNone(self.configuration)
self.assertIsNotNone(self.configuration.analyses)
self.assertTrue(len(self.configuration.analyses) == 0)
self.assertTrue(self.configuration.project_name == "unnamed")
self.assertIsNotNone(self.configuration._yaml_loader)
self.assertIs(type(self.configuration._yaml_loader), YamlLoader)
LOGGER.info(f'completed testing of CurrentConfiguration init')
if __name__ == '__main__':
unittest.main()
|
tests/client/base.py | Allerter/tekore | 135 | 11177371 | <filename>tests/client/base.py
import pytest
from unittest.mock import MagicMock
from tekore import HTTPError
from tekore.model import PlayerErrorReason
from tekore import Spotify
from ._resources import album_id
@pytest.fixture
def client():
return Spotify('token')
class TestSpotifyBaseUnits:
def test_repr(self):
s = Spotify()
assert repr(s).startswith('Spotify(')
def test_token_is_given_token(self):
token = MagicMock()
client = Spotify(token)
assert token is client.token
def test_token_assignable(self, client):
client.token = 'new'
assert client.token == 'new'
def test_bad_request_is_parsed_for_error_reason(self, client):
error = list(PlayerErrorReason)[0]
class BadResponse:
status_code = 404
url = 'example.com'
content = {'error': {
'message': 'Error message',
'reason': error.name
}}
sender = MagicMock()
sender.send.return_value = BadResponse()
sender.is_async = False
client.sender = sender
try:
client.album('not-an-id')
raise AssertionError()
except HTTPError as e:
assert error.value in str(e)
class TestSpotifyBase:
def test_album_nonexistent_market_error_message_parsed(self, app_client):
try:
app_client.album(album_id, market='__')
raise AssertionError()
except HTTPError as e:
assert 'Invalid market code' in str(e)
|
dnachisel/builtin_specifications/codon_optimization/BaseCodonOptimizationClass.py | simone-pignotti/DnaChisel | 124 | 11177374 | from ..CodonSpecification import CodonSpecification
from python_codon_tables import get_codons_table
import numpy as np
from ...Location import Location
from ...biotools import group_nearby_indices
class BaseCodonOptimizationClass(CodonSpecification):
best_possible_score = 0 # Don't forget to change in subclasses if needed
localization_group_spread = 3
def __init__(
self, species=None, location=None, codon_usage_table=None, boost=1.0
):
self.boost = boost
self.location = Location.from_data(location)
self.species = species
self.codon_usage_table = self.get_codons_table(
species, codon_usage_table
)
def get_codons(self, problem):
subsequence = self.location.extract_sequence(problem.sequence)
if len(subsequence) % 3:
raise ValueError(
"Spec. %s is on a window/sequence with size not multiple of 3)"
% (self.label())
)
return [
subsequence[3 * i : 3 * (i + 1)]
for i in range(int(len(subsequence) / 3))
]
@staticmethod
def get_codons_table(species, codon_usage_table):
if codon_usage_table is None:
if species is None:
raise ValueError(
"Provide either an species name or a codon usage table"
)
else:
codon_usage_table = get_codons_table(species)
return codon_usage_table
def initialized_on_problem(self, problem, role):
"""Get location from sequence if no location provided."""
return self._copy_with_full_span_if_no_location(problem)
def codons_indices_to_locations(self, indices):
"""Convert a list of codon positions to a list of Locations"""
indices = np.array(indices)
if self.location.strand == -1:
indices = sorted(self.location.end - 3 * indices)
return [
Location(group[0] - 3, group[-1], strand=-1)
for group in group_nearby_indices(
indices, max_group_spread=self.localization_group_spread
)
]
else:
indices = self.location.start + 3 * indices
return [
Location(group[0], group[-1] + 3)
for group in group_nearby_indices(
indices, max_group_spread=self.localization_group_spread
)
]
def get_codons_synonyms(self):
"""Return a dict {"GTG": [GTG, GTC, ...]} of synonymous codons."""
return {
codon: [c for c in aa_codons]
for aa, aa_codons in self.codon_usage_table.items()
if len(aa) == 1
for codon in aa_codons
}
def get_codons_translations(self):
"""Return a dict {"ATG": "M", "TAG": "*", ...}."""
return {
codon: aa
for aa, aa_codons in self.codon_usage_table.items()
if len(aa) == 1
for codon in aa_codons.keys()
}
def localized_on_window(self, new_location, start_codon, end_codon):
"""Relocate without changing much."""
# The "new_location" already has exactly the right span and strand
# thanks to superclass CodonSpecification
return self.copy_with_changes(location=new_location)
|
examples/cloudml-sklearn-pipeline/trainer/metadata.py | ruchirjain86/professional-services | 2,116 | 11177394 | <gh_stars>1000+
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset metadata."""
# Usage: Modify below based on the dataset used.
CSV_COLUMNS = None # Schema of the data. Necessary for data stored in GCS
# The following provides an example based on the census dataset.
NUMERIC_FEATURES = [
'age',
'hours_per_week',
]
CATEGORICAL_FEATURES = [
'workclass',
'education',
'marital_status',
'occupation',
'relationship',
'race',
'sex',
'native_country'
]
FEATURE_COLUMNS = NUMERIC_FEATURES + CATEGORICAL_FEATURES
LABEL = 'income_bracket'
PROBLEM_TYPE = 'classification' # 'regression' or 'classification'
BASE_QUERY = '''
SELECT
*
FROM
`{table}`
'''
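# Illustrative aside (not part of the original trainer): the template above is
# expected to be filled with a BigQuery table path via str.format, e.g.
# BASE_QUERY.format(table='my_project.my_dataset.census')  # assumed table name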
|
scripts/step3.py | awesome-archive/subreddit-analyzer | 497 | 11177403 | """
This script contains several functions that will create plots and generate insights from
the 4 datasets (submissions, comments, tokens and entities).
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import wordcloud
from pandas.plotting import register_matplotlib_converters
from PIL import Image
register_matplotlib_converters()
sns.set(style="ticks",
rc={
"figure.figsize": [12, 7],
"text.color": "white",
"axes.labelcolor": "white",
"axes.edgecolor": "white",
"xtick.color": "white",
"ytick.color": "white",
"axes.facecolor": "#222222",
"figure.facecolor": "#222222"}
)
MASK_FILE = "./assets/cloud.png"
FONT_FILE = "./assets/sofiapro-light.otf"
EN_STOPWORDS = "./assets/stopwords-en.txt"
ES_STOPWORDS = "./assets/stopwords-es.txt"
def get_most_common_domains(df):
"""Prints the 20 most frequent domains from submissions.
Parameters
----------
df : pandas.DataFrame
The submissions DataFrame.
"""
df = df["domain"].value_counts()[0:20]
print(df)
def get_most_common_submitters(df):
"""Prints the 20 most frequent submitters.
Parameters
----------
df : pandas.DataFrame
The submissions DataFrame.
"""
# Optional: Remove the [deleted] user.
df.drop(df[df["author"] == "[deleted]"].index, inplace=True)
df = df["author"].value_counts()[0:20]
print(df)
def get_most_common_commenters(df):
"""Prints the 20 most frequent commenters.
Parameters
----------
df : pandas.DataFrame
The comments DataFrame.
"""
# Optional: Remove the [deleted] user.
df.drop(df[df["author"] == "[deleted]"].index, inplace=True)
df = df["author"].value_counts()[0:20]
print(df)
def get_insights(df, df2):
"""Prints several interesting insights.
Parameters
----------
df : pandas.DataFrame
The submissions DataFrame.
df2 : pandas.DataFrame
The comments DataFrame.
"""
# Get DataFrame totals.
print("Total submissions:", len(df))
print("Total comments:", len(df2))
# Get unique submitters and commenters.
submitters_set = set(df.groupby("author").count().index.tolist())
commenters_set = set(df2.groupby("author").count().index.tolist())
print("Total Submitters:", len(submitters_set))
print("Total Commenters:", len(commenters_set))
print("Common Submitters and Commenters:", len(
submitters_set.intersection(commenters_set)))
print("Not common submitters:", len(submitters_set.difference(commenters_set)))
print("Not common commenters:", len(
commenters_set.difference(submitters_set)))
print("\Submissions stats:\n")
resampled_submissions = df.resample("D").count()
print("Most submissions on:", resampled_submissions.idxmax()["author"])
print("Least submissions on:", resampled_submissions.idxmin()["author"])
print(resampled_submissions.describe())
print("\nComments stats:\n")
resampled_comments = df2.resample("D").count()
print("Most comments on:", resampled_comments.idxmax()["author"])
print("Least comments on:", resampled_comments.idxmin()["author"])
print(resampled_comments.describe())
def plot_submissions_and_comments_by_weekday(df, df2):
"""Creates a vertical bar plot with the percentage of
submissions and comments by weekday.
Parameters
----------
df : pandas.DataFrame
The submissions DataFrame.
df2 : pandas.DataFrame
The comments DataFrame.
"""
# Days of the week in English.
labels = ["Monday", "Tuesday", "Wednesday",
"Thursday", "Friday", "Saturday", "Sunday"]
# These will be used for calculating percentages.
total = len(df)
total2 = len(df2)
# 0 to 6 (Monday to Sunday).
submissions_weekdays = {i: 0 for i in range(0, 7)}
comments_weekdays = {i: 0 for i in range(0, 7)}
# We filter the DataFrames and set each weekday value
# equal to its number of records.
for k, v in submissions_weekdays.items():
submissions_weekdays[k] = len(df[df.index.weekday == k])
for k, v in comments_weekdays.items():
comments_weekdays[k] = len(df2[df2.index.weekday == k])
# The first set of vertical bars have a little offset to the left.
# This is so the next set of bars can fit in the same place.
bars = plt.bar([i - 0.2 for i in submissions_weekdays.keys()], [(i / total) * 100 for i in submissions_weekdays.values()], 0.4,
color="#1565c0", linewidth=0)
# This loop creates small texts with the absolute values above each bar.
for bar in bars:
height = bar.get_height()
real_value = int((height * total) / 100)
plt.text(bar.get_x() + bar.get_width()/2.0, height,
"{:,}".format(real_value), ha="center", va="bottom")
# This set of bars have a little offset to the right so they can fit
# with the previous ones.
bars2 = plt.bar([i + 0.2 for i in comments_weekdays.keys()], [(i / total2) * 100 for i in comments_weekdays.values()], 0.4,
color="#f9a825", linewidth=0)
# This loop creates small texts with the absolute values above each bar (second set of bars).
for bar2 in bars2:
height2 = bar2.get_height()
real_value2 = int((height2 * total2) / 100)
plt.text(bar2.get_x() + bar2.get_width()/2.0, height2,
"{:,}".format(real_value2), ha="center", va="bottom")
# We remove the top and right spines.
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["right"].set_visible(False)
# For the xticks we use the previously defined English weekdays.
plt.xticks(list(submissions_weekdays.keys()), labels)
# We add final customizations.
plt.xlabel("Day of the Week")
plt.ylabel("Percentage")
plt.title("Submissions and Comments by Day")
plt.legend(["Submissions", "Comments"])
plt.tight_layout()
plt.savefig("submissionsandcommentsbyweekday.png", facecolor="#222222")
def plot_submissions_and_comments_by_hour(df, df2):
"""Creates a horizontal bar plot with the percentage of
submissions and comments by hour of the day.
Parameters
----------
df : pandas.DataFrame
The submissions DataFrame.
df2 : pandas.DataFrame
The comments DataFrame.
"""
# The hours of the day labels, from midnight to 11 pm.
labels = ["00:00", "01:00", "02:00", "03:00", "04:00", "05:00",
"06:00", "07:00", "08:00", "09:00", "10:00", "11:00",
"12:00", "13:00", "14:00", "15:00", "16:00", "17:00",
"18:00", "19:00", "20:00", "21:00", "22:00", "23:00"]
# This plot will require a lot of vertical space, we increase it.
plt.figure(figsize=(12, 20))
# These will be used for calculating percentages.
total = len(df)
total2 = len(df2)
# We create dictionaries with keys from 0 to 23 (11 pm) hours.
submissions_hours = {i: 0 for i in range(0, 24)}
comments_hours = {i: 0 for i in range(0, 24)}
# We filter the DataFrames and set each hour value
# equal to its number of records.
for k, v in submissions_hours.items():
submissions_hours[k] = len(df[df.index.hour == k])
for k, v in comments_hours.items():
comments_hours[k] = len(df2[df2.index.hour == k])
# The first set of horizontal bars have a little offset to the top.
# This is so the next set of bars can fit in the same place.
bars = plt.barh(y=[i + 0.2 for i in submissions_hours.keys()],
width=[(i / total) * 100 for i in submissions_hours.values()],
height=0.4, color="#1565c0", linewidth=0)
# This loop creates small texts with the absolute values next to each bar.
for bar in bars:
width = bar.get_width()
real_value = int((width * total) / 100)
plt.text(width + 0.03, bar.get_y() + 0.08,
"{:,}".format(real_value), ha="left", va="bottom")
# This set of bars have a little offset to the bottom so they can fit
# with the previous ones.
bars2 = plt.barh(y=[i - 0.2 for i in comments_hours.keys()],
width=[(i / total2) * 100 for i in comments_hours.values()],
height=0.4, color="#f9a825", linewidth=0)
# This loop creates small texts with the absolute values next to each bar (second set of bars).
for bar2 in bars2:
width2 = bar2.get_width()
real_value2 = int((width2 * total2) / 100)
plt.text(width2 + 0.03, bar2.get_y() + 0.08,
"{:,}".format(real_value2), ha="left", va="bottom")
# We remove the top and right spines.
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["right"].set_visible(False)
# For the yticks we use the previously defined hours labels.
plt.yticks(list(submissions_hours.keys()), labels)
# We add final customizations.
plt.xlabel("Percentage")
plt.ylabel("Hour of the Day")
plt.title("Submissions and comments by Hour")
plt.legend(["Submissions", "Comments"])
plt.tight_layout()
plt.savefig("submissionsandcommentsbyhour.png", facecolor="#222222")
def plot_yearly_submissions_and_comments(df, df2):
"""Creates 2 line subplots with the counts of
submissions and comments by day.
Parameters
----------
df : pandas.DataFrame
The submissions DataFrame.
df2 : pandas.DataFrame
The comments DataFrame.
"""
# we first resample both DataFrames for daily counts.
df = df.resample("D").count()
df2 = df2.resample("D").count()
    # We create a fig with 2 subplots that will share their x-axis (date).
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
    # We set the title now.
fig.suptitle("Daily Submissions and Comments")
# We plot the first DataFrame and remove the top spine.
ax1.plot(df.index, df.author, color="#1565c0")
ax1.spines["top"].set_visible(False)
ax1.legend(["Submissions"])
# We plot the second DataFrame.
ax2.plot(df2.index, df2.author, color="#f9a825")
ax2.legend(["Comments"])
# We add the final customization.
fig.tight_layout()
plt.savefig("dailysubmissionsandcomments.png", facecolor="#222222")
def plot_submissions_by_user(df):
"""Plots a pie chart with the distribution
of submissions by user groups.
Parameters
----------
df : pandas.DataFrame
The submissions DataFrame.
"""
# We first get the total submissions by each user.
df = df["author"].value_counts()
total = len(df)
# We define our custom buckets, feel free to tweak them as you need.
one = len(df[df.between(1, 1, inclusive=True)])
two_to_five = len(df[df.between(2, 5, inclusive=True)])
six_to_ten = len(df[df.between(6, 10, inclusive=True)])
eleven_to_twenty = len(df[df.between(11, 20, inclusive=True)])
twentyone_to_fifty = len(df[df.between(21, 50, inclusive=True)])
fiftyone_to_onehundred = len(df[df.between(51, 100, inclusive=True)])
more_than_onehundred = len(df[df.between(101, 10000, inclusive=True)])
print("One:", one)
print("Two to Five:", two_to_five)
print("Six to Ten:", six_to_ten)
print("Eleven to Twenty:", eleven_to_twenty)
print("Twenty One to Fifty:", twentyone_to_fifty)
print("Fifty One to One Hundrer:", fiftyone_to_onehundred)
print("More than One Hundred:", more_than_onehundred)
# We define labels, explodes and values, they must have the same length.
labels = ["1", "2-5", "6-10", "11-20", "21-50", "51-100", "100+"]
explode = (0, 0, 0, 0, 0, 0, 0)
values = [one, two_to_five, six_to_ten, eleven_to_twenty,
twentyone_to_fifty, fiftyone_to_onehundred, more_than_onehundred]
# We will make our own legend labels calculating the percentages of each bucket.
final_labels = list()
for index, item in enumerate(values):
final_labels.append("{} - {:.2f}% ({:,})".format(
labels[index], item / total * 100, item))
    # We remove the lines that separate the pie sections.
plt.rcParams["patch.linewidth"] = 0
# We plot our values, remove labels and shadows.
plt.pie(values, explode=explode, labels=None, shadow=False)
# We draw a circle in the Pie chart to make it a donut chart.
centre_circle = plt.Circle(
(0, 0), 0.75, color="#222222", fc="#222222", linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
# We add the final customization.
plt.axis("equal")
plt.legend(final_labels)
plt.savefig("submissionsbyuser.png", facecolor="#222222")
def plot_comments_by_user(df):
"""Plots a pie chart with the distribution
of comments by user groups.
Parameters
----------
df : pandas.DataFrame
The comments DataFrame.
"""
# We first get the total comments by each user.
df = df["author"].value_counts()
total = len(df)
# We define our custom buckets, feel free to tweak them as you need.
one = len(df[df.between(1, 1, inclusive=True)])
two_to_ten = len(df[df.between(2, 10, inclusive=True)])
eleven_to_twenty = len(df[df.between(11, 20, inclusive=True)])
twentyone_to_fifty = len(df[df.between(21, 50, inclusive=True)])
fiftyone_to_onehundred = len(df[df.between(51, 100, inclusive=True)])
onehundredone_to_fivehundred = len(
df[df.between(101, 500, inclusive=True)])
fivehundredone_to_onethousand = len(
df[df.between(501, 1000, inclusive=True)])
morethanonethousand = len(df[df.between(1001, 100000, inclusive=True)])
print("One:", one)
print("Two to Ten:", two_to_ten)
print("Eleven to Twenty:", eleven_to_twenty)
print("Twenty One to Fifty:", twentyone_to_fifty)
print("Fifty One to One Hundred:", fiftyone_to_onehundred)
print("One Hundred One to Five Hundred:", onehundredone_to_fivehundred)
print("Five Hundred One to One Thousand:", fivehundredone_to_onethousand)
print("More than One Thousand:", morethanonethousand)
# We define labels, explodes and values, they must have the same length.
labels = ["1", "2-10", "11-20", "21-50",
"51-100", "101-500", "501-1000", "1000+"]
explode = (0, 0, 0, 0, 0, 0, 0, 0)
values = [one, two_to_ten, eleven_to_twenty, twentyone_to_fifty, fiftyone_to_onehundred,
onehundredone_to_fivehundred, fivehundredone_to_onethousand, morethanonethousand]
# We will make our own legend labels calculating the percentages of each bucket.
final_labels = list()
for index, item in enumerate(values):
final_labels.append(
"{} - {:.2f}% ({:,})".format(labels[index], item / total * 100, item))
    # We remove the lines that separate the pie sections.
plt.rcParams["patch.linewidth"] = 0
# We plot our values, remove labels and shadows.
plt.pie(values, explode=explode, labels=None, shadow=False)
# We draw a circle in the Pie chart to make it a donut chart.
centre_circle = plt.Circle(
(0, 0), 0.75, color="#222222", fc="#222222", linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
# We add the final customization.
plt.axis("equal")
plt.legend(final_labels)
plt.savefig("commentsbyuser.png", facecolor="#222222")
def generate_most_common_words_word_cloud(df):
"""Generates a word cloud with the most used tokens.
Parameters
----------
df : pandas.DataFrame
The tokens DataFrame.
"""
    # We load English and Spanish stop words that will help us
    # get better results in our word cloud.
stopwords = list()
stopwords.extend(
open(EN_STOPWORDS, "r", encoding="utf-8").read().splitlines())
stopwords.extend(
open(ES_STOPWORDS, "r", encoding="utf-8").read().splitlines())
# We remove all the rows that are in our stopwords list.
df = df[~df["lemma_lower"].isin(stopwords)]
# We only take into account the top 1,000 words that are not numbers
# are not stop words and are longer than one character.
words = df[
(df["is_alphabet"] == True) &
(df["is_stopword"] == False) &
(df["lemma_lower"].str.len() > 1)
]["lemma_lower"].value_counts()[:1000]
# Now that we have the words and their counts we will create a list
# with the words repeated equally to their counts.
words_list = list()
for index, value in words.items():
for _ in range(value):
words_list.append(index)
# We create the mask from our cloud image.
mask = np.array(Image.open(MASK_FILE))
# We prepare our word cloud object and save it to disk.
wc = wordcloud.WordCloud(background_color="#222222",
max_words=1000,
mask=mask,
contour_width=2,
colormap="summer",
font_path=FONT_FILE,
contour_color="white",
collocations=False)
wc.generate(" ".join(words_list))
wc.to_file("mostusedwords.png")
def generate_most_common_entities_word_cloud(df):
"""Generates a word cloud with the most used entities.
Parameters
----------
df : pandas.DataFrame
The entities DataFrame.
"""
    # We load English and Spanish stop words that will help us
    # get better results in our word cloud.
stopwords = list()
stopwords.extend(
open(EN_STOPWORDS, "r", encoding="utf-8").read().splitlines())
stopwords.extend(
open(ES_STOPWORDS, "r", encoding="utf-8").read().splitlines())
# We remove all the rows that are in our stopwords list.
df = df[~df["text_lower"].isin(stopwords)]
# We only take into account the top 1,000 entities that are longer than one character
# and are in the the Location, Organization or Person categories.
entities = df[
(df["label"].isin(["LOC", "ORG", "PER"])) &
(df["text"].str.len() > 1)]["text"].value_counts()[:1000]
# Now that we have the entities and their counts we will create a list
# with the entities repeated equally to their counts.
entities_list = list()
for index, value in entities.items():
# This is specific to my dataset, feel free to remove it.
if index == "Mexico":
index = "México"
for _ in range(value):
entities_list.append(index)
# We create the mask from our cloud image.
mask = np.array(Image.open(MASK_FILE))
# We prepare our word cloud object and save it to disk.
wc = wordcloud.WordCloud(background_color="#222222",
max_words=1000,
mask=mask,
contour_width=2,
colormap="spring",
font_path=FONT_FILE,
contour_color="white",
collocations=False)
wc.generate(" ".join(entities_list))
wc.to_file("mostusedentities.png")
if __name__ == "__main__":
submissions_df = pd.read_csv("mexico-submissions.csv",
parse_dates=["datetime"], index_col=0)
comments_df = pd.read_csv("mexico-comments.csv",
parse_dates=["datetime"], index_col=0)
tokens_df = pd.read_csv("tokens.csv")
entities_df = pd.read_csv("entities.csv")
|
exercises/de/test_03_14_01.py | Jette16/spacy-course | 2,085 | 11177415 | def test():
assert (
"for doc in nlp.pipe(TEXTS)" in __solution__
), "Iterierst du über die Docs, die per yield von nlp.pipe ausgegeben werden?"
__msg__.good("Super!")
|
pysnmp/smi/mibs/SNMP-COMMUNITY-MIB.py | RKinsey/pysnmp | 492 | 11177428 | <reponame>RKinsey/pysnmp
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
# ASN.1 source http://mibs.snmplabs.com/asn1/SNMP-COMMUNITY-MIB.txt
# Produced by pysmi-0.4.0 at Sat Feb 16 08:57:23 2019
#
if 'mibBuilder' not in globals():
import sys
sys.stderr.write(__doc__)
sys.exit(1)
(Integer,
OctetString,
ObjectIdentifier) = mibBuilder.importSymbols(
"ASN1",
"Integer",
"OctetString",
"ObjectIdentifier")
(NamedValues,) = mibBuilder.importSymbols(
"ASN1-ENUMERATION",
"NamedValues")
(ConstraintsIntersection,
SingleValueConstraint,
ValueRangeConstraint,
ValueSizeConstraint,
ConstraintsUnion) = mibBuilder.importSymbols(
"ASN1-REFINEMENT",
"ConstraintsIntersection",
"SingleValueConstraint",
"ValueRangeConstraint",
"ValueSizeConstraint",
"ConstraintsUnion")
(SnmpEngineID,
SnmpAdminString) = mibBuilder.importSymbols(
"SNMP-FRAMEWORK-MIB",
"SnmpEngineID",
"SnmpAdminString")
(SnmpTagValue,
snmpTargetAddrEntry) = mibBuilder.importSymbols(
"SNMP-TARGET-MIB",
"SnmpTagValue",
"snmpTargetAddrEntry")
(NotificationGroup,
ObjectGroup,
ModuleCompliance) = mibBuilder.importSymbols(
"SNMPv2-CONF",
"NotificationGroup",
"ObjectGroup",
"ModuleCompliance")
(Unsigned32,
Gauge32,
iso,
ObjectIdentity,
snmpModules,
TimeTicks,
Counter32,
Counter64,
IpAddress,
NotificationType,
MibScalar,
MibTable,
MibTableRow,
MibTableColumn,
MibIdentifier,
Bits,
ModuleIdentity,
Integer32) = mibBuilder.importSymbols(
"SNMPv2-SMI",
"Unsigned32",
"Gauge32",
"iso",
"ObjectIdentity",
"snmpModules",
"TimeTicks",
"Counter32",
"Counter64",
"IpAddress",
"NotificationType",
"MibScalar",
"MibTable",
"MibTableRow",
"MibTableColumn",
"MibIdentifier",
"Bits",
"ModuleIdentity",
"Integer32")
(TextualConvention,
RowStatus,
DisplayString,
StorageType) = mibBuilder.importSymbols(
"SNMPv2-TC",
"TextualConvention",
"RowStatus",
"DisplayString",
"StorageType")
snmpCommunityMIB = ModuleIdentity(
(1, 3, 6, 1, 6, 3, 18)
)
snmpCommunityMIB.setRevisions(
("2000-03-06 00:00",
"1999-05-13 00:00")
)
snmpCommunityMIB.setLastUpdated("200003060000Z")
if mibBuilder.loadTexts:
snmpCommunityMIB.setOrganization("""\
SNMPv3 Working Group
""")
snmpCommunityMIB.setContactInfo("""\
WG-email: <EMAIL> Subscribe: <EMAIL> In
msg body: subscribe snmpv3 Chair: Russ Mundy TIS Labs at Network Associates
Postal: 3060 Washington Rd Glenwood MD 21738 USA Email: <EMAIL>
Phone: +1-301-854-6889 Co-editor: <NAME> CoSine Communications Postal: 1200
Bridge Parkway Redwood City, CA 94065 USA E-mail: <EMAIL> Phone: +1
703 725 1130 Co-editor: <NAME> Nortel Networks Postal: 3505 Kesterwood
Drive Knoxville, TN 37918 E-mail: <EMAIL> Phone: +1 423 686
0432 Co-editor: <NAME>. Routhier Integrated Systems Inc. Postal: 333 North Ave
4th Floor Wakefield, MA 01880 E-mail: <EMAIL> Phone: +1 781 245 0804
Co-editor: <NAME> Lucent Technologies Postal: Schagen 33 3461 GL
Linschoten Netherlands Email: <EMAIL> Phone: +31-348-407-775
""")
if mibBuilder.loadTexts:
snmpCommunityMIB.setDescription("""\
This MIB module defines objects to help support coexistence between SNMPv1,
SNMPv2c, and SNMPv3.
""")
_SnmpCommunityMIBObjects_ObjectIdentity = ObjectIdentity
snmpCommunityMIBObjects = _SnmpCommunityMIBObjects_ObjectIdentity(
(1, 3, 6, 1, 6, 3, 18, 1)
)
_SnmpCommunityTable_Object = MibTable
snmpCommunityTable = _SnmpCommunityTable_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1)
)
if mibBuilder.loadTexts:
snmpCommunityTable.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityTable.setDescription("""\
The table of community strings configured in the SNMP engine's Local
Configuration Datastore (LCD).
""")
_SnmpCommunityEntry_Object = MibTableRow
snmpCommunityEntry = _SnmpCommunityEntry_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1, 1)
)
snmpCommunityEntry.setIndexNames(
(1, "SNMP-COMMUNITY-MIB", "snmpCommunityIndex"),
)
if mibBuilder.loadTexts:
snmpCommunityEntry.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityEntry.setDescription("""\
Information about a particular community string.
""")
class _SnmpCommunityIndex_Type(SnmpAdminString):
subtypeSpec = SnmpAdminString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(1, 32),
)
_SnmpCommunityIndex_Type.__name__ = "SnmpAdminString"
_SnmpCommunityIndex_Object = MibTableColumn
snmpCommunityIndex = _SnmpCommunityIndex_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1, 1, 1),
_SnmpCommunityIndex_Type()
)
snmpCommunityIndex.setMaxAccess("not-accessible")
if mibBuilder.loadTexts:
snmpCommunityIndex.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityIndex.setDescription("""\
The unique index value of a row in this table.
""")
_SnmpCommunityName_Type = OctetString
_SnmpCommunityName_Object = MibTableColumn
snmpCommunityName = _SnmpCommunityName_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1, 1, 2),
_SnmpCommunityName_Type()
)
snmpCommunityName.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpCommunityName.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityName.setDescription("""\
The community string for which a row in this table represents a configuration.
""")
class _SnmpCommunitySecurityName_Type(SnmpAdminString):
subtypeSpec = SnmpAdminString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(1, 32),
)
_SnmpCommunitySecurityName_Type.__name__ = "SnmpAdminString"
_SnmpCommunitySecurityName_Object = MibTableColumn
snmpCommunitySecurityName = _SnmpCommunitySecurityName_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1, 1, 3),
_SnmpCommunitySecurityName_Type()
)
snmpCommunitySecurityName.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpCommunitySecurityName.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunitySecurityName.setDescription("""\
A human readable string representing the corresponding value of
snmpCommunityName in a Security Model independent format.
""")
_SnmpCommunityContextEngineID_Type = SnmpEngineID
_SnmpCommunityContextEngineID_Object = MibTableColumn
snmpCommunityContextEngineID = _SnmpCommunityContextEngineID_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1, 1, 4),
_SnmpCommunityContextEngineID_Type()
)
snmpCommunityContextEngineID.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpCommunityContextEngineID.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityContextEngineID.setDescription("""\
The contextEngineID indicating the location of the context in which management
information is accessed when using the community string specified by the
corresponding instance of snmpCommunityName. The default value is the
snmpEngineID of the entity in which this object is instantiated.
""")
class _SnmpCommunityContextName_Type(SnmpAdminString):
defaultHexValue = ""
subtypeSpec = SnmpAdminString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(0, 32),
)
_SnmpCommunityContextName_Type.__name__ = "SnmpAdminString"
_SnmpCommunityContextName_Object = MibTableColumn
snmpCommunityContextName = _SnmpCommunityContextName_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1, 1, 5),
_SnmpCommunityContextName_Type()
)
snmpCommunityContextName.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpCommunityContextName.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityContextName.setDescription("""\
The context in which management information is accessed when using the
community string specified by the corresponding instance of snmpCommunityName.
""")
class _SnmpCommunityTransportTag_Type(SnmpTagValue):
defaultHexValue = ""
_SnmpCommunityTransportTag_Object = MibTableColumn
snmpCommunityTransportTag = _SnmpCommunityTransportTag_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1, 1, 6),
_SnmpCommunityTransportTag_Type()
)
snmpCommunityTransportTag.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpCommunityTransportTag.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityTransportTag.setDescription("""\
This object specifies a set of transport endpoints from which a command
responder application will accept management requests. If a management request
containing this community is received on a transport endpoint other than the
transport endpoints identified by this object, the request is deemed
unauthentic. The transports identified by this object are specified in the
snmpTargetAddrTable. Entries in that table whose snmpTargetAddrTagList contains
this tag value are identified. If the value of this object has zero-length,
transport endpoints are not checked when authenticating messages containing
this community string.
""")
_SnmpCommunityStorageType_Type = StorageType
_SnmpCommunityStorageType_Object = MibTableColumn
snmpCommunityStorageType = _SnmpCommunityStorageType_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1, 1, 7),
_SnmpCommunityStorageType_Type()
)
snmpCommunityStorageType.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpCommunityStorageType.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityStorageType.setDescription("""\
The storage type for this conceptual row in the snmpCommunityTable. Conceptual
rows having the value 'permanent' need not allow write-access to any columnar
object in the row.
""")
_SnmpCommunityStatus_Type = RowStatus
_SnmpCommunityStatus_Object = MibTableColumn
snmpCommunityStatus = _SnmpCommunityStatus_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 1, 1, 8),
_SnmpCommunityStatus_Type()
)
snmpCommunityStatus.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpCommunityStatus.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityStatus.setDescription("""\
The status of this conceptual row in the snmpCommunityTable. An entry in this
table is not qualified for activation until instances of all corresponding
columns have been initialized, either through default values, or through Set
operations. The snmpCommunityName and snmpCommunitySecurityName objects must be
explicitly set. There is no restriction on setting columns in this table when
the value of snmpCommunityStatus is active(1).
""")
_SnmpTargetAddrExtTable_Object = MibTable
snmpTargetAddrExtTable = _SnmpTargetAddrExtTable_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 2)
)
if mibBuilder.loadTexts:
snmpTargetAddrExtTable.setStatus("current")
if mibBuilder.loadTexts:
snmpTargetAddrExtTable.setDescription("""\
The table of mask and mms values associated with the snmpTargetAddrTable. The
snmpTargetAddrExtTable augments the snmpTargetAddrTable with a transport
address mask value and a maximum message size value. The transport address mask
allows entries in the snmpTargetAddrTable to define a set of addresses instead
of just a single address. The maximum message size value allows the maximum
message size of another SNMP entity to be configured for use in SNMPv1 (and
SNMPv2c) transactions, where the message format does not specify a maximum
message size.
""")
_SnmpTargetAddrExtEntry_Object = MibTableRow
snmpTargetAddrExtEntry = _SnmpTargetAddrExtEntry_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 2, 1)
)
snmpTargetAddrEntry.registerAugmentions(
("SNMP-COMMUNITY-MIB",
"snmpTargetAddrExtEntry")
)
snmpTargetAddrExtEntry.setIndexNames(*snmpTargetAddrEntry.getIndexNames())
if mibBuilder.loadTexts:
snmpTargetAddrExtEntry.setStatus("current")
if mibBuilder.loadTexts:
snmpTargetAddrExtEntry.setDescription("""\
Information about a particular mask and mms value.
""")
class _SnmpTargetAddrTMask_Type(OctetString):
defaultHexValue = ""
subtypeSpec = OctetString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(0, 255),
)
_SnmpTargetAddrTMask_Type.__name__ = "OctetString"
_SnmpTargetAddrTMask_Object = MibTableColumn
snmpTargetAddrTMask = _SnmpTargetAddrTMask_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 2, 1, 1),
_SnmpTargetAddrTMask_Type()
)
snmpTargetAddrTMask.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpTargetAddrTMask.setStatus("current")
if mibBuilder.loadTexts:
snmpTargetAddrTMask.setDescription("""\
The mask value associated with an entry in the snmpTargetAddrTable. The value
of this object must have the same length as the corresponding instance of
snmpTargetAddrTAddress, or must have length 0. An attempt to set it to any
other value will result in an inconsistentValue error. The value of this object
allows an entry in the snmpTargetAddrTable to specify multiple addresses. The
mask value is used to select which bits of a transport address must match bits
of the corresponding instance of snmpTargetAddrTAddress, in order for the
transport address to match a particular entry in the snmpTargetAddrTable. Bits
which are 1 in the mask value indicate bits in the transport address which must
match bits in the snmpTargetAddrTAddress value. Bits which are 0 in the mask
indicate bits in the transport address which need not match. If the length of
the mask is 0, the mask should be treated as if all its bits were 1 and its
length were equal to the length of the corresponding value of
snmpTargetAddrTAddress. This object may not be modified while the value of the
corresponding instance of snmpTargetAddrRowStatus is active(1). An attempt to
set this object in this case will result in an inconsistentValue error.
""")
class _SnmpTargetAddrMMS_Type(Integer32):
defaultValue = 484
subtypeSpec = Integer32.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueRangeConstraint(0, 0),
ValueRangeConstraint(484, 2147483647),
)
_SnmpTargetAddrMMS_Type.__name__ = "Integer32"
_SnmpTargetAddrMMS_Object = MibTableColumn
snmpTargetAddrMMS = _SnmpTargetAddrMMS_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 2, 1, 2),
_SnmpTargetAddrMMS_Type()
)
snmpTargetAddrMMS.setMaxAccess("read-create")
if mibBuilder.loadTexts:
snmpTargetAddrMMS.setStatus("current")
if mibBuilder.loadTexts:
snmpTargetAddrMMS.setDescription("""\
The maximum message size value associated with an entry in the
snmpTargetAddrTable.
""")
_SnmpTrapAddress_Type = IpAddress
_SnmpTrapAddress_Object = MibScalar
snmpTrapAddress = _SnmpTrapAddress_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 3),
_SnmpTrapAddress_Type()
)
snmpTrapAddress.setMaxAccess("accessible-for-notify")
if mibBuilder.loadTexts:
snmpTrapAddress.setStatus("current")
if mibBuilder.loadTexts:
snmpTrapAddress.setDescription("""\
The value of the agent-addr field of a Trap PDU which is forwarded by a proxy
forwarder application using an SNMP version other than SNMPv1. The value of
this object SHOULD contain the value of the agent-addr field from the original
Trap PDU as generated by an SNMPv1 agent.
""")
_SnmpTrapCommunity_Type = OctetString
_SnmpTrapCommunity_Object = MibScalar
snmpTrapCommunity = _SnmpTrapCommunity_Object(
(1, 3, 6, 1, 6, 3, 18, 1, 4),
_SnmpTrapCommunity_Type()
)
snmpTrapCommunity.setMaxAccess("accessible-for-notify")
if mibBuilder.loadTexts:
snmpTrapCommunity.setStatus("current")
if mibBuilder.loadTexts:
snmpTrapCommunity.setDescription("""\
The value of the community string field of an SNMPv1 message containing a Trap
PDU which is forwarded by a proxy forwarder application using an SNMP version
other than SNMPv1. The value of this object SHOULD contain the value of the
community string field from the original SNMPv1 message containing a Trap PDU
as generated by an SNMPv1 agent.
""")
_SnmpCommunityMIBConformance_ObjectIdentity = ObjectIdentity
snmpCommunityMIBConformance = _SnmpCommunityMIBConformance_ObjectIdentity(
(1, 3, 6, 1, 6, 3, 18, 2)
)
_SnmpCommunityMIBCompliances_ObjectIdentity = ObjectIdentity
snmpCommunityMIBCompliances = _SnmpCommunityMIBCompliances_ObjectIdentity(
(1, 3, 6, 1, 6, 3, 18, 2, 1)
)
_SnmpCommunityMIBGroups_ObjectIdentity = ObjectIdentity
snmpCommunityMIBGroups = _SnmpCommunityMIBGroups_ObjectIdentity(
(1, 3, 6, 1, 6, 3, 18, 2, 2)
)
snmpCommunityGroup = ObjectGroup(
(1, 3, 6, 1, 6, 3, 18, 2, 2, 1)
)
snmpCommunityGroup.setObjects(
*(("SNMP-COMMUNITY-MIB", "snmpCommunityName"),
("SNMP-COMMUNITY-MIB", "snmpCommunitySecurityName"),
("SNMP-COMMUNITY-MIB", "snmpCommunityContextEngineID"),
("SNMP-COMMUNITY-MIB", "snmpCommunityContextName"),
("SNMP-COMMUNITY-MIB", "snmpCommunityTransportTag"),
("SNMP-COMMUNITY-MIB", "snmpCommunityStorageType"),
("SNMP-COMMUNITY-MIB", "snmpCommunityStatus"),
("SNMP-COMMUNITY-MIB", "snmpTargetAddrTMask"),
("SNMP-COMMUNITY-MIB", "snmpTargetAddrMMS"))
)
if mibBuilder.loadTexts:
snmpCommunityGroup.setStatus("current")
if mibBuilder.loadTexts:
snmpCommunityGroup.setDescription("""\
A collection of objects providing for configuration of community strings for
SNMPv1 (and SNMPv2c) usage.
""")
snmpProxyTrapForwardGroup = ObjectGroup(
(1, 3, 6, 1, 6, 3, 18, 2, 2, 3)
)
snmpProxyTrapForwardGroup.setObjects(
*(("SNMP-COMMUNITY-MIB", "snmpTrapAddress"),
("SNMP-COMMUNITY-MIB", "snmpTrapCommunity"))
)
if mibBuilder.loadTexts:
snmpProxyTrapForwardGroup.setStatus("current")
if mibBuilder.loadTexts:
snmpProxyTrapForwardGroup.setDescription("""\
Objects which are used by proxy forwarding applications when translating traps
between SNMP versions. These are used to preserve SNMPv1-specific information
when translating to SNMPv2c or SNMPv3.
""")
snmpCommunityMIBCompliance = ModuleCompliance(
(1, 3, 6, 1, 6, 3, 18, 2, 1, 1)
)
if mibBuilder.loadTexts:
snmpCommunityMIBCompliance.setStatus(
"current"
)
if mibBuilder.loadTexts:
snmpCommunityMIBCompliance.setDescription("""\
The compliance statement for SNMP engines which implement the SNMP-COMMUNITY-
MIB.
""")
snmpProxyTrapForwardCompliance = ModuleCompliance(
(1, 3, 6, 1, 6, 3, 18, 2, 1, 2)
)
if mibBuilder.loadTexts:
snmpProxyTrapForwardCompliance.setStatus(
"current"
)
if mibBuilder.loadTexts:
snmpProxyTrapForwardCompliance.setDescription("""\
The compliance statement for SNMP engines which contain a proxy forwarding
application which is capable of forwarding SNMPv1 traps using SNMPv2c or
SNMPv3.
""")
mibBuilder.exportSymbols(
"SNMP-COMMUNITY-MIB",
**{"snmpCommunityMIB": snmpCommunityMIB,
"snmpCommunityMIBObjects": snmpCommunityMIBObjects,
"snmpCommunityTable": snmpCommunityTable,
"snmpCommunityEntry": snmpCommunityEntry,
"snmpCommunityIndex": snmpCommunityIndex,
"snmpCommunityName": snmpCommunityName,
"snmpCommunitySecurityName": snmpCommunitySecurityName,
"snmpCommunityContextEngineID": snmpCommunityContextEngineID,
"snmpCommunityContextName": snmpCommunityContextName,
"snmpCommunityTransportTag": snmpCommunityTransportTag,
"snmpCommunityStorageType": snmpCommunityStorageType,
"snmpCommunityStatus": snmpCommunityStatus,
"snmpTargetAddrExtTable": snmpTargetAddrExtTable,
"snmpTargetAddrExtEntry": snmpTargetAddrExtEntry,
"snmpTargetAddrTMask": snmpTargetAddrTMask,
"snmpTargetAddrMMS": snmpTargetAddrMMS,
"snmpTrapAddress": snmpTrapAddress,
"snmpTrapCommunity": snmpTrapCommunity,
"snmpCommunityMIBConformance": snmpCommunityMIBConformance,
"snmpCommunityMIBCompliances": snmpCommunityMIBCompliances,
"snmpCommunityMIBCompliance": snmpCommunityMIBCompliance,
"snmpProxyTrapForwardCompliance": snmpProxyTrapForwardCompliance,
"snmpCommunityMIBGroups": snmpCommunityMIBGroups,
"snmpCommunityGroup": snmpCommunityGroup,
"snmpProxyTrapForwardGroup": snmpProxyTrapForwardGroup}
)
|
gcp_variant_transforms/libs/vcf_reserved_fields.py | tsa87/gcp-variant-transforms | 113 | 11177432
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides reserved INFO and FORMAT fields based on VCF 4.3 spec.
See http://samtools.github.io/hts-specs/VCFv4.3.pdf for more details.
"""
import collections
from typing import Optional # pylint: disable=unused-import
from gcp_variant_transforms.beam_io import vcf_parser
FIELD_COUNT_ALTERNATE_ALLELE = vcf_parser.FIELD_COUNT_ALTERNATE_ALLELE
FIELD_COUNT_ALL_ALLELE = vcf_parser.FIELD_COUNT_ALL_ALLELE
FIELD_COUNT_GENOTYPE = vcf_parser.FIELD_COUNT_GENOTYPE
_ReservedDefinition = collections.namedtuple('ReservedDefinition',
['id', 'num', 'type', 'desc'])
def _get_field_count(value):
# type: (str) -> Optional[int]
return value
INFO_FIELDS = {
'AA': _ReservedDefinition('AA', 1, 'String', 'Ancestral allele'),
'AC': _ReservedDefinition('AC', FIELD_COUNT_ALTERNATE_ALLELE, 'Integer',
'Allele count in genotypes, for each ALT allele, '
'in the same order as listed'),
'AD': _ReservedDefinition('AD', FIELD_COUNT_ALL_ALLELE, 'Integer',
'Total read depth for each allele'),
'ADF': _ReservedDefinition('ADF', FIELD_COUNT_ALL_ALLELE, 'Integer',
'Read depth for each allele on the forward '
'strand'),
'ADR': _ReservedDefinition('ADR', FIELD_COUNT_ALL_ALLELE, 'Integer',
'Read depth for each allele on the reverse '
'strand'),
'AF': _ReservedDefinition('AF', FIELD_COUNT_ALTERNATE_ALLELE, 'Float',
'Allele frequency for each ALT allele in the '
'same order as listed (estimated from primary '
'data, not called genotypes'),
'AN': _ReservedDefinition('AN', 1, 'Integer',
'Total number of alleles in called genotypes'),
'BQ': _ReservedDefinition('BQ', 1, 'Float', 'RMS base quality'),
'CIGAR': _ReservedDefinition('CIGAR', FIELD_COUNT_ALTERNATE_ALLELE,
'String',
'Cigar string describing how to align an '
'alternate allele to the reference allele'),
'DB': _ReservedDefinition('DB', 0, 'Flag', 'dbSNP membership'),
'DP': _ReservedDefinition('DP', 1, 'Integer',
'Combined depth across samples'),
'END': _ReservedDefinition('END', 1, 'Integer',
'End position (for use with symbolic alleles)'),
'H2': _ReservedDefinition('H2', 0, 'Flag', 'HapMap2 membership'),
'H3': _ReservedDefinition('H3', 0, 'Flag', 'HapMap3 membership'),
'MQ': _ReservedDefinition('MQ', 1, 'Integer', 'RMS mapping quality'),
'MQ0': _ReservedDefinition('MQ0', 1, 'Integer',
'Number of MAPQ == 0 reads'),
'NS': _ReservedDefinition('NS', 1, 'Integer',
'Number of samples with data'),
'SB': _ReservedDefinition('SB', 4, 'Integer', 'Strand bias'),
'SOMATIC': _ReservedDefinition('SOMATIC', 0, 'Flag',
'Somatic mutation (for cancer genomics)'),
'VALIDATED': _ReservedDefinition('VALIDATED', 0, 'Flag',
'Validated by follow-up experiment'),
'1000G': _ReservedDefinition('1000G', 0, 'Flag', '1000 Genomes membership')
}
FORMAT_FIELDS = {
'AD': _ReservedDefinition('AD', FIELD_COUNT_ALL_ALLELE, 'Integer',
'Read depth for each allele'),
'ADF': _ReservedDefinition('ADF', FIELD_COUNT_ALL_ALLELE, 'Integer',
'Read depth for each allele on the forward '
'strand'),
'ADR': _ReservedDefinition('ADR', FIELD_COUNT_ALL_ALLELE, 'Integer',
'Read depth for each allele on the reverse '
'strand'),
'DP': _ReservedDefinition('DP', 1, 'Integer', 'Read depth'),
'EC': _ReservedDefinition('EC', FIELD_COUNT_ALTERNATE_ALLELE, 'Integer',
'Expected alternate allele counts'),
'FT': _ReservedDefinition('FT', 1, 'String',
'Filter indicating if this genotype was '
                              'called'),
'GL': _ReservedDefinition('GL', FIELD_COUNT_GENOTYPE, 'Float',
'Genotype likelihoods'),
'GP': _ReservedDefinition('GP', FIELD_COUNT_GENOTYPE, 'Float',
'Genotype posterior probabilities'),
'GQ': _ReservedDefinition('GQ', 1, 'Integer',
'Conditional genotype quality'),
'GT': _ReservedDefinition('GT', 1, 'String', 'Genotype'),
'HQ': _ReservedDefinition('HQ', 2, 'Integer', 'Haplotype quality'),
'MQ': _ReservedDefinition('MQ', 1, 'Integer', 'RMS mapping quality'),
'PL': _ReservedDefinition('PL', FIELD_COUNT_GENOTYPE, 'Integer',
'Phred-scaled genotype likelihoods rounded to '
'the closest integer'),
'PQ': _ReservedDefinition('PQ', 1, 'Integer', 'Phasing quality'),
'PS': _ReservedDefinition('PS', 1, 'Integer', 'Phase set')
}
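# Illustrative sketch (added; not part of the original module): callers can fall
# back to these reserved definitions when a VCF header omits a field, e.g.:
#
#   definition = INFO_FIELDS.get('AF')            # hypothetical lookup
#   if definition is not None:
#       num, typ = definition.num, definition.type  # FIELD_COUNT_* or an int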
|
indy_node/test/upgrade/test_pool_upgrade_cancel.py | Rob-S/indy-node | 627 | 11177573
from copy import deepcopy
from indy_common.constants import CANCEL, \
ACTION, SCHEDULE, JUSTIFICATION
from indy_node.test import waits
from indy_node.test.upgrade.helper import checkNoUpgradeScheduled, sdk_send_upgrade
from stp_core.loop.eventually import eventually
whitelist = ['Failed to upgrade node']
def testTrustyCancelsUpgrade(validUpgradeSent, looper, nodeSet, sdk_pool_handle,
sdk_wallet_trustee, validUpgrade):
validUpgradeCopy = deepcopy(validUpgrade)
validUpgradeCopy[ACTION] = CANCEL
validUpgradeCopy[JUSTIFICATION] = '"never gonna give you one"'
validUpgradeCopy.pop(SCHEDULE, None)
sdk_send_upgrade(looper, sdk_pool_handle, sdk_wallet_trustee, validUpgradeCopy)
looper.run(eventually(checkNoUpgradeScheduled, nodeSet, retryWait=1,
timeout=waits.expectedNoUpgradeScheduled()))
|
tests/import/same-level.test/mod2.py | nanolikeyou/pysonar2 | 2,574 | 11177578 | class B:
a = 'hi'
def foo(x):
return x + 1
|
RunAll/makeRootWithAllLinks.py | san0808/websitesVulnerableToSSTI | 288 | 11177591 | import os.path
fname = "../README.md"
if os.path.isfile(fname):
markup_file = open(fname,"r")
else:
markup_file = open("./README.md","r")
lines = markup_file.readlines()
markup_file.close()
html='''<!DOCTYPE html><html><head><style>table {
font-family: arial, sans-serif;
border-collapse: collapse;
}
td, th {
border: 0.8px solid #dddddd;
text-align: left;
padding: 1px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style></head><body><table>'''
for line in lines:
if "|" in line:
splited = line.split("|")
if len(splited)==10 and ("--" not in line) and ("exploit" not in line):
if splited[7].strip().isdigit():
html+='<tr><th><a href="http://1172.16.58.3:{0}">{1}</a></td><td>{2}</td></tr>\n'.format(splited[7].strip(),splited[1].strip(),splited[2].strip())
html+="</table></body></html>"
from flask import *
app = Flask(__name__)
@app.route('/all',methods=['GET', 'POST'])
def base():
return html
@app.route('/',methods=['GET','POST'])
def other():
return '''<!DOCTYPE html><html><head><style>table {
font-family: arial, sans-serif;
border-collapse: collapse;
}
td, th {
border: 0.8px solid #dddddd;
text-align: left;
padding: 1px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style></head><body><table><tr><th><a href="http://127.0.0.1:5000">jinja2</a></td><td>Python</td></tr>
<tr><th><a href="http://127.0.0.1:5001">Mako</a></td><td>Python</td></tr>
<tr><th><a href="http://127.0.0.1:5002">Tornado</a></td><td>Python</td></tr>
<tr><th><a href="http://127.0.0.1:5020">Smarty</a></td><td>PHP</td></tr>
<tr><th><a href="http://127.0.0.1:5021">Smarty(secure mode)</a></td><td>PHP</td></tr>
<tr><th><a href="http://127.0.0.1:5022">Twig</a></td><td>PHP</td></tr>
<tr><th><a href="http://127.0.0.1:5051">FreeMarker</a></td><td>Java</td></tr>
<tr><th><a href="http://127.0.0.1:5052">Velocity</a></td><td>Java</td></tr>
<tr><th><a href="http://127.0.0.1:5053">Thymeleaf</a></td><td>Java</td></tr>
<tr><th><a href="http://127.0.0.1:5061">jade</a></td><td>Nodejs</td></tr>
<tr><th><a href="http://127.0.0.1:5062">Nunjucks</a></td><td>JavaScript</td></tr>
<tr><th><a href="http://127.0.0.1:5063">doT</a></td><td>JavaScript</td></tr>
<tr><th><a href="http://127.0.0.1:5066">EJS</a></td><td>JavaScript</td></tr>
<tr><th><a href="http://127.0.0.1:5068">vuejs</a></td><td>JavaScript</td></tr>
<tr><th><a href="http://127.0.0.1:5080">Slim</a></td><td>Ruby</td></tr>
<tr><th><a href="http://127.0.0.1:5081">ERB</a></td><td>Ruby</td></tr>
</table></body></html>'''
if __name__=="__main__":
app.run("0.0.0.0",port = 4000,debug=False)
|
notebooks/thunderdome/eth2spec/test/phase0/epoch_processing/test_process_slashings_reset.py | casparschwa/beaconrunner | 2,161 | 11177604
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
run_epoch_processing_with
)
def run_process_slashings_reset(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_slashings_reset')
@with_all_phases
@spec_state_test
def test_flush_slashings(spec, state):
next_epoch = spec.get_current_epoch(state) + 1
state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] = 100
assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] != 0
yield from run_process_slashings_reset(spec, state)
assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] == 0
|
examples/dbm_example.py | Ressmann/starthinker | 138 | 11177608 | ###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dbm.run import dbm
def recipe_dbm(config, auth_read, report, delete):
"""Create a DV360 report.
Args:
auth_read (authentication) - Credentials used for reading data.
report (json) - Report body and filters.
delete (boolean) - If report exists, delete it before creating a new one.
"""
dbm(config, {
'auth':auth_read,
'report':report,
'delete':delete
})
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Create a DV360 report.
1. Reference field values from the <a href='https://developers.google.com/bid-manager/v1/reports'>DV360 API</a> to build a report.
2. Copy and paste the JSON definition of a report, <a href='https://github.com/google/starthinker/blob/master/tests/scripts/dbm_to_bigquery.json#L9-L40' target='_blank'>sample for reference</a>.
      3. The report is only created; a separate script is required to move the data.
4. To reset a report, delete it from DV360 reporting.
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
parser.add_argument("-report", help="Report body and filters.", default='{}')
parser.add_argument("-delete", help="If report exists, delete it before creating a new one.", default=False)
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_dbm(config, args.auth_read, args.report, args.delete)
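# Example invocation (illustrative only; the credential path is a placeholder and
# the report body should follow the DV360 API reference linked above):
#   python dbm_example.py -user user_credentials.json -report '{}' -delete True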
|
kubernetes-py-guestbook/simple/__main__.py | jandom/examples | 1,628 | 11177616 | # Copyright 2016-2020, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pulumi
from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs
from pulumi_kubernetes.core.v1 import (
ContainerArgs,
ContainerPortArgs,
EnvVarArgs,
PodSpecArgs,
PodTemplateSpecArgs,
ResourceRequirementsArgs,
Service,
ServicePortArgs,
ServiceSpecArgs,
)
from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs
# Minikube does not implement services of type `LoadBalancer`; require the user to specify if we're
# running on minikube, and if so, create only services of type ClusterIP.
config = pulumi.Config()
isMinikube = config.get_bool("isMinikube")
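# For reference (assumed workflow, not part of the original file): the flag above
# is read from the stack configuration and is typically set before `pulumi up`
# with `pulumi config set isMinikube true`.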
redis_leader_labels = {
"app": "redis-leader",
}
redis_leader_deployment = Deployment(
"redis-leader",
spec=DeploymentSpecArgs(
selector=LabelSelectorArgs(
match_labels=redis_leader_labels,
),
replicas=1,
template=PodTemplateSpecArgs(
metadata=ObjectMetaArgs(
labels=redis_leader_labels,
),
spec=PodSpecArgs(
containers=[ContainerArgs(
name="redis-leader",
image="redis",
resources=ResourceRequirementsArgs(
requests={
"cpu": "100m",
"memory": "100Mi",
},
),
ports=[ContainerPortArgs(
container_port=6379,
)],
)],
),
),
))
redis_leader_service = Service(
"redis-leader",
metadata=ObjectMetaArgs(
name="redis-leader",
labels=redis_leader_labels
),
spec=ServiceSpecArgs(
ports=[ServicePortArgs(
port=6379,
target_port=6379,
)],
selector=redis_leader_labels
))
redis_replica_labels = {
"app": "redis-replica",
}
redis_replica_deployment = Deployment(
"redis-replica",
spec=DeploymentSpecArgs(
selector=LabelSelectorArgs(
match_labels=redis_replica_labels
),
replicas=1,
template=PodTemplateSpecArgs(
metadata=ObjectMetaArgs(
labels=redis_replica_labels,
),
spec=PodSpecArgs(
containers=[ContainerArgs(
name="redis-replica",
image="pulumi/guestbook-redis-replica",
resources=ResourceRequirementsArgs(
requests={
"cpu": "100m",
"memory": "100Mi",
},
),
env=[EnvVarArgs(
name="GET_HOSTS_FROM",
value="dns",
# If your cluster config does not include a dns service, then to instead access an environment
# variable to find the leader's host, comment out the 'value: dns' line above, and
# uncomment the line below:
# value: "env"
)],
ports=[ContainerPortArgs(
container_port=6379,
)],
)],
),
),
))
redis_replica_service = Service(
"redis-replica",
metadata=ObjectMetaArgs(
name="redis-replica",
labels=redis_replica_labels
),
spec=ServiceSpecArgs(
ports=[ServicePortArgs(
port=6379,
target_port=6379,
)],
selector=redis_replica_labels
))
# Frontend
frontend_labels = {
"app": "frontend",
}
frontend_deployment = Deployment(
"frontend",
spec=DeploymentSpecArgs(
selector=LabelSelectorArgs(
match_labels=frontend_labels,
),
replicas=3,
template=PodTemplateSpecArgs(
metadata=ObjectMetaArgs(
labels=frontend_labels,
),
spec=PodSpecArgs(
containers=[ContainerArgs(
name="php-redis",
image="pulumi/guestbook-php-redis",
resources=ResourceRequirementsArgs(
requests={
"cpu": "100m",
"memory": "100Mi",
},
),
env=[EnvVarArgs(
name="GET_HOSTS_FROM",
value="dns",
# If your cluster config does not include a dns service, then to instead access an environment
# variable to find the leader's host, comment out the 'value: dns' line above, and
# uncomment the line below:
# "value": "env"
)],
ports=[ContainerPortArgs(
container_port=80,
)],
)],
),
),
))
frontend_service = Service(
"frontend",
metadata=ObjectMetaArgs(
name="frontend",
labels=frontend_labels,
),
spec=ServiceSpecArgs(
type="ClusterIP" if isMinikube else "LoadBalancer",
ports=[ServicePortArgs(
port=80
)],
selector=frontend_labels,
))
frontend_ip = ""
if isMinikube:
frontend_ip = frontend_service.spec.apply(lambda spec: spec.cluster_ip or "")
else:
ingress = frontend_service.status.apply(lambda status: status.load_balancer.ingress[0])
frontend_ip = ingress.apply(lambda ingress: ingress.ip or ingress.hostname or "")
pulumi.export("frontend_ip", frontend_ip)
|
tiler/tiler-scripts/bng2wgs84.py | Geovation/tiler | 134 | 11177643
import subprocess
import sys
import os
from tiler_helpers import absolute_file_paths
def bng2wgs84(file_path, out_path):
"""Convert a shapefiles coordinate reference system from British National Grid to WGS84"""
gsb = "/tiler-scripts/bin/OSTN02_NTv2.gsb"
if not os.path.isfile(gsb):
raise OSError("OSTN02_NTv2.gsb not found at : " + gsb)
print "\n Commencing conversion from BNG to WGS84 using OSTN02: ", file_path
command = 'ogr2ogr -s_srs "+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.999601 \
+x_0=400000 +y_0=-100000 +ellps=airy +units=m +no_defs +nadgrids={}" \
-t_srs EPSG:4326 {} {}'.format(gsb, out_path, file_path)
print "\n Running: ", command
wgs84_process = subprocess.Popen(command, shell=True)
exit_code = wgs84_process.wait()
_, stderr = wgs84_process.communicate()
if stderr:
raise IOError(stderr)
if exit_code != 0:
raise IOError("Exit code was not 0 for bng2wgs84 process")
def convert_file(directory, file_path):
"""Run the British National Grid to WGS84 conversion to seperate file with a WGS84 extension"""
if file_path.endswith(".shp"):
base = os.path.basename(file_path)
noext = os.path.splitext(base)[0]
out_path = directory + "/" + noext + "_WGS84.shp"
bng2wgs84(file_path, out_path)
def convert_folder(directory):
"""Convert a folder of shapefiles from British National Grid to WGS84"""
for file_path in absolute_file_paths(directory):
convert_file(directory, file_path)
if __name__ == '__main__':
if len(sys.argv) > 1:
INPUT_DIR = sys.argv[1]
else:
raise ValueError("INPUT_PATH not defined")
convert_folder(INPUT_DIR)
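# Example usage (illustrative; the directory path is a placeholder):
#   python bng2wgs84.py /data/shapefiles
# Each *.shp file in the directory is reprojected into a *_WGS84.shp copy,
# provided /tiler-scripts/bin/OSTN02_NTv2.gsb is present.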
|
Lib/objc/_UserManagement.py | snazari/Pyto | 701 | 11177656
"""
Classes from the 'UserManagement' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
UMUserPersonaContext = _Class("UMUserPersonaContext")
UMUser = _Class("UMUser")
UMMutableUser = _Class("UMMutableUser")
UMUserSwitchContext = _Class("UMUserSwitchContext")
UMPersonaCallbackListener = _Class("UMPersonaCallbackListener")
UMXPCServer = _Class("UMXPCServer")
UMLogMessage = _Class("UMLogMessage")
UMUserPersonaAttributes = _Class("UMUserPersonaAttributes")
UMLog = _Class("UMLog")
UMAbort = _Class("UMAbort")
UMTask = _Class("UMTask")
UMUserSyncTask = _Class("UMUserSyncTask")
UMUserSwitchBlockingTask = _Class("UMUserSwitchBlockingTask")
UMError = _Class("UMError")
UMQueue = _Class("UMQueue")
UMUserPersona = _Class("UMUserPersona")
UMUserMutablePersona = _Class("UMUserMutablePersona")
UMMobileKeyBag = _Class("UMMobileKeyBag")
UMUserManager = _Class("UMUserManager")
|
models/dvsa/dvsa.py | MichiganCOG/ViP | 210 | 11177694 | #Code heavily adapted from: https://github.com/MichiganCOG/Video-Grounding-from-Text/blob/master/model/dvsa.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from functools import partial
import os
from models.dvsa.dvsa_utils.transformer import Transformer
class DVSA(nn.Module):
"""
Deep Visual-Semantic Alignments (DVSA).
Implementation used as baseline in Weakly-Supervised Video Object Grounding...
Source: https://arxiv.org/pdf/1805.02834.pdf
Original paper: Deep visual-semantic alignments for generating image descriptions
https://cs.stanford.edu/people/karpathy/cvpr2015.pdf
"""
def __init__(self, **kwargs):
super().__init__()
num_class = kwargs['labels']
input_size = kwargs['input_size']
enc_size = kwargs['enc_size']
dropout = kwargs['dropout']
hidden_size = kwargs['hidden_size']
n_layers = kwargs['n_layers']
n_heads = kwargs['n_heads']
attn_drop = kwargs['attn_drop']
num_frm = kwargs['yc2bb_num_frm']
has_loss_weighting = kwargs['has_loss_weighting']
# encode the region feature
self.feat_enc = nn.Sequential(
nn.Linear(input_size, enc_size),
nn.Dropout(p=dropout),
nn.ReLU()
)
self.sigmoid = nn.Sigmoid()
# lookup table for object label embedding
self.obj_emb = nn.Embedding(num_class+1, enc_size) # +1 for the dummy paddings
self.num_class = num_class
self.obj_interact = Transformer(enc_size, 0, 0,
d_hidden=hidden_size,
n_layers=n_layers,
n_heads=n_heads,
drop_ratio=attn_drop)
self.obj_interact_fc = nn.Sequential(
nn.Linear(enc_size*2, int(enc_size/2)),
nn.ReLU(),
nn.Linear(int(enc_size/2), 5), # object interaction guidance (always 5 snippets)
nn.Sigmoid()
)
self.num_frm = num_frm
self.has_loss_weighting = has_loss_weighting
if isinstance(kwargs['pretrained'], int) and kwargs['pretrained']:
self._load_pretrained_weights()
def forward(self, x_o, obj, load_type):
is_evaluate = 1 if load_type[0] == 'test' or load_type[0] == 'val' else 0
if is_evaluate:
return self.output_attn(x_o, obj)
#only a single batch expected
x_o = x_o[0]
obj = obj[0]
x_o = self.feat_enc(x_o.permute(0,2,3,1).contiguous()).permute(0,3,1,2).contiguous()
x_o = torch.stack([x_o[0], x_o[1], x_o[0]])
obj = torch.stack([obj[0], obj[0], obj[1]])
N, C_out, T, num_proposals = x_o.size()
assert(N == 3) # two pos samples and one neg sample
# attention
O = obj.size(1)
attn_key = self.obj_emb(obj)
num_pos_obj = torch.sum(obj[0]<self.num_class).long().item()
num_neg_obj = torch.sum(obj[2]<self.num_class).long().item()
# object interaction guidance
attn_key_frm_feat = attn_key[0:1, :num_pos_obj] # cat visual feature
obj_attn_emb,_ = self.obj_interact(attn_key_frm_feat)
obj_attn_emb = obj_attn_emb[:, :num_pos_obj, :]
obj_attn_emb = torch.cat((obj_attn_emb, attn_key[0:1, :num_pos_obj], ), dim=2)
obj_attn_emb = self.obj_interact_fc(obj_attn_emb) # N, O, 5
itv = math.ceil(T/5)
tmp = [] # expand obj_attn_emb to N, O, T
for i in range(5):
l = min(itv*(i+1), T)-itv*i
if l>0:
tmp.append(obj_attn_emb[:, :, i:(i+1)].expand(1, num_pos_obj, l))
obj_attn_emb = torch.cat(tmp, 2).squeeze(0)
assert(obj_attn_emb.size(1) == self.num_frm)
loss_weigh = torch.mean(obj_attn_emb, dim=0)
loss_weigh = torch.cat((loss_weigh, loss_weigh)).unsqueeze(1)
if self.has_loss_weighting:
# dot-product attention
x_o = x_o.view(N, 1, C_out, T, num_proposals)
attn_weights = self.sigmoid((x_o*attn_key.view(N, O, C_out, 1, 1)).sum(2)/math.sqrt(C_out))
pos_weights = attn_weights[0, :num_pos_obj, :, :]
neg1_weights = attn_weights[1, :num_pos_obj, :, :]
neg2_weights = attn_weights[2, :num_neg_obj, :, :]
return torch.cat((torch.stack((torch.mean(torch.max(pos_weights, dim=2)[0], dim=0), torch.mean(torch.max(neg1_weights, dim=2)[0], dim=0)), dim=1),
torch.stack((torch.mean(torch.max(pos_weights, dim=2)[0], dim=0), torch.mean(torch.max(neg2_weights, dim=2)[0], dim=0)), dim=1))), loss_weigh
else:
# dot-product attention
x_o = x_o.view(N, 1, C_out, T*num_proposals)
attn_weights = self.sigmoid((x_o*attn_key.view(N, O, C_out, 1)).sum(2)/math.sqrt(C_out))
pos_weights = attn_weights[0, :num_pos_obj, :]
neg1_weights = attn_weights[1, :num_pos_obj, :]
neg2_weights = attn_weights[2, :num_neg_obj, :]
return torch.stack((torch.stack((torch.mean(torch.max(pos_weights, dim=1)[0]), torch.mean(torch.max(neg1_weights, dim=1)[0]))),
torch.stack((torch.mean(torch.max(pos_weights, dim=1)[0]), torch.mean(torch.max(neg2_weights, dim=1)[0]))))), loss_weigh
def output_attn(self, x_o, obj):
x_o = self.feat_enc(x_o.permute(0,2,3,1).contiguous()).permute(0,3,1,2).contiguous()
N, C_out, T, num_proposals = x_o.size()
assert(N == 1)
# attention
O = obj.size(1)
attn_key = self.obj_emb(obj)
# dot-product attention
x_o = x_o.view(N, 1, C_out, T*num_proposals)
attn_weights = self.sigmoid((x_o*attn_key.view(N, O, C_out, 1)).sum(2)/math.sqrt(C_out))
# attn_weights = self.sigmoid((x_e*attn_key.view(N, O, C_out, 1).expand(N, O, C_out, T*num_proposals)).sum(2)) # N, O, T, H*W
# additive attention
# x_e = x_o.view(N, 1, C_out, T, H*W).contiguous().expand(N, O, C_out, T, H*W)
# attn_e = attn_key.view(N, O, C_out, 1, 1).expand(N, O, C_out, T, H*W)
# attn_weights = self.attn_mlp(torch.cat((x_e, attn_e), dim=2).permute(0,1,3,4,2).contiguous()).squeeze(4) # N, O, T, H*W
return attn_weights.view(N, O, T, num_proposals)
def _load_pretrained_weights(self):
state_dict = torch.load('weights/yc2bb_full-model.pth', map_location=lambda storage, location: storage)
self.load_state_dict(state_dict)
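# Shape notes (derived from the code above, added for clarity): after the leading
# batch dimension is stripped, `forward` works on a region-feature tensor x_o of
# shape (N, C, T, num_proposals) and an object-label tensor obj of shape (N, O);
# at evaluation time `output_attn` returns attention weights of shape
# (N, O, T, num_proposals).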
|
cloudify_types/cloudify_types/component/operations.py | cloudify-cosmo/cloudify-manager | 124 | 11177703
# Copyright (c) 2017-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from cloudify import manager, ctx
from cloudify.decorators import operation
from cloudify.constants import COMPONENT
from cloudify._compat import urlparse
from cloudify.exceptions import NonRecoverableError
from cloudify.deployment_dependencies import (dependency_creator_generator,
create_deployment_dependency)
from cloudify_rest_client.client import CloudifyClient
from cloudify_rest_client.exceptions import CloudifyClientError
from cloudify_types.utils import errors_nonrecoverable
from .polling import (
poll_with_timeout,
is_all_executions_finished,
verify_execution_state,
wait_for_blueprint_to_upload
)
from .constants import (
DEPLOYMENTS_CREATE_RETRIES,
EXECUTIONS_TIMEOUT,
POLLING_INTERVAL,
EXTERNAL_RESOURCE
)
from .utils import (
blueprint_id_exists,
deployment_id_exists,
get_local_path,
zip_files,
should_upload_plugin,
populate_runtime_with_wf_results,
no_rerun_on_resume,
)
def _is_valid_url(candidate):
parse_url = urlparse(candidate)
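    # Note: despite its name, this helper returns True when `candidate` has no
    # scheme/netloc, i.e. when it is NOT an absolute URL; callers treat such
    # values as blueprint-relative paths and fetch them via
    # ctx.download_resource().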
return not (parse_url.netloc and parse_url.scheme)
def _get_desired_operation_input(key, args):
""" Resolving a key's value from kwargs or
runtime properties, node properties in the order of priority.
"""
return (args.get(key) or
ctx.instance.runtime_properties.get(key) or
ctx.node.properties.get(key))
def _get_client(kwargs):
client_config = _get_desired_operation_input('client', kwargs)
if client_config:
return CloudifyClient(**client_config)
else:
return manager.get_rest_client()
@operation(resumable=True)
@errors_nonrecoverable
def upload_blueprint(**kwargs):
resource_config = _get_desired_operation_input('resource_config', kwargs)
client = _get_client(kwargs)
blueprint = resource_config.get('blueprint', {})
blueprint_id = blueprint.get('id') or ctx.instance.id
blueprint_archive = blueprint.get('blueprint_archive')
blueprint_file_name = blueprint.get('main_file_name')
if 'blueprint' not in ctx.instance.runtime_properties:
ctx.instance.runtime_properties['blueprint'] = dict()
ctx.instance.runtime_properties['blueprint']['id'] = blueprint_id
ctx.instance.runtime_properties['blueprint']['blueprint_archive'] = \
blueprint_archive
ctx.instance.runtime_properties['blueprint']['application_file_name'] = \
blueprint_file_name
blueprint_exists = blueprint_id_exists(client, blueprint_id)
if blueprint.get(EXTERNAL_RESOURCE) and not blueprint_exists:
raise NonRecoverableError(
'Blueprint ID \"{0}\" does not exist, '
'but {1} is {2}.'.format(
blueprint_id,
EXTERNAL_RESOURCE,
blueprint.get(EXTERNAL_RESOURCE)))
elif blueprint.get(EXTERNAL_RESOURCE) and blueprint_exists:
ctx.logger.info("Using external blueprint.")
return True
elif blueprint_exists:
ctx.logger.info(
'Blueprint "%s" exists, but %s is %s, will use the existing one.',
blueprint_id, EXTERNAL_RESOURCE, blueprint.get(EXTERNAL_RESOURCE))
return True
if not blueprint_archive:
raise NonRecoverableError(
f'No blueprint_archive supplied, but {EXTERNAL_RESOURCE} is False')
# Check if the ``blueprint_archive`` is not a URL then we need to
# download it and pass the binaries to the client_args
if _is_valid_url(blueprint_archive):
blueprint_archive = ctx.download_resource(blueprint_archive)
try:
client.blueprints._upload(
blueprint_id=blueprint_id,
archive_location=blueprint_archive,
application_file_name=blueprint_file_name)
wait_for_blueprint_to_upload(blueprint_id, client)
except CloudifyClientError as ex:
if 'already exists' not in str(ex):
raise NonRecoverableError(
f'Client action "_upload" failed: {ex}.')
return True
def _abort_if_secrets_clash(client, secrets):
"""Check that new secret names aren't already in use"""
existing_secrets = {
secret.key: secret.value for secret in client.secrets.list()
}
duplicate_secrets = set(secrets).intersection(existing_secrets)
if duplicate_secrets:
raise NonRecoverableError(
f'The secrets: "{ ", ".join(duplicate_secrets) }" already exist, '
f'not updating...')
def _set_secrets(client, secrets):
if not secrets:
return
_abort_if_secrets_clash(client, secrets)
for secret_name in secrets:
client.secrets.create(
key=secret_name,
value=u'{0}'.format(secrets[secret_name]),
)
ctx.logger.info('Created secret %r', secret_name)
def _upload_plugins(client, plugins):
if (not plugins or 'plugins' in ctx.instance.runtime_properties):
# No plugins to install or already uploaded them.
return
ctx.instance.runtime_properties['plugins'] = []
existing_plugins = client.plugins.list()
for plugin_name, plugin in plugins.items():
zip_list = []
zip_path = None
try:
if (not plugin.get('wagon_path') or
not plugin.get('plugin_yaml_path')):
raise NonRecoverableError(
f'Provide wagon_path (got { plugin.get("wagon_path") }) '
f'and plugin_yaml_path (got '
f'{ plugin.get("plugin_yaml_path") })'
)
wagon_path = get_local_path(plugin['wagon_path'],
create_temp=True)
yaml_path = get_local_path(plugin['plugin_yaml_path'],
create_temp=True)
zip_list = [wagon_path, yaml_path]
if 'icon_png_path' in plugin:
icon_path = get_local_path(plugin['icon_png_path'],
create_temp=True)
zip_list.append(icon_path)
if not should_upload_plugin(yaml_path, existing_plugins):
ctx.logger.warning('Plugin "%s" was already uploaded...',
plugin_name)
continue
ctx.logger.info('Creating plugin "%s" zip archive...', plugin_name)
zip_path = zip_files(zip_list)
# upload plugin
plugin = client.plugins.upload(plugin_path=zip_path)
ctx.instance.runtime_properties['plugins'].append(
plugin.id)
ctx.logger.info('Uploaded %r', plugin.id)
finally:
for f in zip_list:
os.remove(f)
if zip_path:
os.remove(zip_path)
def _create_deployment_id(base_deployment_id, auto_inc_suffix):
if not auto_inc_suffix:
yield base_deployment_id
else:
for ix in range(DEPLOYMENTS_CREATE_RETRIES):
yield f'{base_deployment_id}-{ix}'
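# Behaviour note (describes the generator above; 'web' is only an illustrative
# base id): with auto_inc_suffix=False the caller gets the base id unchanged,
# while with auto_inc_suffix=True it gets candidates 'web-0', 'web-1', ... up to
# DEPLOYMENTS_CREATE_RETRIES attempts.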
@no_rerun_on_resume('_component_create_deployment_id')
def _do_create_deployment(client, deployment_ids, deployment_kwargs):
create_error = NonRecoverableError('Unknown error creating deployment')
for deployment_id in deployment_ids:
ctx.instance.runtime_properties['deployment']['id'] = deployment_id
try:
client.deployments.create(
deployment_id=deployment_id,
async_create=True,
**deployment_kwargs)
return deployment_id
except CloudifyClientError as ex:
create_error = ex
raise create_error
def _wait_for_deployment_create(client, deployment_id,
deployment_log_redirect, timeout, interval,
workflow_end_state):
"""Wait for deployment's create_dep_env to finish"""
create_execution = client.deployments.get(
deployment_id,
_include=['id', 'create_execution'],
)['create_execution']
if not create_execution:
raise NonRecoverableError(
f'No create execution found for deployment "{deployment_id}"')
return verify_execution_state(client,
create_execution,
deployment_id,
deployment_log_redirect,
workflow_end_state,
timeout,
interval)
@no_rerun_on_resume('_component_create_idd')
def _create_inter_deployment_dependency(client, deployment_id):
client.inter_deployment_dependencies.create(**create_deployment_dependency(
dependency_creator_generator(COMPONENT, ctx.instance.id),
source_deployment=ctx.deployment.id,
target_deployment=deployment_id
))
@operation(resumable=True)
@errors_nonrecoverable
def create(timeout=EXECUTIONS_TIMEOUT, interval=POLLING_INTERVAL, **kwargs):
client = _get_client(kwargs)
secrets = _get_desired_operation_input('secrets', kwargs)
_set_secrets(client, secrets)
plugins = _get_desired_operation_input('plugins', kwargs)
_upload_plugins(client, plugins)
if 'deployment' not in ctx.instance.runtime_properties:
ctx.instance.runtime_properties['deployment'] = dict()
config = _get_desired_operation_input('resource_config', kwargs)
runtime_deployment_prop = ctx.instance.runtime_properties.get(
'deployment', {})
runtime_deployment_id = runtime_deployment_prop.get('id')
deployment = config.get('deployment', {})
deployment_id = (runtime_deployment_id or
deployment.get('id') or
ctx.instance.id)
deployment_inputs = deployment.get('inputs', {})
# TODO capabilities are unused?
# deployment_capabilities = deployment.get('capabilities')
deployment_auto_suffix = deployment.get('auto_inc_suffix', False)
blueprint = config.get('blueprint', {})
blueprint_id = blueprint.get('id') or ctx.instance.id
deployment_id = _do_create_deployment(
client,
_create_deployment_id(deployment_id, deployment_auto_suffix),
{'blueprint_id': blueprint_id, 'inputs': deployment_inputs},
)
ctx.logger.info('Creating "%s" component deployment', deployment_id)
_create_inter_deployment_dependency(client, deployment_id)
return _wait_for_deployment_create(
client,
deployment_id,
deployment_log_redirect=deployment.get('logs', True),
timeout=timeout,
interval=interval,
workflow_end_state=kwargs.get('workflow_state', 'terminated'),
)
def _try_to_remove_plugin(client, plugin_id):
try:
client.plugins.delete(plugin_id=plugin_id)
except CloudifyClientError as ex:
if 'currently in use in blueprints' in str(ex):
ctx.logger.warning('Could not remove plugin "%s", it '
'is currently in use...', plugin_id)
else:
raise NonRecoverableError(
f'Failed to remove plugin {plugin_id}: {ex}')
def _delete_plugins(client):
plugins = ctx.instance.runtime_properties.get('plugins', [])
for plugin_id in plugins:
_try_to_remove_plugin(client, plugin_id)
ctx.logger.info('Removed plugin "%s".', plugin_id)
def _delete_secrets(client, secrets):
if not secrets:
return
for secret_name in secrets:
client.secrets.delete(key=secret_name)
ctx.logger.info('Removed secret "%r"', secret_name)
def _delete_runtime_properties():
for property_name in [
'deployment', 'blueprint', 'plugins', '_component_create_idd',
'_component_create_deployment_id',
]:
if property_name in ctx.instance.runtime_properties:
del ctx.instance.runtime_properties[property_name]
@operation(resumable=True)
@errors_nonrecoverable
def delete(timeout=EXECUTIONS_TIMEOUT, **kwargs):
client = _get_client(kwargs)
ctx.logger.info("Wait for component's stop deployment operation "
"related executions.")
config = _get_desired_operation_input('resource_config', kwargs)
runtime_deployment_prop = ctx.instance.runtime_properties.get(
'deployment', {})
runtime_deployment_id = runtime_deployment_prop.get('id')
deployment = config.get('deployment', {})
deployment_id = (runtime_deployment_id or
deployment.get('id') or
ctx.instance.id)
blueprint = config.get('blueprint', {})
blueprint_id = blueprint.get('id') or ctx.instance.id
_inter_deployment_dependency = create_deployment_dependency(
dependency_creator_generator(COMPONENT, ctx.instance.id),
ctx.deployment.id)
poll_with_timeout(
lambda: is_all_executions_finished(client, deployment_id),
timeout=timeout,
expected_result=True)
ctx.logger.info('Delete component\'s "%s" deployment', deployment_id)
poll_result = True
if not deployment_id_exists(client, deployment_id):
# Could happen in case that deployment failed to install
ctx.logger.warning('Didn\'t find component\'s "%s" deployment,'
'so nothing to do and moving on.', deployment_id)
else:
client.deployments.delete(deployment_id=deployment_id)
ctx.logger.info("Waiting for component's deployment delete.")
poll_result = poll_with_timeout(
lambda: deployment_id_exists(client, deployment_id),
timeout=timeout,
expected_result=False)
ctx.logger.debug("Internal services cleanup.")
time.sleep(POLLING_INTERVAL)
ctx.logger.debug("Waiting for all system workflows to stop/finish.")
poll_with_timeout(
lambda: is_all_executions_finished(client),
timeout=timeout,
expected_result=True)
if not blueprint.get(EXTERNAL_RESOURCE):
ctx.logger.info('Delete component\'s blueprint "%s".', blueprint_id)
client.blueprints.delete(blueprint_id=blueprint_id)
ctx.logger.info('Removing inter-deployment dependency between this '
'deployment ("%s") and "%s" the Component\'s '
'creator deployment...',
deployment_id, ctx.deployment.id)
_inter_deployment_dependency['target_deployment'] = \
deployment_id
_inter_deployment_dependency['is_component_deletion'] = True
client.inter_deployment_dependencies.delete(**_inter_deployment_dependency)
_delete_plugins(client)
_delete_secrets(client, _get_desired_operation_input('secrets', kwargs))
_delete_runtime_properties()
return poll_result
@operation(resumable=True)
@errors_nonrecoverable
def execute_start(timeout=EXECUTIONS_TIMEOUT, interval=POLLING_INTERVAL,
**kwargs):
client = _get_client(kwargs)
config = _get_desired_operation_input('resource_config', kwargs)
runtime_deployment_prop = ctx.instance.runtime_properties.get(
'deployment', {})
runtime_deployment_id = runtime_deployment_prop.get('id')
deployment = config.get('deployment', {})
deployment_id = (runtime_deployment_id or
deployment.get('id') or
ctx.instance.id)
deployment_log_redirect = deployment.get('logs', True)
workflow_id = kwargs.get('workflow_id', 'create_deployment_environment')
# Wait for the deployment to finish any executions
if not poll_with_timeout(
lambda: is_all_executions_finished(client, deployment_id),
timeout=timeout,
expected_result=True):
return ctx.operation.retry(
'The "{0}" deployment is not ready for execution.'.format(
deployment_id))
execution_args = config.get('executions_start_args', {})
request_args = dict(
deployment_id=deployment_id,
workflow_id=workflow_id,
**execution_args
)
if workflow_id == ctx.workflow_id:
request_args.update(dict(parameters=ctx.workflow_parameters))
ctx.logger.info('Starting execution for "%s" deployment', deployment_id)
execution = client.executions.start(**request_args)
ctx.logger.debug('Execution start response: "%s".', execution)
execution_id = execution['id']
if not verify_execution_state(
client,
execution_id,
deployment_id,
deployment_log_redirect,
kwargs.get('workflow_state', 'terminated'),
timeout,
interval):
ctx.logger.error('Execution %s failed for "%s" deployment',
execution_id, deployment_id)
ctx.logger.info('Execution succeeded for "%s" deployment', deployment_id)
populate_runtime_with_wf_results(client, deployment_id)
return True
|
src/genie/libs/parser/iosxe/tests/ShowFlowMonitor/cli/equal/golden_output2_expected.py | balmasea/genieparser | 204 | 11177720 | expected_output = {
"cache_type": "Normal (Platform cache)",
"cache_size": 16,
"current_entries": 1,
"flows_added": 1,
"flows_aged": 0,
}
|
mmhuman3d/core/visualization/renderer/torch3d_renderer/pointcloud_renderer.py | ykk648/mmhuman3d | 472 | 11177766
import warnings
from typing import Iterable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from pytorch3d.renderer import (
AlphaCompositor,
PointsRasterizationSettings,
PointsRasterizer,
)
from pytorch3d.structures import Meshes, Pointclouds
from mmhuman3d.core.cameras import MMCamerasBase
from mmhuman3d.utils.mesh_utils import mesh_to_pointcloud_vc
from .base_renderer import BaseRenderer
from .builder import RENDERER
@RENDERER.register_module(name=[
'PointCloud', 'pointcloud', 'point_cloud', 'pointcloud_renderer',
'PointCloudRenderer'
])
class PointCloudRenderer(BaseRenderer):
def __init__(self,
resolution: Tuple[int, int] = None,
device: Union[torch.device, str] = 'cpu',
output_path: Optional[str] = None,
out_img_format: str = '%06d.png',
radius: Optional[float] = None,
**kwargs) -> None:
"""Point cloud renderer.
Args:
resolution (Iterable[int]):
                (width, height) resolution of the rendered images.
device (Union[torch.device, str], optional):
You can pass a str or torch.device for cpu or gpu render.
Defaults to 'cpu'.
output_path (Optional[str], optional):
Output path of the video or images to be saved.
Defaults to None.
out_img_format (str, optional): name format for temp images.
Defaults to '%06d.png'.
radius (float, optional): radius of points. Defaults to None.
Returns:
None
"""
self.radius = radius
super().__init__(
resolution=resolution,
device=device,
output_path=output_path,
out_img_format=out_img_format,
**kwargs)
def to(self, device):
if isinstance(device, str):
device = torch.device(device)
self.device = device
if getattr(self.rasterizer, 'cameras', None) is not None:
self.rasterizer.cameras = self.rasterizer.cameras.to(device)
self.compositor = self.compositor.to(device)
return self
def _init_renderer(self, rasterizer=None, compositor=None, **kwargs):
"""Set render params."""
if isinstance(rasterizer, nn.Module):
rasterizer.raster_settings.image_size = self.resolution
self.rasterizer = rasterizer
elif isinstance(rasterizer, dict):
rasterizer['image_size'] = self.resolution
if self.radius is not None:
rasterizer.update(radius=self.radius)
raster_settings = PointsRasterizationSettings(**rasterizer)
self.rasterizer = PointsRasterizer(raster_settings=raster_settings)
elif rasterizer is None:
self.rasterizer = PointsRasterizer(
raster_settings=PointsRasterizationSettings(
radius=self.radius,
image_size=self.resolution,
points_per_pixel=10))
else:
raise TypeError(
f'Wrong type of rasterizer: {type(self.rasterizer)}.')
if isinstance(compositor, dict):
self.compositor = AlphaCompositor(**compositor)
elif isinstance(compositor, nn.Module):
self.compositor = compositor
elif compositor is None:
self.compositor = AlphaCompositor()
else:
raise TypeError(
f'Wrong type of compositor: {type(self.compositor)}.')
self = self.to(self.device)
def forward(
self,
pointclouds: Optional[Pointclouds] = None,
vertices: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
verts_rgba: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
meshes: Meshes = None,
cameras: Optional[MMCamerasBase] = None,
indexes: Optional[Iterable[int]] = None,
backgrounds: Optional[torch.Tensor] = None,
**kwargs,
) -> Union[None, torch.Tensor]:
"""Render pointclouds.
Args:
pointclouds (Optional[Pointclouds], optional): pytorch3d data
structure. If not None, `vertices` and `verts_rgba` will
be ignored.
Defaults to None.
vertices (Optional[Union[torch.Tensor, List[torch.Tensor]]],
optional): coordinate tensor of points. Defaults to None.
verts_rgba (Optional[Union[torch.Tensor, List[torch.Tensor]]],
optional): color tensor of points. Defaults to None.
indexes (Optional[Iterable[int]], optional): indexes for the
images.
Defaults to None.
backgrounds (Optional[torch.Tensor], optional): background images.
Defaults to None.
Returns:
Union[None, torch.Tensor]: Return tensor or None.
"""
if pointclouds is None:
if meshes is not None:
pointclouds = mesh_to_pointcloud_vc(meshes)
else:
assert vertices is not None
if isinstance(vertices, torch.Tensor):
if vertices.ndim == 2:
vertices = vertices[None]
if isinstance(verts_rgba, torch.Tensor):
if verts_rgba.ndim == 2:
verts_rgba = verts_rgba[None]
pointclouds = Pointclouds(points=vertices, features=verts_rgba)
else:
if vertices is not None or verts_rgba is not None:
warnings.warn(
'Redundant input, will ignore `vertices` and `verts_rgb`.')
pointclouds = pointclouds.to(self.device)
self._update_resolution(cameras, **kwargs)
fragments = self.rasterizer(pointclouds, cameras=cameras)
r = self.rasterizer.raster_settings.radius
dists2 = fragments.dists.permute(0, 3, 1, 2)
weights = 1 - dists2 / (r * r)
rendered_images = self.compositor(
fragments.idx.long().permute(0, 3, 1, 2),
weights,
pointclouds.features_packed().permute(1, 0),
**kwargs,
)
rendered_images = rendered_images.permute(0, 2, 3, 1)
if self.output_path is not None:
rgba = self.tensor2rgba(rendered_images)
if self.output_path is not None:
self.write_images(rgba, backgrounds, indexes)
return rendered_images
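# Minimal usage sketch (an assumption about typical usage, not from the original
# file; `cameras` stands for any prepared MMCamerasBase instance and the point
# tensors are placeholders):
#
#   renderer = PointCloudRenderer(resolution=(512, 512), device='cuda', radius=0.003)
#   images = renderer(vertices=points_n_by_3[None], verts_rgba=colors_n_by_4[None],
#                     cameras=cameras)   # -> (1, H, W, C) tensor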
|
e0/h.py | daedalus/knob | 151 | 11177780
#!/usr/bin/python2
# -*- coding: utf-8 -*-
"""
h.py
Ar and Ar_prime uses SAFER+
We use https://github.com/aer0s/python-mcrypt that is ONLY compatible with
python2 and it uses a str based API.
SAFER+ is an enhanced
version of an existing 64-bit block cipher SAFER-SK128
bitstring.BitArray API:
s = BitArray()
s.append('0x000001b3') # the sequence_header_code
s.append('uint:12=352') # 12 bit unsigned integer
s.append('uint:12=288')
# s[0] contains the MOST SIGNIFICANT BIT
"""
from bitstring import BitArray
import math
from constants import *
def H(K, I_one, I_two, L):
"""Hash function used in e1 and e3.
e1 computes SRES and ACO
e3 computes Kc
Returns Keys, Ar, KeysPrime, ArPrime, Out
"""
assert len(K) == Ar_KEY_LEN and type(K) == bytearray
assert len(I_one) == Ar_KEY_LEN and type(I_one) == bytearray
assert (len(I_two) == COF_LEN or len(I_two) == BTADD_LEN) and type(I_two) == bytearray
log.debug('H(K, I_one, I_two, {})'.format(L))
Keys = key_sched(K)
K_tilda = K_to_K_tilda(K)
KeysPrime = key_sched(K_tilda)
I_two_ext = E(I_two, L)
log.debug('H I_two : {}'.format(repr(I_two)))
log.debug('H I_two_ext: {}'.format(repr(I_two_ext)))
Ar = Ar_rounds(Keys, I_one, is_prime=False)
pre_ar_prime_inp = xor_bytes(Ar[10], I_one)
log.debug('H pre_ar_prime_inp: {}'.format(repr(pre_ar_prime_inp)))
ar_prime_inp = add_bytes_mod256(I_two_ext, pre_ar_prime_inp)
log.debug('H ar_prime_inp: {}'.format(repr(ar_prime_inp)))
ArPrime = Ar_rounds(KeysPrime, ar_prime_inp, is_prime=True)
# NOTE: either Kc or SRES || ACO
Out = ArPrime[10]
return Keys, Ar, KeysPrime, ArPrime, Out
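# Context note (added for clarity; reflects how the Bluetooth spec uses this hash,
# stated here as background rather than taken from this file): E1 computes
# H(link key, AU_RAND, BD_ADDR, 6) and splits Out into SRES (first 4 bytes) and
# ACO (remaining 12 bytes), while E3 computes H(link key, EN_RAND, COF, 12) and
# uses Out directly as Kc.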
def Ar_rounds(Keys, inp, is_prime):
"""
Ar[0] = None, not used
Ar[1] = inp = round1
Ar[2..9] = round1..8
Ar[10] = extra add_one after last round
"""
assert len(Keys) == 18 and type(Keys[1]) == bytearray
assert len(inp) == Ar_KEY_LEN and type(inp) == bytearray
    # NOTE: Ar[0..10]
Ar = [i for i in range(11)]
Ar[0] = None
# NOTE: deep copy here
Ar[1] = bytearray(inp[i] for i in range(16))
log.debug('Ar_rounds is_prime: {}, Ar[1]: {}'.format(is_prime, repr(Ar[1])))
# NOTE: temp holds the current input value
temp = bytearray(inp[i] for i in range(16))
    for r in range(1,9): # 1..8
        # NOTE: for Ar', the original input Ar[1] is mixed into the round-3 input via add_one
if is_prime and r == 3:
temp = add_one(temp, Ar[1])
# log.debug('Ar_rounds is_prime: {}, added: {}'.format(is_prime, repr(temp)))
rv1 = add_one(temp, Keys[2*r - 1]) # odd keys use add_one
rv2 = nonlin_subs(rv1)
rv3 = add_two(rv2, Keys[2*r]) # even keys use add_two
rv4 = PHTs(rv3)
rv5 = PERMUTE(rv4)
rv6 = PHTs(rv5)
rv7 = PERMUTE(rv6)
rv8 = PHTs(rv7)
rv9 = PERMUTE(rv8)
rv10 = PHTs(rv9)
temp = rv10
Ar[r+1] = rv10
# log.debug('Ar_rounds is_prime: {}, Ar[{}]: {}'.format(is_prime, r+1, repr(Ar[r+1])))
Ar[10] = add_one(Ar[9], Keys[17])
# log.debug('Ar_rounds is_prime: {}, Ar[10]: {}'.format(is_prime, repr(Ar[9])))
emsg = 'Ar_rounds len(Ar) is {}, it should be {}'.format(len(Ar), 10)
assert(len(Ar) == 11), emsg
return Ar
def add_one(l, r):
"""Applied when subkey index is odd, including K[17]."""
assert type(l) == bytearray and len(l) == Ar_KEY_LEN
assert type(r) == bytearray and len(r) == Ar_KEY_LEN
rv = bytearray(16)
for i in range(16):
if i in [0, 3, 4, 7, 8, 11, 12, 15]:
rv[i] = l[i] ^ r[i]
else:
rv[i] = (l[i] + r[i]) % 256
assert len(rv) == Ar_KEY_LEN
return rv
def add_two(l, r):
"""Applied when subkey index is even."""
assert type(l) == bytearray and len(l) == Ar_KEY_LEN
assert type(r) == bytearray and len(r) == Ar_KEY_LEN
rv = bytearray(16)
for i in range(16):
if i in [0, 3, 4, 7, 8, 11, 12, 15]:
rv[i] = (l[i] + r[i]) % 256
else:
rv[i] = l[i] ^ r[i]
assert len(rv) == Ar_KEY_LEN
return rv
def nonlin_subs(inp):
"""e(xponential) and l(og) non linear subs."""
assert type(inp) == bytearray and len(inp) == Ar_KEY_LEN
rv = bytearray(16)
for i in range(16):
if i in [0, 3, 4, 7, 8, 11, 12, 15]:
rv[i] = EXP_45[inp[i]]
else:
rv[i] = EXP_45.index(inp[i])
assert len(rv) == Ar_KEY_LEN
return rv
def PHT(x, y):
"""Pseudo-Hadamard transform"""
assert type(x) == int
assert type(y) == int
rv_x = (2*x + y) % 256
rv_y = (x + y) % 256
return rv_x, rv_y
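# For example: PHT(1, 2) == (4, 3), since (2*1 + 2) % 256 == 4 and (1 + 2) % 256 == 3.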
def PHTs(inp):
assert type(inp) == bytearray and len(inp) == Ar_KEY_LEN
rv = bytearray(16)
for i in [0, 2, 4, 6, 8, 10, 12, 14]:
rv[i], rv[i+1] = PHT(inp[i], inp[i+1])
assert len(rv) == Ar_KEY_LEN
return rv
def PERMUTE(inp):
"""Armenian permutation."""
assert type(inp) == bytearray
assert len(inp) == Ar_KEY_LEN
permuted_inp = bytearray()
permuted_inp.append(inp[8])
permuted_inp.append(inp[11])
permuted_inp.append(inp[12])
permuted_inp.append(inp[15])
permuted_inp.append(inp[2])
permuted_inp.append(inp[1])
permuted_inp.append(inp[6])
permuted_inp.append(inp[5])
permuted_inp.append(inp[10])
permuted_inp.append(inp[9])
permuted_inp.append(inp[14])
permuted_inp.append(inp[13])
permuted_inp.append(inp[0])
permuted_inp.append(inp[7])
permuted_inp.append(inp[4])
permuted_inp.append(inp[3])
assert len(permuted_inp) == Ar_KEY_LEN
return permuted_inp
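# The same permutation can be written compactly as:
#   PERM = [8, 11, 12, 15, 2, 1, 6, 5, 10, 9, 14, 13, 0, 7, 4, 3]
#   permuted_inp = bytearray(inp[p] for p in PERM)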
def key_sched(key):
emsg = 'key_sched key len is {}, it should be {}'.format(len(key), Ar_KEY_LEN)
assert len(key) == Ar_KEY_LEN, emsg
Keys = [i for i in range(18)]
Keys[0] = None # Keys[0] is not used
B = biases()
# NOTE: XOR of all Bytes
byte_16 = 0
for i in range(16):
byte_16 ^= key[i]
# log.debug('key_sched byte_16: {}'.format(byte_16))
# NOTE: deep copy here
presel_k1 = bytearray(key[i] for i in range(16))
presel_k1.append(byte_16)
# log.debug('key_sched presel_k1: {}'.format(repr(presel_k1)))
emsg = 'key_sched presel_k1 len is {}, it should be {}'.format(len(presel_k1),Ar_KEY_LEN+1)
assert len(presel_k1) == Ar_KEY_LEN+1, emsg
k1 = select(1, presel_k1)
emsg = 'key_sched k1 len is {}, it should be {}'.format(len(k1),Ar_KEY_LEN)
assert len(k1) == Ar_KEY_LEN, emsg
Keys[1] = k1
# log.debug('key_sched k1: {}'.format(repr(k1)))
# presel_k2 = rotate(presel_k1)
# pre_k2 = select('k2', presel_k2)
# Keys[2] = add_bytes_mod256(pre_k2, B[2])
# presel_k3 = rotate(presel_k2)
# pre_k3 = select('k3', presel_k3)
# Keys[3] = add_bytes_mod256(pre_k3, B[3])
presel_old = presel_k1
for N in range(2, 18):
presel = rotate(presel_old)
pre_k = select(N, presel)
Keys[N] = add_bytes_mod256(pre_k, B[N])
presel_old = presel
return Keys
def select(what, key):
if what == 1:
selected_key = key[0:16]
elif what == 2:
selected_key = key[1:]
elif what == 3:
selected_key = key[2:]
selected_key.append(key[0])
elif what == 4:
selected_key = key[3:]
selected_key.extend(key[:2])
elif what == 5:
selected_key = key[4:]
selected_key.extend(key[:3])
elif what == 6:
selected_key = key[5:]
selected_key.extend(key[:4])
elif what == 7:
selected_key = key[6:]
selected_key.extend(key[:5])
elif what == 8:
selected_key = key[7:]
selected_key.extend(key[:6])
elif what == 9:
selected_key = key[8:]
selected_key.extend(key[:7])
elif what == 10:
selected_key = key[9:]
selected_key.extend(key[:8])
elif what == 11:
selected_key = key[10:]
selected_key.extend(key[:9])
elif what == 12:
selected_key = key[11:]
selected_key.extend(key[:10])
elif what == 13:
selected_key = key[12:]
selected_key.extend(key[:11])
elif what == 14:
selected_key = key[13:]
selected_key.extend(key[:12])
elif what == 15:
selected_key = key[14:]
selected_key.extend(key[:13])
elif what == 16:
selected_key = key[15:]
selected_key.extend(key[:14])
elif what == 17:
selected_key = key[16:]
selected_key.extend(key[:15])
else:
log.error('select what: {} is not supported'.format(what))
return None
emsg = 'select selected_key len is {}, it should be {}'.format(
len(selected_key),Ar_KEY_LEN)
assert len(selected_key) == Ar_KEY_LEN, emsg
return selected_key
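# For 2 <= what <= 17 the branches above are equivalent to
#   selected_key = bytearray(key[what - 1:]); selected_key.extend(key[:what - 2])
# while select(1) simply drops the 17th byte of the 17-byte rotated key.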
def biases():
"""Returns a list of bytearrays biases.
These are constants and can be pre-computed:
biases B[2]: bytearray(b'F\x97\xb1\xba\xa3\xb7\x10\n\xc57\xb3\xc9Z(\xacd')
biases B[3]: bytearray(b'\xec\xab\xaa\xc6g\x95X\r\xf8\x9a\xf6nf\xdc\x05=')
biases B[4]: bytearray(b'\x8a\xc3\xd8\x89j\xe96IC\xbf\xeb\xd4\x96\x9bh\xa0')
biases B[5]: bytearray(b']W\x92\x1f\xd5q\\\xbb"\xc1\xbe{\xbc\x99c\x94')
biases B[6]: bytearray(b'*a\xb842\x19\xfd\xfb\x17@\xe6Q\x1dAD\x8f')
biases B[7]: bytearray(b'\xdd\x04\x80\xde\xe71\xd6\x7f\x01\xa2\xf79\xdao#\xca')
biases B[8]: bytearray(b':\xd0\x1c\xd10>\x12\xa1\xcd\x0f\xe0\xa8\xaf\x82Y,')
biases B[9]: bytearray(b'}\xad\xb2\xef\xc2\x87\xceu\x06\x13\x02\x90O.r3')
biases B[10]: bytearray(b"\xc0\x8d\xcf\xa9\x81\xe2\xc4\'/lz\x9fR\xe1\x158")
biases B[11]: bytearray(b'\xfcB\xc7\x08\xe4\tU^\x8c\x14v`\xff\xdf\xd7')
biases B[12]: bytearray(b'\xfa\x0b!\x00\x1a\xf9\xa6\xb9\xe8\x9ebL\xd9\x91P\xd2')
biases B[13]: bytearray(b'\x18\xb4\x07\x84\xea[\xa4\xc8\x0e\xcbHiKN\x9c5')
biases B[14]: bytearray(b'EMT\xe5%<\x0cJ\x8b?\xcc\xa7\xdbk\xae\xf4')
biases B[15]: bytearray(b'-\xf3|m\x9d\xb5&t\xf2\x93S\xb0\xf0\x11\xed\x83')
biases B[16]: bytearray(b'\xb6\x03\x16s;\x1e\x8ep\xbd\x86\x1bG~$V\xf1')
biases B[17]: bytearray(b'\x88F\x97\xb1\xba\xa3\xb7\x10\n\xc57\xb3\xc9Z(\xac')
"""
B = [i for i in range(18)]
B[0] = None # not used
B[1] = None # not used
# B[2] = bytearray()
# N = 2
# for i in range(16):
# int_val = ((45**(45**(17*N+i+1) % 257)) % 257) % 256
# B[2].append(int_val)
# log.debug('biases B[2]: {}'.format(repr(B[2])))
# assert len(B[2]) == Ar_KEY_LEN
# B[3] = bytearray()
# N=3
# for i in range(16):
# int_val = ((45**(45**(17*N+i+1) % 257)) % 257) % 256
# B[3].append(int_val)
# log.debug('biases B[3]: {}'.format(repr(B[3])))
# assert len(B[3]) == Ar_KEY_LEN
for N in range(2,18):
B[N] = bytearray()
for i in range(16):
int_val = ((45**(45**(17*N+i+1) % 257)) % 257) % 256
B[N].append(int_val)
# log.debug('biases B[{}]: {}'.format(N, repr(B[N])))
assert len(B[N]) == Ar_KEY_LEN
return B
def rotate(key):
""""Each Byte is rotated 3 positions on the left (not shifted)."""
assert len(key) == Ar_KEY_LEN+1 and type(key) == bytearray
rotated_key = bytearray()
for i in range(0, 17):
byte = BitArray(key[i:i+1])
assert len(byte.bin) == 8
# log.debug('rotate {} byte: {}, {}'.format(i, byte.bin, byte.uint))
# rotated_byte = byte << 3
rotated_byte = byte
rotated_byte.rol(3)
assert len(rotated_byte.bin) == 8
# log.debug('rotate {} rotated_byte: {}, {}'.format(i, rotated_byte.bin, rotated_byte.uint))
# NOTE: byte.uint is unsigned, byte.int is signed
rotated_key.append(rotated_byte.uint)
# log.debug('rotate rotated_key: {}'.format(repr(rotated_key)))
assert len(rotated_key) == Ar_KEY_LEN+1
return rotated_key
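# For example: rotating 0x01 left by 3 gives 0x08, and 0xE0 (0b11100000) gives 0x07.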
def add_bytes_mod256(l, r):
"""Sixteen bytewise additions mod 256 .
    Used to produce the input for ArPrime.
"""
assert type(l) == bytearray
assert len(l) == Ar_KEY_LEN
assert type(r) == bytearray
assert len(r) == Ar_KEY_LEN
rv = bytearray()
for i in range(16):
rv.append((l[i] + r[i]) % 256)
assert len(rv) == Ar_KEY_LEN
return rv
def xor_bytes(l, r):
"""Sixteen bytewise XOR.
    Used to produce the input for ArPrime.
"""
assert type(l) == bytearray
assert len(l) == Ar_KEY_LEN
assert type(r) == bytearray
assert len(r) == Ar_KEY_LEN
rv = bytearray()
for i in range(16):
rv.append(l[i] ^ r[i])
assert len(rv) == Ar_KEY_LEN
return rv
def K_to_K_tilda(K):
"""(EQ 15) p 1676 Accepts and returns a bytearray"""
emsg1 = 'K_to_K_tilda K len is {}, it should be {}'.format(len(K), Ar_KEY_LEN)
assert len(K) == Ar_KEY_LEN, emsg1
emsg2 = 'K_to_K_tilda K type is {}, it should be a bytearray'.format(type(K))
assert type(K) == bytearray, emsg2
K_tilda = bytearray()
K_tilda.append((K[0] + 233) % 256)
K_tilda.append( K[1] ^ 229)
K_tilda.append((K[2] + 223) % 256)
K_tilda.append( K[3] ^ 193)
K_tilda.append((K[4] + 179) % 256)
K_tilda.append( K[5] ^ 167)
K_tilda.append((K[6] + 149) % 256)
K_tilda.append( K[7] ^ 131)
K_tilda.append( K[8] ^ 233)
K_tilda.append((K[9] + 229) % 256)
K_tilda.append( K[10] ^ 223)
K_tilda.append((K[11] + 193) % 256)
K_tilda.append( K[12] ^ 179)
K_tilda.append((K[13] + 167) % 256)
K_tilda.append( K[14] ^ 149)
K_tilda.append((K[15] + 131) % 256)
log.debug('K_to_K_tilda K_tilda: {}'.format(repr(K_tilda)))
assert len(K_tilda) == Ar_KEY_LEN
return K_tilda
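# For an all-zero K, K_tilda is simply the offset pattern itself:
#   bytearray([233, 229, 223, 193, 179, 167, 149, 131,
#              233, 229, 223, 193, 179, 167, 149, 131])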
def K_to_K_tilda_str(K):
"""(EQ 15) p 1676 Accepts and returns a str"""
    emsg1 = 'K_to_K_tilda_str K len is {}, it should be {}'.format(len(K),
                                                                   Ar_KEY_LEN)
    assert len(K) == Ar_KEY_LEN, emsg1
    emsg2 = 'K_to_K_tilda_str K type is {}, it should be {}'.format(type(K), str)
    assert type(K) == str, emsg2
K_tilda = ''
K_tilda += chr( (ord(K[0]) + 233) % 256 )
K_tilda += chr( (ord(K[1]) ^ 229 ) )
K_tilda += chr( (ord(K[2]) + 223) % 256 )
K_tilda += chr( (ord(K[3]) ^ 193 ) )
K_tilda += chr( (ord(K[4]) + 179) % 256 )
K_tilda += chr( (ord(K[5]) ^ 167 ) )
K_tilda += chr( (ord(K[6]) + 149) % 256 )
K_tilda += chr( (ord(K[7]) ^ 131 ) )
K_tilda += chr( (ord(K[8]) ^ 233 ) )
K_tilda += chr( (ord(K[9]) + 229) % 256 )
K_tilda += chr( (ord(K[10]) ^ 223 ) )
K_tilda += chr( (ord(K[11]) + 193) % 256 )
K_tilda += chr( (ord(K[12]) ^ 179 ) )
K_tilda += chr( (ord(K[13]) + 167) % 256 )
K_tilda += chr( (ord(K[14]) ^ 149 ) )
K_tilda += chr( (ord(K[15]) + 131) % 256 )
log.debug('K_to_K_tilda_str: {}'.format(K_tilda.encode('hex')))
return K_tilda
def E(inp, L):
"""(EQ 14) pag 1675 Expansion of a L Bytes inp to a 16 Byte output.
X[i % L] for i = 0...15
"""
emsg1 = 'E L is {}, it should be {}'.format(type(L), 'int')
assert type(L) == int, emsg1
emsg2 = 'E inp is {}, it should be bytearray'.format(type(inp))
assert type(inp) == bytearray, emsg2
emsg3 = 'E inp len is {}, it should be {}'.format(len(inp), L)
assert len(inp) == L, emsg3
ext_inp = bytearray()
for i in range(16):
index = i % L
ext_inp.append(inp[index])
# log.debug('E inp: {}'.format(repr(inp)))
# log.debug('E ext_inp: {}'.format(repr(ext_inp)))
assert(len(ext_inp) == Ar_KEY_LEN)
return ext_inp
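# For example, with L == 6 a 6-byte BD_ADDR is repeated cyclically: the output is
# bytes 0..5, then bytes 0..5 again, then bytes 0..3 (16 bytes in total).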
def E_str(inp, L):
"""Expansion of a L Bytes inp to a 16 Byte output.
Our inp should always be 12 Bytes long.
"""
emsg1 = 'E L is {}, it should be {}'.format(type(L), 'int')
assert type(L) == int, emsg1
emsg2 = 'E inp is {}, it should be {}'.format(type(inp), 'str')
assert type(inp) == str, emsg2
emsg3 = 'E inp len is {}, it should be {}'.format(len(inp), L)
assert len(inp) == L, emsg3
ext_inp = ''
for i in range(16):
index = i % L
ext_inp += inp[index]
# log.debug('E inp: {}'.format(inp))
# log.debug('E ext_inp: {}'.format(ext_inp))
log.debug('E inp: {}'.format(inp.encode('hex')))
log.debug('E ext_inp: {}'.format(ext_inp.encode('hex')))
return ext_inp
|
src/stringTesting.py | MarletteFunding/aws-kube-codesuite | 184 | 11177785 | import yaml, boto3, botocore, json, zipfile
from os import path
from kubernetes import client, config
from string import Template
s3 = boto3.resource('s3')
code_pipeline = boto3.client('codepipeline')
ssm = boto3.client('ssm')
def main():
# Build config file from template and secrets in SSM
CA = "THIS IS A CA"
CLIENT_CERT = "THIS IS A CLIENT CERT"
CLIENT_KEY = "THIS IS A CLIENT KEY"
ENDPOINT= "THIS IS AN ENDPOINT"
filein = open('/tmp/config')
src = Template(filein.read())
    # NOTE: string.Template placeholders are looked up without the leading '$'
    d = {'CA': CA, 'CLIENT_CERT': CLIENT_CERT, 'CLIENT_KEY': CLIENT_KEY, 'ENDPOINT': ENDPOINT}
    result = src.substitute(d)
    # presumably the rendered config should be written back before it is loaded
    with open('/tmp/config', 'w') as fileout:
        fileout.write(result)
    config.load_kube_config('/tmp/config')
def inplace_change(filename, old_string, new_string):
with open(filename) as f:
s = f.read()
if old_string not in s:
# print '"{old_string}" not found in {filename}.'.format(**locals())
return
with open(filename, 'w') as f:
# print 'Changing "{old_string}" to "{new_string}" in {filename}'.format(**locals())
s = s.replace(old_string, new_string)
f.write(s)
main() |
Pixel_Chase_Game/code.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 11177788 | import time
import random
import board
from rainbowio import colorwheel
import neopixel
import digitalio
import adafruit_led_animation.color as color
# button pin setup
button = digitalio.DigitalInOut(board.D5)
button.direction = digitalio.Direction.INPUT
button.pull = digitalio.Pull.UP
# neopixel setup
pixel_pin = board.D6
num_pixels = 61
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.2, auto_write=False)
def rainbow_cycle(wait):
for j in range(255):
for i in range(num_pixels):
rc_index = (i * 256 // 10) + j
pixels[i] = colorwheel(rc_index & 255)
pixels.show()
time.sleep(wait)
# color_chase setup
def color_chase(c, wait):
for i in range(num_pixels):
pixels[i] = c
time.sleep(wait)
pixels.show()
time.sleep(0.5)
# function to blink the neopixels when you lose
def game_over():
color_chase(color.BLACK, 0.05)
pixels.fill(color.RED)
pixels.show()
time.sleep(0.5)
pixels.fill(color.BLACK)
pixels.show()
time.sleep(0.5)
pixels.fill(color.RED)
pixels.show()
time.sleep(0.5)
pixels.fill(color.BLACK)
pixels.show()
time.sleep(0.5)
pixels.fill(color.RED)
pixels.show()
time.sleep(1)
# variables and states
pixel = 0
num = 0
last_num = 0
now_color = 0
next_color = 1
speed = 0.1
level = 0.005
final_level = 0.001
new_target = True
button_state = False
# neopixel colors
colors = [color.RED, color.ORANGE, color.YELLOW, color.GREEN, color.TEAL, color.CYAN,
color.BLUE, color.PURPLE, color.MAGENTA, color.GOLD, color.AQUA, color.PINK]
while True:
# button debouncing
if not button.value and not button_state:
button_state = True
# if new level starting..
if new_target:
# randomize target location
y = int(random.randint(5, 55))
x = int(y - 1)
z = int(y + 1)
new_target = False
print(x, y, z)
pixels[x] = color.WHITE
pixels[y] = colors[next_color]
pixels[z] = color.WHITE
# delay without time.sleep()
if (pixel + speed) < time.monotonic():
# turn off pixel behind chaser
if num > 0:
last_num = num - 1
pixels[last_num] = color.BLACK
pixels.show()
# keep target pixels their colors when the chaser passes
if last_num in (x, y, z):
pixels[x] = color.WHITE
pixels[y] = colors[next_color]
pixels[z] = color.WHITE
# move chaser pixel by one
if num < num_pixels:
pixels[num] = colors[now_color]
pixels.show()
#print(num)
#print("target is", y)
num += 1
# send chaser back to the beginning of the circle
if num == num_pixels:
last_num = num - 1
pixels[last_num] = color.BLACK
pixels.show()
num = 0
# if the chaser hits the target...
if last_num in [x, y, z] and not button.value:
button_state = False
# fills with the next color
pixels.fill(colors[next_color])
pixels.show()
print(num)
print(x, y, z)
# chaser resets
num = 0
time.sleep(0.5)
pixels.fill(color.BLACK)
pixels.show()
# speed increases for next level
speed = speed - level
# color updates
next_color = next_color + 1
if next_color > 11:
next_color = 0
now_color = now_color + 1
if now_color > 11:
now_color = 0
# setup for new target
new_target = True
print("speed is", speed)
print("button is", button.value)
# if the chaser misses the target...
if last_num not in [x, y, z] and not button.value:
button_state = False
print(num)
print(x, y, z)
# fills with current chaser color
pixels.fill(colors[now_color])
pixels.show()
# function to flash all pixels red
game_over()
# chaser is reset
num = 0
pixels.fill(color.BLACK)
pixels.show()
# speed is reset to default
speed = 0.1
# colors are reset
next_color = 1
now_color = 0
# setup for new target
new_target = True
print("speed is", speed)
print("button is", button.value)
# when you have beaten all the levels...
if speed < final_level:
# rainbows!
rainbow_cycle(0.01)
time.sleep(1)
# chaser is reset
num = 0
pixels.fill(color.BLACK)
pixels.show()
# speed is reset to default
speed = 0.1
# colors are reset
next_color = 1
now_color = 0
# setup for new target
new_target = True
# time.monotonic() is reset for the delay
pixel = time.monotonic()
|
seahub/api2/endpoints/shareable_groups.py | weimens/seahub | 101 | 11177819 | # Copyright (c) 2011-2016 Seafile Ltd.
import os
import sys
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from seaserv import ccnet_api
from seahub.utils import is_org_context
from seahub.utils.timeutils import timestamp_to_isoformat_timestr
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.avatar.settings import GROUP_AVATAR_DEFAULT_SIZE
from constance import config
try:
current_path = os.path.dirname(os.path.abspath(__file__))
seafile_conf_dir = os.path.join(current_path, \
'../../../../../conf')
sys.path.append(seafile_conf_dir)
from seahub_custom_functions import custom_get_groups
CUSTOM_GET_GROUPS = True
except ImportError as e:
CUSTOM_GET_GROUPS = False
class ShareableGroups(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def _get_group_info(self, request, group, avatar_size):
isoformat_timestr = timestamp_to_isoformat_timestr(group.timestamp)
group_info = {
"id": group.id,
"parent_group_id": group.parent_group_id,
"name": group.group_name,
"owner": group.creator_name,
"created_at": isoformat_timestr,
}
return group_info
def get(self, request):
""" List groups that user can share a library to.
"""
if config.ENABLE_SHARE_TO_ALL_GROUPS:
if CUSTOM_GET_GROUPS:
groups = custom_get_groups(request)
else:
groups = ccnet_api.get_all_groups(-1, -1)
else:
username = request.user.username
if is_org_context(request):
org_id = request.user.org.org_id
groups = ccnet_api.get_org_groups_by_user(org_id, username)
else:
groups = ccnet_api.get_groups(username)
try:
avatar_size = int(request.GET.get('avatar_size',
GROUP_AVATAR_DEFAULT_SIZE))
except ValueError:
avatar_size = GROUP_AVATAR_DEFAULT_SIZE
result = [self._get_group_info(request, group, avatar_size) for group in groups]
return Response(result)
|
ptranking/ltr_adhoc/util/bin_utils.py | ryo59/ptranking | 236 | 11177829 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
"""
import torch
def batch_count(batch_std_labels=None, max_rele_grade=None, descending=False, gpu=False):
"""
    Count, per query, how many documents fall into each relevance grade.
    TODO: an equivalent API is now provided by PyTorch.
    :param batch_std_labels: [batch, ranking_size] tensor of relevance grades
    :param max_rele_grade: maximum relevance grade
    :param descending: if True, grades are counted from the highest to the lowest
    :param gpu: if True, allocate the grade tensor on the GPU
    :return: [batch, max_rele_grade+1] tensor of per-grade counts
"""
rele_grades = torch.arange(max_rele_grade+1).type(torch.cuda.FloatTensor) if gpu else torch.arange(max_rele_grade+1).type(torch.FloatTensor)
if descending: rele_grades, _ = torch.sort(rele_grades, descending=True)
batch_cnts = torch.stack([(batch_std_labels == g).sum(dim=1) for g in rele_grades])
batch_cnts = torch.t(batch_cnts)
return batch_cnts
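# Illustrative example (hypothetical input): for
#   batch_std_labels = torch.tensor([[0., 1., 1., 2.]]) and max_rele_grade = 2,
# batch_count(...) with descending=False and gpu=False returns a count tensor equal to
# [[1, 2, 1]]: one grade-0 document, two grade-1 documents and one grade-2 document.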
|
reviewboard/scmtools/tests/test_tool_manager.py | seekingalpha/reviewboard | 921 | 11177840 | <gh_stars>100-1000
"""Unit tests for reviewboard.scmtools.managers.ToolManager."""
from __future__ import unicode_literals
from django.db.models import Q
from reviewboard.scmtools.models import Tool
from reviewboard.testing import TestCase
class ToolManagerTests(TestCase):
"""Unit tests for reviewboard.scmtools.managers.ToolManager."""
fixtures = ['test_scmtools']
def test_get_with_id_caches(self):
"""Testing Tool.objects.get with id= caches"""
self._test_id_cache_query(id=1)
def test_get_with_id_exact_caches(self):
"""Testing Tool.objects.get with id__exact= caches"""
self._test_id_cache_query(id__exact=1)
def test_get_with_pk_caches(self):
"""Testing Tool.objects.get with pk= caches"""
self._test_id_cache_query(pk=1)
def test_get_with_pk_exact_caches(self):
"""Testing Tool.objects.get with pk__exact= caches"""
self._test_id_cache_query(pk=1)
def test_get_with_q_id_caches(self):
"""Testing Tool.objects.get with Q(id=) caches"""
self._test_id_cache_query(Q(id=1))
def test_get_with_q_pk_caches(self):
"""Testing Tool.objects.get with Q(pk=) caches"""
self._test_id_cache_query(Q(pk=1))
def test_get_with_q_id_exact_caches(self):
"""Testing Tool.objects.get with Q(id__exact=) caches"""
self._test_id_cache_query(Q(id__exact=1))
def test_get_with_q_pk_exact_caches(self):
"""Testing Tool.objects.get with Q(pk__exact=) caches"""
self._test_id_cache_query(Q(pk__exact=1))
def test_get_non_id_lookup(self):
"""Testing Tool.objects.get with non-id/pk lookup"""
with self.assertNumQueries(1):
tool1 = Tool.objects.get(name='Git')
with self.assertNumQueries(2):
tool2 = Tool.objects.get(name='Git')
tool3 = Tool.objects.get(name='CVS')
self.assertIsNot(tool1, tool2)
self.assertEqual(tool1, tool2)
self.assertNotEqual(tool1, tool3)
def _test_id_cache_query(self, *args, **kwargs):
"""Utility function for testing ID-based caching.
Args:
*args (tuple):
Positional arguments to use for the query.
**kwargs (dict):
Keyword arguments to use for the query.
Raises:
AssertionError:
An assertion failed.
"""
Tool.objects.clear_tool_cache()
with self.assertNumQueries(1):
tool1 = Tool.objects.get(*args, **kwargs)
with self.assertNumQueries(0):
# Further queries for any available ID should reuse the cache.
tools = [
Tool.objects.get(id=1),
Tool.objects.get(id__exact=1),
Tool.objects.get(pk=1),
Tool.objects.get(pk__exact=1),
Tool.objects.get(Q(id=1)),
Tool.objects.get(Q(id__exact=1)),
Tool.objects.get(Q(pk=1)),
Tool.objects.get(Q(pk__exact=1)),
]
tool3 = Tool.objects.get(pk=2)
for tool in tools:
self.assertIs(tool1, tool)
self.assertIsNot(tool1, tool3)
|