max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
river/tree/splitter/base.py | fox-ds/river | 2,184 | 11082282 | import abc
import typing
from river import base
from ..split_criterion.base import SplitCriterion
from ..utils import BranchFactory, GradHess, GradHessStats
class Splitter(base.Estimator, abc.ABC):
"""Base class for the tree splitters.
Each Attribute Observer (AO) or Splitter monitors one input feature and finds the best
split point for this attribute. AOs can also perform other tasks related to the monitored
feature, such as estimating its probability density function (classification case).
This class should not be instantiated, as none of its methods are implemented.
"""
def __init__(self):
super().__init__()
@abc.abstractmethod
def update(self, att_val, target_val: base.typing.Target, sample_weight: float):
"""Update statistics of this observer given an attribute value, its target value
and the weight of the instance observed.
Parameters
----------
att_val
The value of the monitored attribute.
target_val
The target value.
sample_weight
The weight of the instance.
"""
@abc.abstractmethod
def cond_proba(self, att_val, target_val: base.typing.ClfTarget) -> float:
"""Get the probability for an attribute value given a class.
Parameters
----------
att_val
The value of the attribute.
target_val
The target (class label) value.
Returns
-------
Probability for an attribute value given a class.
"""
@abc.abstractmethod
def best_evaluated_split_suggestion(
self,
criterion: SplitCriterion,
pre_split_dist: typing.Union[typing.List, typing.Dict],
att_idx: base.typing.FeatureName,
binary_only: bool,
) -> BranchFactory:
"""Get the best split suggestion given a criterion and the target's statistics.
Parameters
----------
criterion
The split criterion to use.
pre_split_dist
The target statistics before the split.
att_idx
The attribute index.
binary_only
True if only binary splits are allowed.
Returns
-------
Suggestion of the best attribute split.
"""
@property
def is_numeric(self) -> bool:
"""Determine whether or not the splitter works with numerical features."""
return True
@property
def is_target_class(self) -> bool:
"""Check on which kind of learning task the splitter is designed to work.
If `True`, the splitter works with classification trees, otherwise it is designed for
regression trees.
"""
return True
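# --- Illustrative sketch (not part of the original module) -------------------
# A minimal nominal-attribute splitter for classification, added here only to
# make the abstract interface above concrete. It keeps per (attribute value,
# class) weights for `cond_proba` and returns an empty `BranchFactory()` as a
# "no split" placeholder (assumed to act as a null suggestion). The class and
# attribute names below are hypothetical; a real splitter would enumerate
# candidate partitions and rank them with the supplied split criterion.
class _SketchNominalSplitter(Splitter):
    def __init__(self):
        super().__init__()
        self._counts = {}        # (att_val, class) -> cumulative weight
        self._class_totals = {}  # class -> cumulative weight
    def update(self, att_val, target_val, sample_weight):
        self._counts[(att_val, target_val)] = (
            self._counts.get((att_val, target_val), 0.0) + sample_weight
        )
        self._class_totals[target_val] = (
            self._class_totals.get(target_val, 0.0) + sample_weight
        )
    def cond_proba(self, att_val, target_val):
        total = self._class_totals.get(target_val, 0.0)
        if total == 0.0:
            return 0.0
        return self._counts.get((att_val, target_val), 0.0) / total
    def best_evaluated_split_suggestion(
        self, criterion, pre_split_dist, att_idx, binary_only
    ):
        # A full implementation would score candidate splits with `criterion`.
        return BranchFactory()
    @property
    def is_numeric(self):
        return False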
class Quantizer(base.Estimator, abc.ABC):
"""Base class for the feature quantizers used in Stochastic Gradient Trees[^1].
References
----------
[^1]: <NAME>., <NAME>., & <NAME>. (2019, October). Stochastic Gradient Trees.
In Asian Conference on Machine Learning (pp. 1094-1109).
"""
def __init__(self):
super().__init__()
@abc.abstractmethod
def __len__(self):
pass
@abc.abstractmethod
def update(self, x_val, gh: GradHess, w: float):
pass
@abc.abstractmethod
def __iter__(self) -> typing.Tuple[float, typing.Iterator[GradHessStats]]:
pass
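# --- Illustrative sketch (not part of the original module) -------------------
# A fixed-width-bin quantizer, added only to make the abstract interface above
# concrete. Incoming feature values are bucketed into `radius`-wide bins and
# the raw (GradHess, weight) pairs are kept per bin; a production quantizer
# would aggregate them into GradHessStats objects instead. The class name,
# `radius` default and `_bins` attribute are hypothetical.
class _SketchStaticQuantizer(Quantizer):
    def __init__(self, radius: float = 0.5):
        super().__init__()
        self.radius = radius
        self._bins = {}  # bin index -> list of (GradHess, weight) pairs
    def __len__(self):
        return len(self._bins)
    def update(self, x_val, gh, w):
        index = int(x_val / self.radius)
        self._bins.setdefault(index, []).append((gh, w))
    def __iter__(self):
        # Yield (bin midpoint, stored observations), ordered by feature value.
        for index in sorted(self._bins):
            yield (index + 0.5) * self.radius, self._bins[index]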
|
tests/test_preprocess_tokenfuncs.py | samir-joshi/tmtoolkit | 167 | 11082298 | """
Preprocessing: Tests for ._tokenfuncs submodule.
"""
import string
from hypothesis import given, strategies as st
import pytest
import numpy as np
from tmtoolkit.preprocess._tokenfuncs import (
str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches,
token_match_subsequent, token_glue_subsequent, token_match
)
def test_str_multisplit():
punct = list(string.punctuation)
assert str_multisplit('Te;s,t', {';', ','}) == ['Te', 's', 't']
assert str_multisplit('US-Student', punct) == ['US', 'Student']
assert str_multisplit('-main_file.exe,', punct) == ['', 'main', 'file', 'exe', '']
@given(s=st.text(), split_chars=st.lists(st.characters()))
def test_str_multisplit_hypothesis(s, split_chars):
res = str_multisplit(s, split_chars)
assert type(res) is list
if len(s) == 0:
assert res == ['']
if len(split_chars) == 0:
assert res == [s]
for p in res:
assert all(c not in p for c in split_chars)
n_asserted_parts = 0
for c in set(split_chars):
n_asserted_parts += s.count(c)
assert len(res) == n_asserted_parts + 1
def test_str_shape():
assert str_shape('') == []
assert str_shape('xxx') == [0, 0, 0]
assert str_shape('Xxx') == [1, 0, 0]
assert str_shape('xxX') == [0, 0, 1]
assert str_shape('Xxx', lower=1, upper=0) == [0, 1, 1]
assert str_shape('Xxx', lower=1, upper=0, as_str=True) == '011'
assert str_shape('Foo', lower='x', upper='X', as_str=True) == 'Xxx'
@given(s=st.text(), lower_int=st.integers(min_value=0, max_value=9), upper_int=st.integers(min_value=0, max_value=9),
lower=st.characters(), upper=st.characters(),
as_str=st.booleans(), use_ints=st.booleans())
def test_str_shape_hypothesis(s, lower_int, upper_int, lower, upper, as_str, use_ints):
if use_ints:
l = lower_int
u = upper_int
else:
l = lower
u = upper
res = str_shape(s, l, u, as_str)
if as_str:
assert isinstance(res, str)
assert all([x in {str(l), str(u)} for x in res])
else:
assert isinstance(res, list)
assert all([x in {l, u} for x in res])
assert len(s) == len(res)
def test_str_shapesplit():
assert str_shapesplit('') == ['']
assert str_shapesplit('NewYork') == ['New', 'York']
assert str_shapesplit('newYork') == ['new', 'York']
assert str_shapesplit('newyork') == ['newyork']
assert str_shapesplit('USflag') == ['US', 'flag']
assert str_shapesplit('eMail') == ['eMail']
assert str_shapesplit('foobaR') == ['foobaR']
@given(s=st.text(string.printable), precalc_shape=st.booleans(), min_len=st.integers(min_value=1, max_value=5))
def test_str_shapesplit_hypothesis(s, precalc_shape, min_len):
if precalc_shape:
shape = str_shape(s)
else:
shape = None
res = str_shapesplit(s, shape, min_part_length=min_len)
assert len(res) >= 1
assert all([isinstance(x, str) for x in res])
if len(s) >= min_len:
assert all([min_len <= len(x) <= len(s) for x in res])
assert ''.join(res) == s
def test_expand_compound_token():
assert expand_compound_token('US-Student') == ['US', 'Student']
assert expand_compound_token('US-Student-X') == ['US', 'StudentX']
assert expand_compound_token('Camel-CamelCase') == ['Camel', 'CamelCase']
assert expand_compound_token('Camel-CamelCase', split_on_casechange=True) == ['Camel', 'Camel', 'Case']
assert expand_compound_token('Camel-camelCase') == ['Camel', 'camelCase']
assert expand_compound_token('Camel-camelCase', split_on_casechange=True) == ['Camel', 'camel', 'Case']
assert expand_compound_token('Student-X') == ['StudentX']
assert expand_compound_token('Do-Not-Disturb') == ['Do', 'Not', 'Disturb']
assert expand_compound_token('E-Mobility-Strategy') == ['EMobility', 'Strategy']
for inp, expected in zip(['US-Student', 'Do-Not-Disturb', 'E-Mobility-Strategy'],
[['USStudent'], ['Do', 'Not', 'Disturb'], ['EMobility', 'Strategy']]):
assert expand_compound_token(inp, split_on_len=None, split_on_casechange=True) == expected
for inp, expected in zip(['US-Student', 'Do-Not-Disturb', 'E-Mobility-Strategy'],
[['US', 'Student'], ['Do', 'Not', 'Disturb'], ['EMobility', 'Strategy']]):
assert expand_compound_token(inp, split_on_len=2, split_on_casechange=True) == expected
assert expand_compound_token('E-Mobility-Strategy', split_on_len=1) == ['E', 'Mobility', 'Strategy']
assert expand_compound_token('') == ['']
assert expand_compound_token('Te;s,t', split_chars=[';', ','], split_on_len=1, split_on_casechange=False) \
== expand_compound_token('Te-s-t', split_chars=['-'], split_on_len=1, split_on_casechange=False) \
== ['Te', 's', 't']
@given(s=st.text(string.printable), split_chars=st.lists(st.characters(min_codepoint=32)),
split_on_len=st.integers(1),
split_on_casechange=st.booleans())
def test_expand_compound_token_hypothesis(s, split_chars, split_on_len, split_on_casechange):
res = expand_compound_token(s, split_chars, split_on_len=split_on_len, split_on_casechange=split_on_casechange)
assert isinstance(res, list)
assert len(res) > 0
s_contains_split_char = any(c in s for c in split_chars)
s_is_split_chars = all(c in split_chars for c in s)
if not s_contains_split_char: # nothing to split on
assert res == [s]
if len(s) > 0:
assert all([p for p in res])
if not s_is_split_chars:
for p in res:
assert all(c not in p for c in split_chars)
@given(matches=st.lists(st.booleans()),
left=st.integers(min_value=0, max_value=10),
right=st.integers(min_value=0, max_value=10),
remove_overlaps=st.booleans())
def test_make_index_window_around_matches_flatten(matches, left, right, remove_overlaps):
    matches = np.array(matches, dtype=bool)
matches_ind = np.where(matches)[0]
n_true = matches.sum()
res = make_index_window_around_matches(matches, left, right, flatten=True, remove_overlaps=remove_overlaps)
assert isinstance(res, np.ndarray)
assert res.dtype.kind in {'u', 'i'}
assert len(res) >= n_true
if len(res) > 0:
assert np.min(res) >= 0
assert np.max(res) < len(matches)
if left == 0 and right == 0:
assert np.array_equal(matches_ind, res)
if remove_overlaps:
assert np.array_equal(res, np.sort(np.unique(res)))
for i in matches_ind:
for x in range(i-left, i+right+1):
if 0 <= x < len(matches):
assert x in res
@given(matches=st.lists(st.booleans()),
left=st.integers(min_value=0, max_value=10),
right=st.integers(min_value=0, max_value=10))
def test_make_index_window_around_matches_not_flattened(matches, left, right):
    matches = np.array(matches, dtype=bool)
matches_ind = np.where(matches)[0]
n_true = matches.sum()
res = make_index_window_around_matches(matches, left, right, flatten=False)
assert isinstance(res, list)
assert len(res) == n_true == len(matches_ind)
for win, i in zip(res, matches_ind):
assert win.dtype.kind in {'u', 'i'}
assert len(win) > 0
assert np.min(win) >= 0
assert np.max(win) < len(matches)
i_in_win = 0
for x in range(i-left, i+right+1):
if 0 <= x < len(matches):
assert x == win[i_in_win]
i_in_win += 1
@pytest.mark.parametrize('pattern, tokens, match_type, ignore_case, glob_method, expected', [
('a', [], 'exact', False, 'match', []),
('', [], 'exact', False, 'match', []),
('', ['a', ''], 'exact', False, 'match', [False, True]),
('a', ['a', 'b', 'c'], 'exact', False, 'match', [True, False, False]),
('a', np.array(['a', 'b', 'c']), 'exact', False, 'match', [True, False, False]),
('A', ['a', 'b', 'c'], 'exact', False, 'match', [False, False, False]),
('A', ['a', 'b', 'c'], 'exact', True, 'match', [True, False, False]),
(r'foo$', ['a', 'bfoo', 'c'], 'regex', False, 'match', [False, True, False]),
(r'foo$', ['a', 'bFOO', 'c'], 'regex', False, 'match', [False, False, False]),
(r'foo$', ['a', 'bFOO', 'c'], 'regex', True, 'match', [False, True, False]),
(r'foo*', ['a', 'food', 'c'], 'glob', False, 'match', [False, True, False]),
(r'foo*', ['a', 'FOOd', 'c'], 'glob', False, 'match', [False, False, False]),
(r'foo*', ['a', 'FOOd', 'c'], 'glob', True, 'match', [False, True, False]),
(r'foo*', ['a', 'FOOd', 'c'], 'glob', True, 'search', [False, True, False]),
])
def test_token_match(pattern, tokens, match_type, ignore_case, glob_method, expected):
assert np.array_equal(token_match(pattern, tokens, match_type, ignore_case, glob_method), np.array(expected))
def test_token_match_subsequent():
tok = ['green', 'test', 'emob', 'test', 'greener', 'tests', 'test', 'test']
with pytest.raises(ValueError):
token_match_subsequent('pattern', tok)
with pytest.raises(ValueError):
token_match_subsequent(['pattern'], tok)
assert token_match_subsequent(['a', 'b'], []) == []
assert token_match_subsequent(['foo', 'bar'], tok) == []
res = token_match_subsequent(['green*', 'test*'], tok, match_type='glob')
assert len(res) == 2
assert np.array_equal(res[0], np.array([0, 1]))
assert np.array_equal(res[1], np.array([4, 5]))
res = token_match_subsequent(['green*', 'test*', '*'], tok, match_type='glob')
assert len(res) == 2
assert np.array_equal(res[0], np.array([0, 1, 2]))
assert np.array_equal(res[1], np.array([4, 5, 6]))
@given(tokens=st.lists(st.text()), n_patterns=st.integers(0, 4))
def test_token_match_subsequent_hypothesis(tokens, n_patterns):
tokens = np.array(tokens)
n_patterns = min(len(tokens), n_patterns)
pat_ind = np.arange(n_patterns)
np.random.shuffle(pat_ind)
patterns = list(tokens[pat_ind])
if len(patterns) < 2:
with pytest.raises(ValueError):
token_match_subsequent(patterns, tokens)
else:
res = token_match_subsequent(patterns, tokens)
assert isinstance(res, list)
if len(tokens) == 0:
assert res == []
else:
for ind in res:
assert len(ind) == len(patterns)
assert np.all(ind >= 0)
assert np.all(ind < len(tokens))
assert np.all(np.diff(ind) == 1) # subsequent words
assert np.array_equal(tokens[ind], patterns)
def test_token_glue_subsequent():
tok = ['green', 'test', 'emob', 'test', 'greener', 'tests', 'test', 'test']
with pytest.raises(ValueError):
token_glue_subsequent(tok, 'invalid')
assert token_glue_subsequent(tok, []) == tok
matches = token_match_subsequent(['green*', 'test*'], tok, match_type='glob')
assert token_glue_subsequent(tok, matches) == ['green_test', 'emob', 'test', 'greener_tests', 'test', 'test']
matches = token_match_subsequent(['green*', 'test*', '*'], tok, match_type='glob')
assert token_glue_subsequent(tok, matches) == ['green_test_emob', 'test', 'greener_tests_test', 'test']
@given(tokens=st.lists(st.text(string.printable)), n_patterns=st.integers(0, 4))
def test_token_glue_subsequent_hypothesis(tokens, n_patterns):
tokens_arr = np.array(tokens)
n_patterns = min(len(tokens), n_patterns)
pat_ind = np.arange(n_patterns)
np.random.shuffle(pat_ind)
patterns = list(tokens_arr[pat_ind])
if len(patterns) > 1:
matches = token_match_subsequent(patterns, tokens)
assert token_glue_subsequent(tokens, []) == tokens
if len(tokens) == 0:
assert token_glue_subsequent(tokens, matches) == []
elif len(matches) == 0:
assert token_glue_subsequent(tokens, matches) == tokens
else:
res = token_glue_subsequent(tokens, matches)
assert isinstance(res, list)
assert 0 < len(res) < len(tokens)
for ind in matches:
assert '_'.join(tokens_arr[ind]) in res
|
hubspot/cms/site_search/__init__.py | Ronfer/hubspot-api-python | 117 | 11082303 | <gh_stars>100-1000
# coding: utf-8
# flake8: noqa
"""
CMS Site Search
Use these endpoints for searching content on your HubSpot hosted CMS website(s). # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "1.0.0"
# import apis into sdk package
from hubspot.cms.site_search.api.public_api import PublicApi
# import ApiClient
from hubspot.cms.site_search.api_client import ApiClient
from hubspot.cms.site_search.configuration import Configuration
from hubspot.cms.site_search.exceptions import OpenApiException
from hubspot.cms.site_search.exceptions import ApiTypeError
from hubspot.cms.site_search.exceptions import ApiValueError
from hubspot.cms.site_search.exceptions import ApiKeyError
from hubspot.cms.site_search.exceptions import ApiException
# import models into sdk package
from hubspot.cms.site_search.models.content_search_result import ContentSearchResult
from hubspot.cms.site_search.models.error import Error
from hubspot.cms.site_search.models.error_detail import ErrorDetail
from hubspot.cms.site_search.models.indexed_data import IndexedData
from hubspot.cms.site_search.models.public_search_results import PublicSearchResults
from hubspot.cms.site_search.models.search_hit_field import SearchHitField
|
setup.py | alvistack/tabatkins-railroad-diagrams | 1,277 | 11082322 | from setuptools import setup, find_packages
with open("README-py.md", "r") as fh:
long_description = fh.read()
with open("semver.txt", "r") as fh:
semver = fh.read().strip()
setup(
name='railroad-diagrams',
py_modules=['railroad'],
version=semver,
description='Generate SVG railroad syntax diagrams, like on JSON.org.',
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
include_package_data=True,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/tabatkins/railroad-diagrams',
keywords=['diagrams', 'syntax', 'grammar', 'railroad diagrams'],
classifiers=[
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
) |
androguard/gui/TextDecorators.py | amimo/androguard | 4,084 | 11082333 | <filename>androguard/gui/TextDecorators.py<gh_stars>1000+
import re
import string
from PyQt5 import QtGui, QtCore
class CTextDecorator:
redPen = QtGui.QPen(QtGui.QColor(255, 0, 0))
greenPen = QtGui.QPen(QtGui.QColor(255, 255, 0))
whitePen = QtGui.QPen(QtGui.QColor(255, 255, 255))
normalPen = QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine)
MZbrush = QtGui.QBrush(QtGui.QColor(128, 0, 0))
grayBrush = QtGui.QBrush(QtGui.QColor(128, 128, 128))
def __init__(self):
pass
class TextDecorator(CTextDecorator):
def __init__(self, viewmode):
self.operations = []
self.dataModel = viewmode.getDataModel()
self.viewmode = viewmode
self.penMap = {}
self.brushMap = {}
self.PenInterval = []
self.normalPen = QtGui.QPen(QtGui.QColor(192, 192, 192), 1, QtCore.Qt.SolidLine)
# if we want to generate T/F table
self.Special = string.ascii_letters + string.digits + ' .;\':;=\"?-!()/\\_'
self.Special = [False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, True, True,
True, False, False, False, False, True, True,
True, False, False, False, True, True, True, True, True, True, True, True, True, True, True,
True, True, True, True, False, True, False, True,
False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True,
True, True, True, False, True, False, False, True, False, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True, True, True, False, False, False, False,
False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False]
def reset(self):
self.penMap = {}
self.brushMap = {}
self.PenInterval = []
def getDataModel(self):
return self.dataModel
def isText(self, c):
"""
D = []
for i in range(256):
b = False
if self.isText(chr(i)):
b = True
D.append(b)
print D
sys.exit()
"""
return self.Special[ord(c)]
def getChar(self, idx):
# self.page = self.getDataModel().getDisplayablePage()
if idx < len(self.page):
return self.page[idx]
return 0
def decorate(self, pageOffset=None):
if pageOffset:
self.page = self.viewmode.getDisplayablePage(pageOffset=pageOffset)
else:
self.page = self.viewmode.getDisplayablePage()
return self.page
def addPenInterval(self, a, b, pen, ignoreHighlights=True):
self.PenInterval.append((a, b, pen, ignoreHighlights))
def choosePen(self, idx):
key = self.dataModel.getOffset() + idx
# if we do have a pen with that index, return it if it's different than default pen
# otherwise, return the pen that was set in that interval
        # the priority here is the pen from other transformations, then the interval pen
for a, b, ignoreHighlights, pen in self.PenInterval:
# in interval
if a <= key <= b:
if ignoreHighlights:
return pen
if key in self.penMap:
if self.penMap[key] == self.normalPen:
return pen
else:
return self.penMap[key]
else:
return pen
if key in self.penMap:
return self.penMap[key]
return self.normalPen
def chooseBrush(self, idx):
off = self.dataModel.getOffset() + idx
if off in self.brushMap:
return self.brushMap[off]
return None
class PageDecorator(TextDecorator):
def __init__(self, decorated):
pass
def reset(self):
self.decorated.reset()
self.penMap = {}
self.brushMap = {}
self.PenInterval = []
def getBrushMap(self):
return self.brushMap
def getPenMap(self):
return self.penMap
def doit(self):
pass
def getDataModel(self):
return self.dataModel
class HighlightASCII(PageDecorator):
def __init__(self, decorated):
self.dataModel = decorated.getDataModel()
self.penMap = decorated.penMap
self.decorated = decorated
super().__init__(decorated)
self.dataModel = super().getDataModel()
def decorate(self, pageOffset=None):
page = self.decorated.decorate(pageOffset)
self.PenInterval = self.decorated.PenInterval
self.brushMap = self.decorated.brushMap
self.penMap = self.decorated.penMap
off = self.dataModel.getOffset()
Match = [(m.start(), m.end()) for m in re.finditer(b'([a-zA-Z0-9\\-\\\\.%*:/? _<>]){4,}', page)]
for s, e in Match:
for i in range(e - s):
idx = off + s + i
if idx not in self.penMap:
self.penMap[off + s + i] = self.redPen
self.page = page
return self.page
class HighlightPrefix(PageDecorator):
def __init__(self, decorated, text, additionalLength=0, brush=None, pen=None):
super().__init__(decorated)
self.dataModel = decorated.getDataModel()
self.decorated = decorated
self.additionalLength = additionalLength
self.brush = brush
self.text = text
self.pen = pen
def decorate(self, pageOffset=None):
page = self.decorated.decorate(pageOffset)
self.PenInterval = self.decorated.PenInterval
self.brushMap = self.decorated.brushMap
self.penMap = self.decorated.penMap
self.page = self.highliteWithPrefix(page, self.text, self.additionalLength, self.brush, self.pen)
return self.page
def highliteWithPrefix(self, page, text, additionalLength=0, brush=None, pen=None):
        # todo: I have not found a more elegant way to select all occurrences of `text`
        # (regexes did not work here: "bad re expression")
lenText = len(text)
M = []
idx = 0
if lenText > 0:
while idx < len(page):
idx = page.find(text, idx, len(page))
if idx == -1:
break
M.append((idx, lenText + additionalLength))
idx += lenText + additionalLength
off = self.dataModel.getOffset()
for start, length in M:
for i in range(length):
self.penMap[off + start + i] = pen
self.brushMap[off + start + i] = brush
return page
class HighlightWideChar(PageDecorator):
def __init__(self, decorated):
super().__init__(decorated)
self.dataModel = decorated.getDataModel()
self.decorated = decorated
def decorate(self, pageOffset=None):
self.page = self.decorated.decorate(pageOffset)
self.PenInterval = self.decorated.PenInterval
self.brushMap = self.decorated.brushMap
self.penMap = self.decorated.penMap
self.page = self.highliteWidechar2(self.page)
return self.page
def highliteWidechar2(self, page):
pageStart = self.dataModel.getOffset()
pageEnd = pageStart + len(page)
touched = False
# for s, e in self.Intervals:
# touched = True
if not touched:
# expand
Match = [(m.start(), m.end()) for m in re.finditer(r'([a-zA-Z0-9\-\\.%*:/? ]\x00){4,}', page)]
for s, e in Match:
for i in range(e - s):
self.penMap[pageStart + s + i] = QtGui.QPen(QtGui.QColor(255, 255, 0))
# get rid of '\x00'
string = page[s:e:2]
l = len(string)
# copy string that has no zeros
page[s:s + l] = string
# fill with zeros the remaining space
page[s + l: s + 2 * l] = '\x00' * l
return page
### todo: other way to highlight widechar, should test and see which one is faster
"""
def _changeText(self, page, page_start, I):
page_end = page_start + len(page)
for obj in I:
if obj['s'] >= page_start and obj['e'] <= page_end:
page[obj['s']-page_start:obj['e']-page_start] = obj['text']
def _expand(self, page, off, start, end):
I = []
start = start - off
end = end - off
i = start
while i < end:
if i+1 < end:
if page[i+1] == 0 and self.isText(chr(page[i])):
k = 0
for j in xrange(i, end, 2):
if j + 1 < end:
if self.isText(chr(page[j])) and page[j+1] == 0:
k += 1
else:
break
if k > 4:
if i+k*2 <= end:
obj = {}
obj['s'] = off + i + 1
obj['e'] = off + i + k * 2
for idx, j in enumerate(range(i+1, i + k*2)):
if j > i + k:
page[j] = 0
#self.penMap[j] = self.greenPen
elif j+idx+1 < end:
page[j] = page[j + idx + 1]
self.penMap[off + j] = self.greenPen
obj['text'] = page[i+1:i+k*2]
I.append(obj)
self.penMap[off + i] = self.greenPen
i += k*2
i = i + 1
return I
pass
def highliteWidechar(self, page):
off = self.dataModel.getOffset()
page_end = off + len(page)
touched = False
#print '-------'
for idx, iv in enumerate(self.Intervals):
#print 'acum aici'
# in interval
s, e, I = iv
#print s ,e
#print page_end
page_start = off
if off >= s:
touched = True
if page_end <= e:
self._changeText(page, off, I)
else:
if off <= e:
I2 = self._expand(page, off, e, page_end)
for obj in I2:
I.append(obj)
e = page_end
self.Intervals[idx] = (s, e, I)
else:
                        # we are several pages further down
touched = False
else:
if page_end <= e and page_end >= s:
# scrolled up
I2 = self._expand(page, off, page_start, s)
for obj in I2:
I.append(obj)
s = page_start
self.Intervals[idx] = (s, e, I)
touched = True
else:
# out of this interval
touched = False
if not touched or touched:
#print 'aici'
self.Intervals.append((off, page_end, self._expand(page, off, off, page_end)))
"""
class RangePen(PageDecorator):
def __init__(self, decorated, a, b, pen, ignoreHighlights=True):
super().__init__(decorated)
self.dataModel = decorated.getDataModel()
self.decorated = decorated
self.a = a
self.b = b
self.pen = pen
self.already = False
self.ignoreHighlights = ignoreHighlights
def decorate(self, pageOffset=None):
self.page = self.decorated.decorate(pageOffset)
self.PenInterval = self.decorated.PenInterval
self.brushMap = self.decorated.brushMap
self.penMap = self.decorated.penMap
if not self.already:
self.addPenInterval(self.a, self.b, self.ignoreHighlights, self.pen)
self.already = True
return self.page
|
client/verta/tests/test_permissions/test_visibility_api.py | fool-sec-review/modeldb | 624 | 11082336 | <reponame>fool-sec-review/modeldb
"""
Basic tests to make sure the client passes `visibility` without errors.
"""
import pytest
import requests
from verta._protos.public.common import CommonService_pb2 as _CommonCommonService
from verta._protos.public.modeldb import DatasetService_pb2 as _DatasetService
from verta._protos.public.modeldb import ProjectService_pb2 as _ProjectService
from verta._protos.public.modeldb.versioning import VersioningService_pb2 as _VersioningService
from verta._internal_utils import _utils
from verta.visibility import (
OrgCustom,
Private,
)
pytestmark = pytest.mark.not_oss
def assert_visibility(entity, visibility, entity_name):
if not entity._msg.HasField('custom_permission'):
pytest.skip("backend does not support new visibility")
assert entity._msg.custom_permission == visibility._custom_permission
if entity_name == "registered_model":
assert entity._msg.resource_visibility == visibility._visibility
else:
assert entity._msg.visibility == visibility._visibility
def assert_endpoint_visibility(endpoint, visibility):
endpoint_json = endpoint._get_json_by_id(endpoint._conn, endpoint.workspace, endpoint.id)
if 'custom_permission' not in endpoint_json['creator_request']:
pytest.skip("backend does not support new visibility")
assert endpoint_json['creator_request']['custom_permission']['collaborator_type'] == visibility._collaborator_type_str
assert endpoint_json['creator_request']['resource_visibility'] == visibility._visibility_str
class TestCreate:
@pytest.mark.parametrize(
("entity_name", "visibility"),
[
("dataset", OrgCustom(write=True)),
("project", OrgCustom(write=True, deploy=True)),
("registered_model", OrgCustom(write=True, deploy=True)),
]
)
def test_mdb_entity(self, client, organization, entity_name, visibility):
create_entity = getattr(client, "create_{}".format(entity_name))
entity = create_entity(workspace=organization.name, visibility=visibility)
try:
assert_visibility(entity, visibility, entity_name)
finally:
entity.delete()
client._ctx.proj = None # otherwise client teardown tries to delete
def test_endpoint(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
endpoint = client.create_endpoint(
path=_utils.generate_default_name(),
workspace=organization.name, visibility=visibility,
)
created_entities.append(endpoint)
assert_endpoint_visibility(endpoint, visibility)
class TestSet:
@pytest.mark.parametrize(
("entity_name", "visibility"),
[
("dataset", OrgCustom(write=True)),
("project", OrgCustom(write=True, deploy=True)),
("registered_model", OrgCustom(write=True, deploy=True)),
]
)
def test_mdb_entity(self, client, organization, entity_name, visibility):
set_entity = getattr(client, "set_{}".format(entity_name))
entity = set_entity(workspace=organization.name, visibility=visibility)
try:
assert_visibility(entity, visibility, entity_name)
# second set ignores visibility
with pytest.warns(UserWarning, match="cannot set"):
entity = set_entity(entity.name, workspace=organization.name, visibility=Private())
assert_visibility(entity, visibility, entity_name)
finally:
entity.delete()
client._ctx.proj = None # otherwise client teardown tries to delete
def test_endpoint(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
endpoint = client.set_endpoint(
path=_utils.generate_default_name(),
workspace=organization.name, visibility=visibility,
)
created_entities.append(endpoint)
assert_endpoint_visibility(endpoint, visibility)
# second set ignores visibility
with pytest.warns(UserWarning, match="cannot set"):
endpoint = client.set_endpoint(path=endpoint.path, workspace=organization.name, visibility=Private())
assert_endpoint_visibility(endpoint, visibility)
class TestPublicWithinOrg:
"""
`visibility` gets translated to an equivalent `public_within_org` value for
compatibility with older backends.
"""
def test_dataset(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
dataset = client.set_dataset(workspace=organization.name, visibility=visibility)
created_entities.append(dataset)
if visibility._to_public_within_org():
assert dataset._msg.dataset_visibility == _DatasetService.DatasetVisibilityEnum.ORG_SCOPED_PUBLIC
else:
assert dataset._msg.dataset_visibility == _DatasetService.DatasetVisibilityEnum.PRIVATE
def test_endpoint(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
endpoint = client.set_endpoint(
path=_utils.generate_default_name(),
workspace=organization.name, visibility=visibility,
)
created_entities.append(endpoint)
endpoint_json = endpoint._get_json_by_id(endpoint._conn, endpoint.workspace, endpoint.id)
if visibility._to_public_within_org():
assert endpoint_json['creator_request']['visibility'] == "ORG_SCOPED_PUBLIC"
else:
assert endpoint_json['creator_request']['visibility'] == "PRIVATE"
def test_project(self, client, organization):
visibility = OrgCustom(write=True)
entity = client.set_project(workspace=organization.name, visibility=visibility)
try:
if visibility._to_public_within_org():
assert entity._msg.project_visibility == _ProjectService.ORG_SCOPED_PUBLIC
else:
assert entity._msg.project_visibility == _ProjectService.PRIVATE
finally:
entity.delete()
client._ctx.proj = None # otherwise client teardown tries to delete
def test_registered_model(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
entity = client.set_registered_model(workspace=organization.name, visibility=visibility)
created_entities.append(entity)
if visibility._to_public_within_org():
assert entity._msg.visibility == _CommonCommonService.VisibilityEnum.ORG_SCOPED_PUBLIC
else:
assert entity._msg.visibility == _CommonCommonService.VisibilityEnum.PRIVATE
|
src/utils/csv_to_pkl.py | scaomath/BCAI_kaggle_CHAMPS | 108 | 11082354 | <filename>src/utils/csv_to_pkl.py
## Copyright (c) 2017 <NAME> GmbH
## All rights reserved.
##
## This source code is licensed under the MIT license found in the
## LICENSE file in the root directory of this source tree.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import itertools
import pickle
import gzip
import bz2
import os
import torch.nn as nn
import torch.optim as optim
import torch
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
def get_datasets(root_dir, mode=""):
root_dir = root_dir.strip("/")
full_path = f"/torch_proc_train{mode}.pkl.gz"
p1_path = f"/torch_proc_train{mode}_p1.pkl.gz"
p2_path = f"/torch_proc_train{mode}_p2.pkl.gz"
if mode != "":
print(f"Using {mode} for dataset")
if mode == "" and (not os.path.exists(root_dir + full_path)) and os.path.exists(root_dir + p1_path):
# Found part 1 and 2 of the dataset. Concatenate them first
with gzip.open(root_dir + f"/torch_proc_train{mode}_p1.pkl.gz", "rb") as f:
print("Wait Patiently! Combining part 1 & 2 of the dataset so that we don't need to do it in the future.")
D_train_part1 = pickle.load(f)
with gzip.open(root_dir + f"/torch_proc_train{mode}_p2.pkl.gz", "rb") as f:
D_train_part2 = pickle.load(f)
D_train = tuple([torch.cat([D_train_part1[i], D_train_part2[i]], dim=0) for i in range(len(D_train_part1))])
with gzip.open(root_dir+'/'+f"/torch_proc_train{mode}.pkl.gz", "wb") as f:
pickle.dump(D_train, f, protocol=4)
if os.path.exists(root_dir + full_path):
print("Found gzipped dataset!")
with gzip.open(root_dir + f"/torch_proc_train{mode}.pkl.gz", "rb") as f:
train = TensorDataset(*pickle.load(f))
if mode == "_full":
return train, None
with gzip.open(root_dir + f"/torch_proc_val{mode}.pkl.gz", "rb") as f:
val = TensorDataset(*pickle.load(f))
return train, val
else:
with bz2.open(root_dir + f"/torch_proc_train{mode}.pkl.bz2", "rb") as f:
train = TensorDataset(*pickle.load(f))
if mode == "_full":
return train, None
with bz2.open(root_dir + f"/torch_proc_val{mode}.pkl.bz2", "rb") as f:
val = TensorDataset(*pickle.load(f))
return train, val
def get_submission_set(root_dir, mode=""):
    full_path = f"/torch_proc_submission{mode}.pkl.gz"
if mode != "":
print(f"Using {mode} for dataset")
if os.path.exists(root_dir + full_path):
with gzip.open(root_dir + f"/torch_proc_submission{mode}.pkl.gz", "rb") as f:
submission = TensorDataset(*pickle.load(f))
else:
with bz2.open(root_dir + "/torch_proc_submission.pkl.bz2", "rb") as f:
submission = TensorDataset(*pickle.load(f))
return submission
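# --- Illustrative usage (not part of the original script) --------------------
# A minimal sketch of wiring the helpers above into a training loop, assuming
# the pickled tensor tuples already exist under `root_dir`. The directory path
# and batch size below are placeholders.
if __name__ == "__main__":
    train_dataset, val_dataset = get_datasets("data/champs")  # hypothetical path
    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
    if val_dataset is not None:
        val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
    for batch in train_loader:
        # Each batch is a tuple of tensors in the order stored in the
        # TensorDataset; stop after one batch in this sketch.
        break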
|
model_hub/examples/huggingface/question-answering/qa_utils.py | gh-determined-ai/determined | 1,729 | 11082392 | <filename>model_hub/examples/huggingface/question-answering/qa_utils.py
import numpy as np
import model_hub.huggingface as hf
import model_hub.utils as utils
def compute_metrics(
data_config,
column_names,
post_processing_function,
raw_datasets,
tokenized_datasets,
model,
metric,
predictions,
):
inds, predictions = zip(*predictions)
inds = np.hstack(inds)
sorted_inds = np.argsort(inds)
predictions = zip(*predictions)
predictions = [utils.expand_like(p) for p in predictions]
predictions = [p[sorted_inds] for p in predictions]
# We need to add back in columns needed for validation.
tokenized_datasets["validation"].set_format(
type=tokenized_datasets["validation"].format["type"],
columns=list(tokenized_datasets["validation"].features.keys()),
)
output = post_processing_function(
examples=raw_datasets["validation"],
features=tokenized_datasets["validation"],
predictions=predictions,
data_args=data_config,
column_names=column_names,
prefix="eval",
model=model,
)
result = metric.compute(predictions=output.predictions, references=output.label_ids)
# Then remove them again so that data collation doesn't break.
hf.remove_unused_columns(model, tokenized_datasets["validation"])
return result
class DatasetWithIndex:
def __init__(self, dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
sample = self.dataset[idx]
sample["ind"] = idx
return sample
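# --- Illustrative usage (not part of the original module) --------------------
# DatasetWithIndex assumes the wrapped dataset yields dict-like samples; the
# added "ind" field is what allows `compute_metrics` above to re-sort the
# gathered predictions back into example order. A toy, self-contained example:
if __name__ == "__main__":
    toy_dataset = [{"input_ids": [101, 2023, 102]}, {"input_ids": [101, 2003, 102]}]
    indexed = DatasetWithIndex(toy_dataset)
    assert indexed[1]["ind"] == 1  # each sample now carries its original index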
|
checkov/cloudformation/checks/resource/aws/S3IgnorePublicACLs.py | kylelaker/checkov | 4,013 | 11082403 | <gh_stars>1000+
from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories
class S3IgnorePublicACLs(BaseResourceValueCheck):
def __init__(self):
name = "Ensure S3 bucket has ignore public ACLs enabled"
id = "CKV_AWS_55"
supported_resources = ['AWS::S3::Bucket']
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'Properties/PublicAccessBlockConfiguration/IgnorePublicAcls'
check = S3IgnorePublicACLs()
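# --- Illustrative sketch (not part of the original check) --------------------
# Roughly the parsed CloudFormation resource this check accepts, expressed as a
# Python dict whose nested keys mirror the inspected path
# 'Properties/PublicAccessBlockConfiguration/IgnorePublicAcls'.
_example_passing_resource = {
    "Type": "AWS::S3::Bucket",
    "Properties": {
        "PublicAccessBlockConfiguration": {
            "IgnorePublicAcls": True,
        }
    },
}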
|
examples/scrollbar.py | nano-labs/bullet | 2,962 | 11082429 | from bullet import ScrollBar
from bullet import emojis
from bullet import colors
cli = ScrollBar(
"How are you feeling today? ",
emojis.feelings[0],
height = 5,
align = 5,
margin = 0,
pointer = "👉",
background_on_switch = colors.background['default'],
word_on_switch = colors.foreground['default'],
return_index = True
)
print('\n')
result = cli.launch()
print(result) |
report/csv_to_html.py | ysh329/mobile-ai-bench | 313 | 11082478 | import os
import pandas as pd
csvs = ['run_report', 'prepare_report', 'precision_report']
for csv in csvs:
csv_file = 'output/' + csv + '.csv'
if os.path.exists(csv_file):
df = pd.read_csv('output/' + csv + '.csv')
df.to_html('report/' + csv + '.html')
dir_path = os.path.dirname(os.path.realpath(__file__))
print("Open %s/index.html in a browser to see the report!" % dir_path)
|
external/rocksdb/tools/block_cache_analyzer/block_cache_pysim.py | cashbitecrypto/cashbite | 12,278 | 11082523 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import functools
import gc
import heapq
import random
import sys
import time
from collections import OrderedDict
from os import path
import numpy as np
kSampleSize = 64 # The sample size used when performing eviction.
kMicrosInSecond = 1000000
kSecondsInMinute = 60
kSecondsInHour = 3600
class TraceRecord:
"""
A trace record represents a block access.
It holds the same struct as BlockCacheTraceRecord in
trace_replay/block_cache_tracer.h
"""
def __init__(
self,
access_time,
block_id,
block_type,
block_size,
cf_id,
cf_name,
level,
fd,
caller,
no_insert,
get_id,
key_id,
kv_size,
is_hit,
referenced_key_exist_in_block,
num_keys_in_block,
table_id,
seq_number,
block_key_size,
key_size,
block_offset_in_file,
next_access_seq_no,
):
self.access_time = access_time
self.block_id = block_id
self.block_type = block_type
self.block_size = block_size + block_key_size
self.cf_id = cf_id
self.cf_name = cf_name
self.level = level
self.fd = fd
self.caller = caller
if no_insert == 1:
self.no_insert = True
else:
self.no_insert = False
self.get_id = get_id
self.key_id = key_id
self.kv_size = kv_size
if is_hit == 1:
self.is_hit = True
else:
self.is_hit = False
if referenced_key_exist_in_block == 1:
self.referenced_key_exist_in_block = True
else:
self.referenced_key_exist_in_block = False
self.num_keys_in_block = num_keys_in_block
self.table_id = table_id
self.seq_number = seq_number
self.block_key_size = block_key_size
self.key_size = key_size
self.block_offset_in_file = block_offset_in_file
self.next_access_seq_no = next_access_seq_no
class CacheEntry:
"""A cache entry stored in the cache."""
def __init__(
self,
value_size,
cf_id,
level,
block_type,
table_id,
access_number,
time_s,
num_hits=0,
):
self.value_size = value_size
self.last_access_number = access_number
self.num_hits = num_hits
        self.cf_id = cf_id
self.level = level
self.block_type = block_type
self.last_access_time = time_s
self.insertion_time = time_s
self.table_id = table_id
def __repr__(self):
"""Debug string."""
return "(s={},last={},hits={},cf={},l={},bt={})\n".format(
self.value_size,
self.last_access_number,
self.num_hits,
self.cf_id,
self.level,
self.block_type,
)
def cost_class(self, cost_class_label):
if cost_class_label == "table_bt":
return "{}-{}".format(self.table_id, self.block_type)
elif cost_class_label == "table":
return "{}".format(self.table_id)
elif cost_class_label == "bt":
return "{}".format(self.block_type)
elif cost_class_label == "cf":
return "{}".format(self.cf_id)
elif cost_class_label == "cf_bt":
return "{}-{}".format(self.cf_id, self.block_type)
elif cost_class_label == "table_level_bt":
return "{}-{}-{}".format(self.table_id, self.level, self.block_type)
assert False, "Unknown cost class label {}".format(cost_class_label)
return None
class HashEntry:
"""A hash entry stored in a hash table."""
def __init__(self, key, hash, value):
self.key = key
self.hash = hash
self.value = value
def __repr__(self):
return "k={},h={},v=[{}]".format(self.key, self.hash, self.value)
class HashTable:
"""
A custom implementation of hash table to support fast random sampling.
It is closed hashing and uses chaining to resolve hash conflicts.
It grows/shrinks the hash table upon insertion/deletion to support
fast lookups and random samplings.
"""
def __init__(self):
self.initial_size = 32
self.table = [None] * self.initial_size
self.elements = 0
def random_sample(self, sample_size):
"""Randomly sample 'sample_size' hash entries from the table."""
samples = []
index = random.randint(0, len(self.table) - 1)
pos = index
# Starting from index, adding hash entries to the sample list until
# sample_size is met or we ran out of entries.
while True:
if self.table[pos] is not None:
for i in range(len(self.table[pos])):
if self.table[pos][i] is None:
continue
samples.append(self.table[pos][i])
if len(samples) == sample_size:
break
pos += 1
pos = pos % len(self.table)
if pos == index or len(samples) == sample_size:
break
assert len(samples) <= sample_size
return samples
def __repr__(self):
all_entries = []
for i in range(len(self.table)):
if self.table[i] is None:
continue
for j in range(len(self.table[i])):
if self.table[i][j] is not None:
all_entries.append(self.table[i][j])
return "{}".format(all_entries)
def values(self):
all_values = []
for i in range(len(self.table)):
if self.table[i] is None:
continue
for j in range(len(self.table[i])):
if self.table[i][j] is not None:
all_values.append(self.table[i][j].value)
return all_values
def __len__(self):
return self.elements
def insert(self, key, hash, value):
"""
Insert a hash entry in the table. Replace the old entry if it already
exists.
"""
self.grow()
inserted = False
index = hash % len(self.table)
if self.table[index] is None:
self.table[index] = []
# Search for the entry first.
for i in range(len(self.table[index])):
if self.table[index][i] is None:
continue
if self.table[index][i].hash == hash and self.table[index][i].key == key:
# The entry already exists in the table.
self.table[index][i] = HashEntry(key, hash, value)
return
# Find an empty slot.
for i in range(len(self.table[index])):
if self.table[index][i] is None:
self.table[index][i] = HashEntry(key, hash, value)
inserted = True
break
if not inserted:
self.table[index].append(HashEntry(key, hash, value))
self.elements += 1
def resize(self, new_size):
if new_size == len(self.table):
return
if new_size < self.initial_size:
return
if self.elements < 100:
return
new_table = [None] * new_size
# Copy 'self.table' to new_table.
for i in range(len(self.table)):
entries = self.table[i]
if entries is None:
continue
for j in range(len(entries)):
if entries[j] is None:
continue
index = entries[j].hash % new_size
if new_table[index] is None:
new_table[index] = []
new_table[index].append(entries[j])
self.table = new_table
del new_table
# Manually call python gc here to free the memory as 'self.table'
# might be very large.
gc.collect()
def grow(self):
if self.elements < 4 * len(self.table):
return
new_size = int(len(self.table) * 1.5)
self.resize(new_size)
def delete(self, key, hash):
index = hash % len(self.table)
deleted = False
deleted_entry = None
if self.table[index] is None:
return
for i in range(len(self.table[index])):
if (
self.table[index][i] is not None
and self.table[index][i].hash == hash
and self.table[index][i].key == key
):
deleted_entry = self.table[index][i]
self.table[index][i] = None
self.elements -= 1
deleted = True
break
if deleted:
self.shrink()
return deleted_entry
def shrink(self):
if self.elements * 2 >= len(self.table):
return
new_size = int(len(self.table) * 0.7)
self.resize(new_size)
def lookup(self, key, hash):
index = hash % len(self.table)
if self.table[index] is None:
return None
for i in range(len(self.table[index])):
if (
self.table[index][i] is not None
and self.table[index][i].hash == hash
and self.table[index][i].key == key
):
return self.table[index][i].value
return None
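# --- Illustrative usage (not part of the original script) --------------------
# A small sketch of the HashTable API defined above: entries are keyed by a
# (key, caller-supplied hash) pair, and random_sample() is what sampling-based
# eviction relies on. The values below are plain ints purely for illustration.
def _hash_table_demo():
    table = HashTable()
    for i in range(10):
        table.insert("b{}".format(i), i, i * 100)
    assert len(table) == 10
    assert table.lookup("b3", 3) == 300
    table.delete("b3", 3)
    assert table.lookup("b3", 3) is None
    sample = table.random_sample(kSampleSize)
    # At most kSampleSize entries come back; fewer if the table is smaller.
    assert len(sample) <= kSampleSize
    return sample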
class MissRatioStats:
def __init__(self, time_unit):
self.num_misses = 0
self.num_accesses = 0
self.time_unit = time_unit
self.time_misses = {}
self.time_miss_bytes = {}
self.time_accesses = {}
def update_metrics(self, access_time, is_hit, miss_bytes):
        access_time //= kMicrosInSecond * self.time_unit
self.num_accesses += 1
if access_time not in self.time_accesses:
self.time_accesses[access_time] = 0
self.time_accesses[access_time] += 1
if not is_hit:
self.num_misses += 1
if access_time not in self.time_misses:
self.time_misses[access_time] = 0
self.time_miss_bytes[access_time] = 0
self.time_misses[access_time] += 1
self.time_miss_bytes[access_time] += miss_bytes
def reset_counter(self):
self.num_misses = 0
self.num_accesses = 0
self.time_miss_bytes.clear()
self.time_misses.clear()
self.time_accesses.clear()
def compute_miss_bytes(self):
miss_bytes = []
for at in self.time_miss_bytes:
miss_bytes.append(self.time_miss_bytes[at])
miss_bytes = sorted(miss_bytes)
avg_miss_bytes = 0
p95_miss_bytes = 0
for i in range(len(miss_bytes)):
avg_miss_bytes += float(miss_bytes[i]) / float(len(miss_bytes))
p95_index = min(int(0.95 * float(len(miss_bytes))), len(miss_bytes) - 1)
p95_miss_bytes = miss_bytes[p95_index]
return avg_miss_bytes, p95_miss_bytes
def miss_ratio(self):
return float(self.num_misses) * 100.0 / float(self.num_accesses)
def write_miss_timeline(
self, cache_type, cache_size, target_cf_name, result_dir, start, end
):
        start //= kMicrosInSecond * self.time_unit
        end //= kMicrosInSecond * self.time_unit
header_file_path = "{}/header-ml-miss-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
if not path.exists(header_file_path):
with open(header_file_path, "w+") as header_file:
header = "time"
for trace_time in range(start, end):
header += ",{}".format(trace_time)
header_file.write(header + "\n")
file_path = "{}/data-ml-miss-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
with open(file_path, "w+") as file:
row = "{}".format(cache_type)
for trace_time in range(start, end):
row += ",{}".format(self.time_misses.get(trace_time, 0))
file.write(row + "\n")
def write_miss_ratio_timeline(
self, cache_type, cache_size, target_cf_name, result_dir, start, end
):
        start //= kMicrosInSecond * self.time_unit
        end //= kMicrosInSecond * self.time_unit
header_file_path = "{}/header-ml-miss-ratio-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
if not path.exists(header_file_path):
with open(header_file_path, "w+") as header_file:
header = "time"
for trace_time in range(start, end):
header += ",{}".format(trace_time)
header_file.write(header + "\n")
file_path = "{}/data-ml-miss-ratio-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
with open(file_path, "w+") as file:
row = "{}".format(cache_type)
for trace_time in range(start, end):
naccesses = self.time_accesses.get(trace_time, 0)
miss_ratio = 0
if naccesses > 0:
miss_ratio = float(
self.time_misses.get(trace_time, 0) * 100.0
) / float(naccesses)
row += ",{0:.2f}".format(miss_ratio)
file.write(row + "\n")
class PolicyStats:
def __init__(self, time_unit, policies):
self.time_selected_polices = {}
self.time_accesses = {}
self.policy_names = {}
self.time_unit = time_unit
for i in range(len(policies)):
self.policy_names[i] = policies[i].policy_name()
def update_metrics(self, access_time, selected_policy):
        access_time //= kMicrosInSecond * self.time_unit
if access_time not in self.time_accesses:
self.time_accesses[access_time] = 0
self.time_accesses[access_time] += 1
if access_time not in self.time_selected_polices:
self.time_selected_polices[access_time] = {}
policy_name = self.policy_names[selected_policy]
if policy_name not in self.time_selected_polices[access_time]:
self.time_selected_polices[access_time][policy_name] = 0
self.time_selected_polices[access_time][policy_name] += 1
def write_policy_timeline(
self, cache_type, cache_size, target_cf_name, result_dir, start, end
):
        start //= kMicrosInSecond * self.time_unit
        end //= kMicrosInSecond * self.time_unit
header_file_path = "{}/header-ml-policy-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
if not path.exists(header_file_path):
with open(header_file_path, "w+") as header_file:
header = "time"
for trace_time in range(start, end):
header += ",{}".format(trace_time)
header_file.write(header + "\n")
file_path = "{}/data-ml-policy-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
with open(file_path, "w+") as file:
for policy in self.policy_names:
policy_name = self.policy_names[policy]
row = "{}-{}".format(cache_type, policy_name)
for trace_time in range(start, end):
row += ",{}".format(
self.time_selected_polices.get(trace_time, {}).get(
policy_name, 0
)
)
file.write(row + "\n")
def write_policy_ratio_timeline(
self, cache_type, cache_size, target_cf_name, file_path, start, end
):
start /= kMicrosInSecond * self.time_unit
end /= kMicrosInSecond * self.time_unit
header_file_path = "{}/header-ml-policy-ratio-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
if not path.exists(header_file_path):
with open(header_file_path, "w+") as header_file:
header = "time"
for trace_time in range(start, end):
header += ",{}".format(trace_time)
header_file.write(header + "\n")
file_path = "{}/data-ml-policy-ratio-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
with open(file_path, "w+") as file:
for policy in self.policy_names:
policy_name = self.policy_names[policy]
row = "{}-{}".format(cache_type, policy_name)
for trace_time in range(start, end):
naccesses = self.time_accesses.get(trace_time, 0)
ratio = 0
if naccesses > 0:
ratio = float(
self.time_selected_polices.get(trace_time, {}).get(
policy_name, 0
)
* 100.0
) / float(naccesses)
row += ",{0:.2f}".format(ratio)
file.write(row + "\n")
class Policy(object):
"""
A policy maintains a set of evicted keys. It returns a reward of one to
itself if it has not evicted a missing key. Otherwise, it gives itself 0
reward.
"""
def __init__(self):
self.evicted_keys = {}
def evict(self, key, max_size):
self.evicted_keys[key] = 0
def delete(self, key):
self.evicted_keys.pop(key, None)
def prioritize_samples(self, samples, auxilliary_info):
raise NotImplementedError
def policy_name(self):
raise NotImplementedError
def generate_reward(self, key):
if key in self.evicted_keys:
return 0
return 1
class LRUPolicy(Policy):
def prioritize_samples(self, samples, auxilliary_info):
        return sorted(
            samples,
            key=functools.cmp_to_key(
                lambda e1, e2: e1.value.last_access_number
                - e2.value.last_access_number
            ),
        )
def policy_name(self):
return "lru"
class MRUPolicy(Policy):
def prioritize_samples(self, samples, auxilliary_info):
        return sorted(
            samples,
            key=functools.cmp_to_key(
                lambda e1, e2: e2.value.last_access_number
                - e1.value.last_access_number
            ),
        )
def policy_name(self):
return "mru"
class LFUPolicy(Policy):
def prioritize_samples(self, samples, auxilliary_info):
        return sorted(samples, key=functools.cmp_to_key(lambda e1, e2: e1.value.num_hits - e2.value.num_hits))
def policy_name(self):
return "lfu"
class HyperbolicPolicy(Policy):
"""
An implementation of Hyperbolic caching.
<NAME>, <NAME>, and <NAME>. 2017.
Hyperbolic caching: flexible caching for web applications. In Proceedings
of the 2017 USENIX Conference on Usenix Annual Technical Conference
(USENIX ATC '17). USENIX Association, Berkeley, CA, USA, 499-511.
"""
def compare(self, e1, e2, now):
e1_duration = max(0, (now - e1.value.insertion_time) / kMicrosInSecond) * float(
e1.value.value_size
)
e2_duration = max(0, (now - e2.value.insertion_time) / kMicrosInSecond) * float(
e2.value.value_size
)
if e1_duration == e2_duration:
return e1.value.num_hits - e2.value.num_hits
if e1_duration == 0:
return 1
if e2_duration == 0:
return 1
diff = (float(e1.value.num_hits) / (float(e1_duration))) - (
float(e2.value.num_hits) / float(e2_duration)
)
if diff == 0:
return 0
elif diff > 0:
return 1
else:
return -1
def prioritize_samples(self, samples, auxilliary_info):
assert len(auxilliary_info) == 3
now = auxilliary_info[0]
        return sorted(samples, key=functools.cmp_to_key(lambda e1, e2: self.compare(e1, e2, now)))
def policy_name(self):
return "hb"
class CostClassPolicy(Policy):
"""
We calculate the hit density of a cost class as
number of hits / total size in cache * average duration in the cache.
An entry has a higher priority if its class's hit density is higher.
"""
def compare(self, e1, e2, now, cost_classes, cost_class_label):
e1_class = e1.value.cost_class(cost_class_label)
e2_class = e2.value.cost_class(cost_class_label)
assert e1_class in cost_classes
assert e2_class in cost_classes
e1_entry = cost_classes[e1_class]
e2_entry = cost_classes[e2_class]
e1_density = e1_entry.density(now)
e2_density = e2_entry.density(now)
e1_hits = cost_classes[e1_class].hits
e2_hits = cost_classes[e2_class].hits
if e1_density == e2_density:
return e1_hits - e2_hits
if e1_entry.num_entries_in_cache == 0:
return -1
if e2_entry.num_entries_in_cache == 0:
return 1
if e1_density == 0:
return 1
if e2_density == 0:
return -1
diff = (float(e1_hits) / float(e1_density)) - (
float(e2_hits) / float(e2_density)
)
if diff == 0:
return 0
elif diff > 0:
return 1
else:
return -1
def prioritize_samples(self, samples, auxilliary_info):
assert len(auxilliary_info) == 3
now = auxilliary_info[0]
cost_classes = auxilliary_info[1]
cost_class_label = auxilliary_info[2]
        return sorted(
            samples,
            key=functools.cmp_to_key(
                lambda e1, e2: self.compare(
                    e1, e2, now, cost_classes, cost_class_label
                )
            ),
        )
def policy_name(self):
return "cc"
class Cache(object):
"""
This is the base class for the implementations of alternative cache
replacement policies.
"""
def __init__(self, cache_size, enable_cache_row_key):
self.cache_size = cache_size
self.used_size = 0
self.per_second_miss_ratio_stats = MissRatioStats(1)
self.miss_ratio_stats = MissRatioStats(kSecondsInMinute)
self.per_hour_miss_ratio_stats = MissRatioStats(kSecondsInHour)
        # 0: disabled. 1: enabled. Insert both row and the referenced data block.
# 2: enabled. Insert only the row but NOT the referenced data block.
self.enable_cache_row_key = enable_cache_row_key
self.get_id_row_key_map = {}
self.max_seen_get_id = 0
self.retain_get_id_range = 100000
def block_key(self, trace_record):
return "b{}".format(trace_record.block_id)
def row_key(self, trace_record):
return "g{}-{}".format(trace_record.fd, trace_record.key_id)
def _lookup(self, trace_record, key, hash):
"""
Look up the key in the cache.
Returns true upon a cache hit, false otherwise.
"""
raise NotImplementedError
def _evict(self, trace_record, key, hash, value_size):
"""
Evict entries in the cache until there is enough room to insert the new
entry with 'value_size'.
"""
raise NotImplementedError
def _insert(self, trace_record, key, hash, value_size):
"""
Insert the new entry into the cache.
"""
raise NotImplementedError
def _should_admit(self, trace_record, key, hash, value_size):
"""
A custom admission policy to decide whether we should admit the new
entry upon a cache miss.
Returns true if the new entry should be admitted, false otherwise.
"""
raise NotImplementedError
def cache_name(self):
"""
The name of the replacement policy.
"""
raise NotImplementedError
def is_ml_cache(self):
return False
def _update_stats(self, access_time, is_hit, miss_bytes):
self.per_second_miss_ratio_stats.update_metrics(access_time, is_hit, miss_bytes)
self.miss_ratio_stats.update_metrics(access_time, is_hit, miss_bytes)
self.per_hour_miss_ratio_stats.update_metrics(access_time, is_hit, miss_bytes)
def access(self, trace_record):
"""
Access a trace record. The simulator calls this function to access a
trace record.
"""
assert self.used_size <= self.cache_size
if (
self.enable_cache_row_key > 0
and trace_record.caller == 1
and trace_record.key_id != 0
and trace_record.get_id != 0
):
# This is a get request.
self._access_row(trace_record)
return
is_hit = self._access_kv(
trace_record,
self.block_key(trace_record),
trace_record.block_id,
trace_record.block_size,
trace_record.no_insert,
)
self._update_stats(
trace_record.access_time, is_hit=is_hit, miss_bytes=trace_record.block_size
)
def _access_row(self, trace_record):
row_key = self.row_key(trace_record)
self.max_seen_get_id = max(self.max_seen_get_id, trace_record.get_id)
self.get_id_row_key_map.pop(
self.max_seen_get_id - self.retain_get_id_range, None
)
if trace_record.get_id not in self.get_id_row_key_map:
self.get_id_row_key_map[trace_record.get_id] = {}
self.get_id_row_key_map[trace_record.get_id]["h"] = False
if self.get_id_row_key_map[trace_record.get_id]["h"]:
            # Treat the remaining accesses of this get request as hits, since
            # the get request has already been completed.
# print("row hit 1")
self._update_stats(trace_record.access_time, is_hit=True, miss_bytes=0)
return
if row_key not in self.get_id_row_key_map[trace_record.get_id]:
# First time seen this key.
is_hit = self._access_kv(
trace_record,
key=row_key,
hash=trace_record.key_id,
value_size=trace_record.kv_size,
no_insert=False,
)
inserted = False
if trace_record.kv_size > 0:
inserted = True
self.get_id_row_key_map[trace_record.get_id][row_key] = inserted
self.get_id_row_key_map[trace_record.get_id]["h"] = is_hit
if self.get_id_row_key_map[trace_record.get_id]["h"]:
            # Treat the remaining accesses of this get request as hits, since
            # the get request has already been completed.
# print("row hit 2")
self._update_stats(trace_record.access_time, is_hit=True, miss_bytes=0)
return
# Access its blocks.
no_insert = trace_record.no_insert
if (
self.enable_cache_row_key == 2
and trace_record.kv_size > 0
and trace_record.block_type == 9
):
no_insert = True
is_hit = self._access_kv(
trace_record,
key=self.block_key(trace_record),
hash=trace_record.block_id,
value_size=trace_record.block_size,
no_insert=no_insert,
)
self._update_stats(
trace_record.access_time, is_hit, miss_bytes=trace_record.block_size
)
if (
trace_record.kv_size > 0
and not self.get_id_row_key_map[trace_record.get_id][row_key]
):
# Insert the row key-value pair.
self._access_kv(
trace_record,
key=row_key,
hash=trace_record.key_id,
value_size=trace_record.kv_size,
no_insert=False,
)
# Mark as inserted.
self.get_id_row_key_map[trace_record.get_id][row_key] = True
def _access_kv(self, trace_record, key, hash, value_size, no_insert):
# Sanity checks.
assert self.used_size <= self.cache_size
if self._lookup(trace_record, key, hash):
# A cache hit.
return True
if no_insert or value_size <= 0:
return False
# A cache miss.
if value_size > self.cache_size:
# The block is too large to fit into the cache.
return False
self._evict(trace_record, key, hash, value_size)
if self._should_admit(trace_record, key, hash, value_size):
self._insert(trace_record, key, hash, value_size)
self.used_size += value_size
return False
class CostClassEntry:
"""
A cost class maintains aggregated statistics of cached entries in a class.
For example, we may define block type as a class. Then, cached blocks of the
same type will share one cost class entry.
"""
def __init__(self):
self.hits = 0
self.num_entries_in_cache = 0
self.size_in_cache = 0
self.sum_insertion_times = 0
self.sum_last_access_time = 0
def insert(self, trace_record, key, value_size):
self.size_in_cache += value_size
self.num_entries_in_cache += 1
self.sum_insertion_times += trace_record.access_time / kMicrosInSecond
self.sum_last_access_time += trace_record.access_time / kMicrosInSecond
def remove(self, insertion_time, last_access_time, key, value_size, num_hits):
self.hits -= num_hits
self.num_entries_in_cache -= 1
self.sum_insertion_times -= insertion_time / kMicrosInSecond
self.size_in_cache -= value_size
self.sum_last_access_time -= last_access_time / kMicrosInSecond
def update_on_hit(self, trace_record, last_access_time):
self.hits += 1
self.sum_last_access_time -= last_access_time / kMicrosInSecond
self.sum_last_access_time += trace_record.access_time / kMicrosInSecond
def avg_lifetime_in_cache(self, now):
avg_insertion_time = self.sum_insertion_times / self.num_entries_in_cache
return now / kMicrosInSecond - avg_insertion_time
def avg_last_access_time(self):
if self.num_entries_in_cache == 0:
return 0
return float(self.sum_last_access_time) / float(self.num_entries_in_cache)
def avg_size(self):
if self.num_entries_in_cache == 0:
return 0
        return float(self.size_in_cache) / float(self.num_entries_in_cache)
def density(self, now):
avg_insertion_time = self.sum_insertion_times / self.num_entries_in_cache
in_cache_duration = now / kMicrosInSecond - avg_insertion_time
return self.size_in_cache * in_cache_duration
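    # Note on density(): the value is size_in_cache multiplied by the average
    # time (in seconds) the class's entries have spent in the cache, i.e. an
    # aggregate byte-second footprint. compare() in CostClassPolicy above
    # divides the class's hit count by this value to obtain the hit density
    # used for eviction ordering.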
class MLCache(Cache):
"""
MLCache is the base class for implementations of alternative replacement
policies using reinforcement learning.
"""
def __init__(self, cache_size, enable_cache_row_key, policies, cost_class_label):
super(MLCache, self).__init__(cache_size, enable_cache_row_key)
self.table = HashTable()
self.policy_stats = PolicyStats(kSecondsInMinute, policies)
self.per_hour_policy_stats = PolicyStats(kSecondsInHour, policies)
self.policies = policies
self.cost_classes = {}
self.cost_class_label = cost_class_label
def is_ml_cache(self):
return True
def _lookup(self, trace_record, key, hash):
value = self.table.lookup(key, hash)
if value is not None:
# Update the entry's cost class statistics.
if self.cost_class_label is not None:
cost_class = value.cost_class(self.cost_class_label)
assert cost_class in self.cost_classes
self.cost_classes[cost_class].update_on_hit(
trace_record, value.last_access_time
)
# Update the entry's last access time.
self.table.insert(
key,
hash,
CacheEntry(
value_size=value.value_size,
cf_id=value.cf_id,
level=value.level,
block_type=value.block_type,
table_id=value.table_id,
access_number=self.miss_ratio_stats.num_accesses,
time_s=trace_record.access_time,
num_hits=value.num_hits + 1,
),
)
return True
return False
def _evict(self, trace_record, key, hash, value_size):
        # Select a policy, randomly sample kSampleSize keys from the cache, then
        # evict keys in the sample set until there is enough room for the new
        # entry.
policy_index = self._select_policy(trace_record, key)
assert policy_index < len(self.policies) and policy_index >= 0
self.policies[policy_index].delete(key)
self.policy_stats.update_metrics(trace_record.access_time, policy_index)
self.per_hour_policy_stats.update_metrics(
trace_record.access_time, policy_index
)
while self.used_size + value_size > self.cache_size:
# Randomly sample n entries.
samples = self.table.random_sample(kSampleSize)
samples = self.policies[policy_index].prioritize_samples(
samples,
[trace_record.access_time, self.cost_classes, self.cost_class_label],
)
for hash_entry in samples:
assert self.table.delete(hash_entry.key, hash_entry.hash) is not None
self.used_size -= hash_entry.value.value_size
self.policies[policy_index].evict(
key=hash_entry.key, max_size=self.table.elements
)
# Update the entry's cost class statistics.
if self.cost_class_label is not None:
cost_class = hash_entry.value.cost_class(self.cost_class_label)
assert cost_class in self.cost_classes
self.cost_classes[cost_class].remove(
hash_entry.value.insertion_time,
hash_entry.value.last_access_time,
key,
hash_entry.value.value_size,
hash_entry.value.num_hits,
)
if self.used_size + value_size <= self.cache_size:
break
def _insert(self, trace_record, key, hash, value_size):
assert self.used_size + value_size <= self.cache_size
entry = CacheEntry(
value_size,
trace_record.cf_id,
trace_record.level,
trace_record.block_type,
trace_record.table_id,
self.miss_ratio_stats.num_accesses,
trace_record.access_time,
)
# Update the entry's cost class statistics.
if self.cost_class_label is not None:
cost_class = entry.cost_class(self.cost_class_label)
if cost_class not in self.cost_classes:
self.cost_classes[cost_class] = CostClassEntry()
self.cost_classes[cost_class].insert(trace_record, key, value_size)
self.table.insert(key, hash, entry)
def _should_admit(self, trace_record, key, hash, value_size):
return True
def _select_policy(self, trace_record, key):
raise NotImplementedError
class ThompsonSamplingCache(MLCache):
"""
An implementation of Thompson Sampling for the Bernoulli Bandit.
<NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. 2018. A Tutorial on Thompson Sampling. Found.
Trends Mach. Learn. 11, 1 (July 2018), 1-96.
DOI: https://doi.org/10.1561/2200000070
"""
def __init__(
self,
cache_size,
enable_cache_row_key,
policies,
cost_class_label,
init_a=1,
init_b=1,
):
super(ThompsonSamplingCache, self).__init__(
cache_size, enable_cache_row_key, policies, cost_class_label
)
        self._as = [init_a] * len(self.policies)
        self._bs = [init_b] * len(self.policies)
def _select_policy(self, trace_record, key):
if len(self.policies) == 1:
return 0
samples = [
np.random.beta(self._as[x], self._bs[x]) for x in range(len(self.policies))
]
selected_policy = max(range(len(self.policies)), key=lambda x: samples[x])
reward = self.policies[selected_policy].generate_reward(key)
assert reward <= 1 and reward >= 0
self._as[selected_policy] += reward
self._bs[selected_policy] += 1 - reward
return selected_policy
def cache_name(self):
if self.enable_cache_row_key:
return "Hybrid ThompsonSampling with cost class {} (ts_hybrid)".format(
self.cost_class_label
)
return "ThompsonSampling with cost class {} (ts)".format(self.cost_class_label)
class LinUCBCache(MLCache):
"""
An implementation of LinUCB with disjoint linear models.
<NAME>, <NAME>, <NAME>, and <NAME>. 2010.
A contextual-bandit approach to personalized news article recommendation.
In Proceedings of the 19th international conference on World wide web
(WWW '10). ACM, New York, NY, USA, 661-670.
DOI=http://dx.doi.org/10.1145/1772690.1772758
"""
def __init__(self, cache_size, enable_cache_row_key, policies, cost_class_label):
super(LinUCBCache, self).__init__(
cache_size, enable_cache_row_key, policies, cost_class_label
)
        self.nfeatures = 4  # Block type, level, cf, plus one unused slot.
self.th = np.zeros((len(self.policies), self.nfeatures))
self.eps = 0.2
self.b = np.zeros_like(self.th)
self.A = np.zeros((len(self.policies), self.nfeatures, self.nfeatures))
self.A_inv = np.zeros((len(self.policies), self.nfeatures, self.nfeatures))
for i in range(len(self.policies)):
self.A[i] = np.identity(self.nfeatures)
self.th_hat = np.zeros_like(self.th)
self.p = np.zeros(len(self.policies))
self.alph = 0.2
def _select_policy(self, trace_record, key):
if len(self.policies) == 1:
return 0
x_i = np.zeros(self.nfeatures) # The current context vector
x_i[0] = trace_record.block_type
x_i[1] = trace_record.level
x_i[2] = trace_record.cf_id
p = np.zeros(len(self.policies))
for a in range(len(self.policies)):
self.th_hat[a] = self.A_inv[a].dot(self.b[a])
ta = x_i.dot(self.A_inv[a]).dot(x_i)
a_upper_ci = self.alph * np.sqrt(ta)
a_mean = self.th_hat[a].dot(x_i)
p[a] = a_mean + a_upper_ci
p = p + (np.random.random(len(p)) * 0.000001)
selected_policy = p.argmax()
reward = self.policies[selected_policy].generate_reward(key)
assert reward <= 1 and reward >= 0
self.A[selected_policy] += np.outer(x_i, x_i)
self.b[selected_policy] += reward * x_i
self.A_inv[selected_policy] = np.linalg.inv(self.A[selected_policy])
del x_i
return selected_policy
def cache_name(self):
if self.enable_cache_row_key:
return "Hybrid LinUCB with cost class {} (linucb_hybrid)".format(
self.cost_class_label
)
return "LinUCB with cost class {} (linucb)".format(self.cost_class_label)
class OPTCacheEntry:
"""
    A cache entry for the OPT algorithm. The entries are sorted based on their
    next access sequence numbers in reverse order, i.e., the entry whose next
    access is the furthest in the future is ordered before other entries.
"""
def __init__(self, key, next_access_seq_no, value_size):
self.key = key
self.next_access_seq_no = next_access_seq_no
self.value_size = value_size
self.is_removed = False
def __cmp__(self, other):
if other.next_access_seq_no != self.next_access_seq_no:
return other.next_access_seq_no - self.next_access_seq_no
return self.value_size - other.value_size
def __repr__(self):
return "({} {} {} {})".format(
self.key, self.next_access_seq_no, self.value_size, self.is_removed
)
class PQTable:
"""
A hash table with a priority queue.
"""
def __init__(self):
        # A list of entries arranged as a heap, ordered by the entries' custom
        # implementation of __cmp__.
self.pq = []
self.table = {}
def pqinsert(self, entry):
"Add a new key or update the priority of an existing key"
# Remove the entry from the table first.
removed_entry = self.table.pop(entry.key, None)
if removed_entry:
            # Mark as removed since there is no 'remove' API in heapq.
# Instead, an entry in pq is removed lazily when calling pop.
removed_entry.is_removed = True
self.table[entry.key] = entry
heapq.heappush(self.pq, entry)
return removed_entry
def pqpop(self):
while self.pq:
entry = heapq.heappop(self.pq)
if not entry.is_removed:
del self.table[entry.key]
return entry
return None
def pqpeek(self):
while self.pq:
entry = self.pq[0]
if not entry.is_removed:
return entry
heapq.heappop(self.pq)
return
def __contains__(self, k):
return k in self.table
def __getitem__(self, k):
return self.table[k]
def __len__(self):
return len(self.table)
def values(self):
return self.table.values()
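# Illustration of PQTable's lazy deletion (shown here as a comment only; it is
# not exercised anywhere in this file):
#   pq = PQTable()
#   pq.pqinsert(OPTCacheEntry("a", next_access_seq_no=10, value_size=1))
#   pq.pqinsert(OPTCacheEntry("a", next_access_seq_no=90, value_size=1))
#   pq.pqpop()  # returns the entry with next_access_seq_no=90; the stale
#               # first entry was marked is_removed=True and is skipped.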
class OPTCache(Cache):
"""
An implementation of the Belady MIN algorithm. OPTCache evicts an entry
in the cache whose next access occurs furthest in the future.
    Note that the Belady MIN algorithm is only optimal assuming that all blocks
    have the same size and that a missing entry is always inserted into the
    cache. Neither holds for the block cache trace, since blocks have different
    sizes and we may not insert a block into the cache upon a cache miss.
However, it is still useful to serve as a "theoretical upper bound" on the
lowest miss ratio we can achieve given a cache size.
<NAME>. 1966. A Study of Replacement Algorithms for a
Virtual-storage Computer. IBM Syst. J. 5, 2 (June 1966), 78-101.
DOI=http://dx.doi.org/10.1147/sj.52.0078
"""
def __init__(self, cache_size):
super(OPTCache, self).__init__(cache_size, enable_cache_row_key=0)
self.table = PQTable()
def _lookup(self, trace_record, key, hash):
if key not in self.table:
return False
# A cache hit. Update its next access time.
assert (
self.table.pqinsert(
OPTCacheEntry(
key, trace_record.next_access_seq_no, self.table[key].value_size
)
)
is not None
)
return True
def _evict(self, trace_record, key, hash, value_size):
while self.used_size + value_size > self.cache_size:
evict_entry = self.table.pqpop()
assert evict_entry is not None
self.used_size -= evict_entry.value_size
def _insert(self, trace_record, key, hash, value_size):
assert (
self.table.pqinsert(
OPTCacheEntry(key, trace_record.next_access_seq_no, value_size)
)
is None
)
def _should_admit(self, trace_record, key, hash, value_size):
return True
def cache_name(self):
return "Belady MIN (opt)"
class GDSizeEntry:
"""
A cache entry for the greedy dual size replacement policy.
"""
def __init__(self, key, value_size, priority):
self.key = key
self.value_size = value_size
self.priority = priority
self.is_removed = False
def __cmp__(self, other):
if other.priority != self.priority:
return self.priority - other.priority
return self.value_size - other.value_size
def __repr__(self):
return "({} {} {} {})".format(
            self.key, self.priority, self.value_size, self.is_removed
)
class GDSizeCache(Cache):
"""
An implementation of the greedy dual size algorithm.
We define cost as an entry's size.
See https://www.usenix.org/legacy/publications/library/proceedings/usits97/full_papers/cao/cao_html/node8.html
and <NAME>. The k-server dual and loose competitiveness for paging.
    Algorithmica, June 1994, vol. 11, no. 6, 525-541.
    Rewritten version of "On-line caching as cache size varies",
in The 2nd Annual ACM-SIAM Symposium on Discrete Algorithms, 241-250, 1991.
"""
def __init__(self, cache_size, enable_cache_row_key):
super(GDSizeCache, self).__init__(cache_size, enable_cache_row_key)
self.table = PQTable()
self.L = 0.0
def cache_name(self):
if self.enable_cache_row_key:
return "Hybrid GreedyDualSize (gdsize_hybrid)"
return "GreedyDualSize (gdsize)"
def _lookup(self, trace_record, key, hash):
if key not in self.table:
return False
# A cache hit. Update its priority.
entry = self.table[key]
assert (
self.table.pqinsert(
GDSizeEntry(key, entry.value_size, self.L + entry.value_size)
)
is not None
)
return True
def _evict(self, trace_record, key, hash, value_size):
while self.used_size + value_size > self.cache_size:
evict_entry = self.table.pqpop()
assert evict_entry is not None
self.L = evict_entry.priority
self.used_size -= evict_entry.value_size
def _insert(self, trace_record, key, hash, value_size):
assert (
self.table.pqinsert(GDSizeEntry(key, value_size, self.L + value_size))
is None
)
def _should_admit(self, trace_record, key, hash, value_size):
return True
class Deque(object):
"""A Deque class facilitates the implementation of LRU and ARC."""
def __init__(self):
self.od = OrderedDict()
def appendleft(self, k):
if k in self.od:
del self.od[k]
self.od[k] = None
def pop(self):
item = self.od.popitem(last=False) if self.od else None
if item is not None:
return item[0]
return None
def remove(self, k):
del self.od[k]
def __len__(self):
return len(self.od)
def __contains__(self, k):
return k in self.od
def __iter__(self):
return reversed(self.od)
def __repr__(self):
return "Deque(%r)" % (list(self),)
class ARCCache(Cache):
"""
    An implementation of ARC. ARC assumes that all blocks have the same size,
    but the sizes of index and filter blocks are variable. To accommodate this,
    we modified ARC as follows:
    1) We use 16 KB as the average block size and calculate the number of
    blocks (c) in the cache.
    2) When we insert an entry, the cache evicts entries from both the t1 and
    t2 queues until it has enough space for the new entry. This also requires
    modifying the algorithm to maintain at most 2*c blocks.
<NAME> and <NAME>. 2003. ARC: A Self-Tuning, Low
Overhead Replacement Cache. In Proceedings of the 2nd USENIX Conference on
File and Storage Technologies (FAST '03). USENIX Association, Berkeley, CA,
USA, 115-130.
"""
def __init__(self, cache_size, enable_cache_row_key):
super(ARCCache, self).__init__(cache_size, enable_cache_row_key)
self.table = {}
        self.c = cache_size / (16 * 1024)  # Number of blocks in the cache, assuming a 16 KB average block size.
self.p = 0 # Target size for the list T1
        # L1 (T1 + B1): entries seen only once recently.
self.t1 = Deque() # T1: recent cache entries
self.b1 = Deque() # B1: ghost entries recently evicted from the T1 cache
        # L2 (T2 + B2): entries seen at least twice recently.
self.t2 = Deque() # T2: frequent entries
self.b2 = Deque() # B2: ghost entries recently evicted from the T2 cache
def _replace(self, key, value_size):
while self.used_size + value_size > self.cache_size:
if self.t1 and ((key in self.b2) or (len(self.t1) > self.p)):
old = self.t1.pop()
self.b1.appendleft(old)
else:
if self.t2:
old = self.t2.pop()
self.b2.appendleft(old)
else:
old = self.t1.pop()
self.b1.appendleft(old)
self.used_size -= self.table[old].value_size
del self.table[old]
def _lookup(self, trace_record, key, hash):
# Case I: key is in T1 or T2.
# Move key to MRU position in T2.
if key in self.t1:
self.t1.remove(key)
self.t2.appendleft(key)
return True
if key in self.t2:
self.t2.remove(key)
self.t2.appendleft(key)
return True
return False
def _evict(self, trace_record, key, hash, value_size):
# Case II: key is in B1
# Move x from B1 to the MRU position in T2 (also fetch x to the cache).
if key in self.b1:
self.p = min(self.c, self.p + max(len(self.b2) / len(self.b1), 1))
self._replace(key, value_size)
self.b1.remove(key)
self.t2.appendleft(key)
return
# Case III: key is in B2
# Move x from B2 to the MRU position in T2 (also fetch x to the cache).
if key in self.b2:
self.p = max(0, self.p - max(len(self.b1) / len(self.b2), 1))
self._replace(key, value_size)
self.b2.remove(key)
self.t2.appendleft(key)
return
# Case IV: key is not in (T1 u B1 u T2 u B2)
self._replace(key, value_size)
while len(self.t1) + len(self.b1) >= self.c and self.b1:
self.b1.pop()
total = len(self.t1) + len(self.b1) + len(self.t2) + len(self.b2)
while total >= (2 * self.c) and self.b2:
self.b2.pop()
total -= 1
# Finally, move it to MRU position in T1.
self.t1.appendleft(key)
return
def _insert(self, trace_record, key, hash, value_size):
self.table[key] = CacheEntry(
value_size,
trace_record.cf_id,
trace_record.level,
trace_record.block_type,
trace_record.table_id,
0,
trace_record.access_time,
)
def _should_admit(self, trace_record, key, hash, value_size):
return True
def cache_name(self):
if self.enable_cache_row_key:
return "Hybrid Adaptive Replacement Cache (arc_hybrid)"
return "Adaptive Replacement Cache (arc)"
class LRUCache(Cache):
"""
A strict LRU queue.
"""
def __init__(self, cache_size, enable_cache_row_key):
super(LRUCache, self).__init__(cache_size, enable_cache_row_key)
self.table = {}
self.lru = Deque()
def cache_name(self):
if self.enable_cache_row_key:
return "Hybrid LRU (lru_hybrid)"
return "LRU (lru)"
def _lookup(self, trace_record, key, hash):
if key not in self.table:
return False
# A cache hit. Update LRU queue.
self.lru.remove(key)
self.lru.appendleft(key)
return True
def _evict(self, trace_record, key, hash, value_size):
while self.used_size + value_size > self.cache_size:
evict_key = self.lru.pop()
self.used_size -= self.table[evict_key].value_size
del self.table[evict_key]
def _insert(self, trace_record, key, hash, value_size):
self.table[key] = CacheEntry(
value_size,
trace_record.cf_id,
trace_record.level,
trace_record.block_type,
trace_record.table_id,
0,
trace_record.access_time,
)
self.lru.appendleft(key)
def _should_admit(self, trace_record, key, hash, value_size):
return True
class TraceCache(Cache):
"""
    A trace cache. Lookup returns true if the trace observed a cache hit.
    It is used to reproduce the cache hits observed in the trace.
"""
def __init__(self, cache_size):
super(TraceCache, self).__init__(cache_size, enable_cache_row_key=0)
def _lookup(self, trace_record, key, hash):
return trace_record.is_hit
def _evict(self, trace_record, key, hash, value_size):
pass
def _insert(self, trace_record, key, hash, value_size):
pass
def _should_admit(self, trace_record, key, hash, value_size):
return False
def cache_name(self):
return "Trace"
def parse_cache_size(cs):
cs = cs.replace("\n", "")
if cs[-1] == "M":
return int(cs[: len(cs) - 1]) * 1024 * 1024
if cs[-1] == "G":
return int(cs[: len(cs) - 1]) * 1024 * 1024 * 1024
if cs[-1] == "T":
return int(cs[: len(cs) - 1]) * 1024 * 1024 * 1024 * 1024
return int(cs)
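# Illustrative examples: parse_cache_size("64M") == 67108864,
# parse_cache_size("1G") == 1073741824, parse_cache_size("1024") == 1024.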
def create_cache(cache_type, cache_size, downsample_size):
cache_size = cache_size / downsample_size
enable_cache_row_key = 0
if "hybridn" in cache_type:
enable_cache_row_key = 2
cache_type = cache_type[:-8]
if "hybrid" in cache_type:
enable_cache_row_key = 1
cache_type = cache_type[:-7]
if cache_type == "ts":
return ThompsonSamplingCache(
cache_size,
enable_cache_row_key,
[LRUPolicy(), LFUPolicy(), HyperbolicPolicy()],
cost_class_label=None,
)
elif cache_type == "linucb":
return LinUCBCache(
cache_size,
enable_cache_row_key,
[LRUPolicy(), LFUPolicy(), HyperbolicPolicy()],
cost_class_label=None,
)
elif cache_type == "pylru":
return ThompsonSamplingCache(
cache_size, enable_cache_row_key, [LRUPolicy()], cost_class_label=None
)
elif cache_type == "pymru":
return ThompsonSamplingCache(
cache_size, enable_cache_row_key, [MRUPolicy()], cost_class_label=None
)
elif cache_type == "pylfu":
return ThompsonSamplingCache(
cache_size, enable_cache_row_key, [LFUPolicy()], cost_class_label=None
)
elif cache_type == "pyhb":
return ThompsonSamplingCache(
cache_size,
enable_cache_row_key,
[HyperbolicPolicy()],
cost_class_label=None,
)
elif cache_type == "pycctbbt":
return ThompsonSamplingCache(
cache_size,
enable_cache_row_key,
[CostClassPolicy()],
cost_class_label="table_bt",
)
elif cache_type == "pycccf":
return ThompsonSamplingCache(
cache_size, enable_cache_row_key, [CostClassPolicy()], cost_class_label="cf"
)
elif cache_type == "pycctblevelbt":
return ThompsonSamplingCache(
cache_size,
enable_cache_row_key,
[CostClassPolicy()],
cost_class_label="table_level_bt",
)
elif cache_type == "pycccfbt":
return ThompsonSamplingCache(
cache_size,
enable_cache_row_key,
[CostClassPolicy()],
cost_class_label="cf_bt",
)
elif cache_type == "pycctb":
return ThompsonSamplingCache(
cache_size,
enable_cache_row_key,
[CostClassPolicy()],
cost_class_label="table",
)
elif cache_type == "pyccbt":
return ThompsonSamplingCache(
cache_size, enable_cache_row_key, [CostClassPolicy()], cost_class_label="bt"
)
elif cache_type == "opt":
if enable_cache_row_key:
print("opt does not support hybrid mode.")
assert False
return OPTCache(cache_size)
elif cache_type == "trace":
if enable_cache_row_key:
print("trace does not support hybrid mode.")
assert False
return TraceCache(cache_size)
elif cache_type == "lru":
return LRUCache(cache_size, enable_cache_row_key)
elif cache_type == "arc":
return ARCCache(cache_size, enable_cache_row_key)
elif cache_type == "gdsize":
return GDSizeCache(cache_size, enable_cache_row_key)
else:
print("Unknown cache type {}".format(cache_type))
assert False
return None
class BlockAccessTimeline:
"""
BlockAccessTimeline stores all accesses of a block.
"""
def __init__(self):
self.accesses = []
self.current_access_index = 1
def get_next_access(self):
if self.current_access_index == len(self.accesses):
return sys.maxsize
next_access_seq_no = self.accesses[self.current_access_index]
self.current_access_index += 1
return next_access_seq_no
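# How the timeline is consumed (see run() below): for the Belady MIN cache the
# trace is read twice. The first pass records every access sequence number of
# each block; during the second pass get_next_access() yields the sequence
# number of the block's next access (or sys.maxsize if there is none), which
# OPTCache uses to evict the entry accessed furthest in the future.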
def percent(e1, e2):
if e2 == 0:
return -1
return float(e1) * 100.0 / float(e2)
def is_target_cf(access_cf, target_cf_name):
if target_cf_name == "all":
return True
return access_cf == target_cf_name
def run(
trace_file_path,
cache_type,
cache,
warmup_seconds,
max_accesses_to_process,
target_cf_name,
):
warmup_complete = False
trace_miss_ratio_stats = MissRatioStats(kSecondsInMinute)
access_seq_no = 0
time_interval = 1
start_time = time.time()
trace_start_time = 0
trace_duration = 0
is_opt_cache = False
if cache.cache_name() == "Belady MIN (opt)":
is_opt_cache = True
block_access_timelines = {}
num_no_inserts = 0
num_blocks_with_no_size = 0
num_inserts_block_with_no_size = 0
if is_opt_cache:
        # Read all blocks into memory and store their access times so that OPT
        # can use this information to evict the cached key whose next access is
        # the furthest in the future.
print("Preprocessing block traces.")
with open(trace_file_path, "r") as trace_file:
for line in trace_file:
if (
max_accesses_to_process != -1
and access_seq_no > max_accesses_to_process
):
break
ts = line.split(",")
timestamp = int(ts[0])
cf_name = ts[5]
if not is_target_cf(cf_name, target_cf_name):
continue
if trace_start_time == 0:
trace_start_time = timestamp
trace_duration = timestamp - trace_start_time
block_id = int(ts[1])
block_size = int(ts[3])
no_insert = int(ts[9])
if block_id not in block_access_timelines:
block_access_timelines[block_id] = BlockAccessTimeline()
if block_size == 0:
num_blocks_with_no_size += 1
block_access_timelines[block_id].accesses.append(access_seq_no)
access_seq_no += 1
if no_insert == 1:
num_no_inserts += 1
if no_insert == 0 and block_size == 0:
num_inserts_block_with_no_size += 1
if access_seq_no % 100 != 0:
continue
now = time.time()
if now - start_time > time_interval * 10:
print(
"Take {} seconds to process {} trace records with trace "
"duration of {} seconds. Throughput: {} records/second.".format(
now - start_time,
access_seq_no,
trace_duration / 1000000,
access_seq_no / (now - start_time),
)
)
time_interval += 1
print(
"Trace contains {0} blocks, {1}({2:.2f}%) blocks with no size."
"{3} accesses, {4}({5:.2f}%) accesses with no_insert,"
"{6}({7:.2f}%) accesses that want to insert but block size is 0.".format(
len(block_access_timelines),
num_blocks_with_no_size,
percent(num_blocks_with_no_size, len(block_access_timelines)),
access_seq_no,
num_no_inserts,
percent(num_no_inserts, access_seq_no),
num_inserts_block_with_no_size,
percent(num_inserts_block_with_no_size, access_seq_no),
)
)
access_seq_no = 0
time_interval = 1
start_time = time.time()
trace_start_time = 0
trace_duration = 0
print("Running simulated {} cache on block traces.".format(cache.cache_name()))
with open(trace_file_path, "r") as trace_file:
for line in trace_file:
if (
max_accesses_to_process != -1
and access_seq_no > max_accesses_to_process
):
break
if access_seq_no % 1000000 == 0:
# Force a python gc periodically to reduce memory usage.
gc.collect()
ts = line.split(",")
timestamp = int(ts[0])
cf_name = ts[5]
if not is_target_cf(cf_name, target_cf_name):
continue
if trace_start_time == 0:
trace_start_time = timestamp
trace_duration = timestamp - trace_start_time
if (
not warmup_complete
and warmup_seconds > 0
and trace_duration > warmup_seconds * 1000000
):
cache.miss_ratio_stats.reset_counter()
warmup_complete = True
next_access_seq_no = 0
block_id = int(ts[1])
if is_opt_cache:
next_access_seq_no = block_access_timelines[block_id].get_next_access()
record = TraceRecord(
access_time=int(ts[0]),
block_id=int(ts[1]),
block_type=int(ts[2]),
block_size=int(ts[3]),
cf_id=int(ts[4]),
cf_name=ts[5],
level=int(ts[6]),
fd=int(ts[7]),
caller=int(ts[8]),
no_insert=int(ts[9]),
get_id=int(ts[10]),
key_id=int(ts[11]),
kv_size=int(ts[12]),
is_hit=int(ts[13]),
referenced_key_exist_in_block=int(ts[14]),
num_keys_in_block=int(ts[15]),
table_id=int(ts[16]),
seq_number=int(ts[17]),
block_key_size=int(ts[18]),
key_size=int(ts[19]),
block_offset_in_file=int(ts[20]),
next_access_seq_no=next_access_seq_no,
)
trace_miss_ratio_stats.update_metrics(
record.access_time, is_hit=record.is_hit, miss_bytes=record.block_size
)
cache.access(record)
access_seq_no += 1
del record
del ts
if access_seq_no % 100 != 0:
continue
# Report progress every 10 seconds.
now = time.time()
if now - start_time > time_interval * 10:
print(
"Take {} seconds to process {} trace records with trace "
"duration of {} seconds. Throughput: {} records/second. "
"Trace miss ratio {}".format(
now - start_time,
access_seq_no,
trace_duration / 1000000,
access_seq_no / (now - start_time),
trace_miss_ratio_stats.miss_ratio(),
)
)
time_interval += 1
print(
"{},0,0,{},{},{}".format(
cache_type,
cache.cache_size,
cache.miss_ratio_stats.miss_ratio(),
cache.miss_ratio_stats.num_accesses,
)
)
now = time.time()
print(
"Take {} seconds to process {} trace records with trace duration of {} "
"seconds. Throughput: {} records/second. Trace miss ratio {}".format(
now - start_time,
access_seq_no,
trace_duration / 1000000,
access_seq_no / (now - start_time),
trace_miss_ratio_stats.miss_ratio(),
)
)
print(
"{},0,0,{},{},{}".format(
cache_type,
cache.cache_size,
cache.miss_ratio_stats.miss_ratio(),
cache.miss_ratio_stats.num_accesses,
)
)
return trace_start_time, trace_duration
def report_stats(
cache,
cache_type,
cache_size,
target_cf_name,
result_dir,
trace_start_time,
trace_end_time,
):
cache_label = "{}-{}-{}".format(cache_type, cache_size, target_cf_name)
with open("{}/data-ml-mrc-{}".format(result_dir, cache_label), "w+") as mrc_file:
mrc_file.write(
"{},0,0,{},{},{}\n".format(
cache_type,
cache_size,
cache.miss_ratio_stats.miss_ratio(),
cache.miss_ratio_stats.num_accesses,
)
)
cache_stats = [
cache.per_second_miss_ratio_stats,
cache.miss_ratio_stats,
cache.per_hour_miss_ratio_stats,
]
for i in range(len(cache_stats)):
avg_miss_bytes, p95_miss_bytes = cache_stats[i].compute_miss_bytes()
with open(
"{}/data-ml-avgmb-{}-{}".format(
result_dir, cache_stats[i].time_unit, cache_label
),
"w+",
) as mb_file:
mb_file.write(
"{},0,0,{},{}\n".format(cache_type, cache_size, avg_miss_bytes)
)
with open(
"{}/data-ml-p95mb-{}-{}".format(
result_dir, cache_stats[i].time_unit, cache_label
),
"w+",
) as mb_file:
mb_file.write(
"{},0,0,{},{}\n".format(cache_type, cache_size, p95_miss_bytes)
)
cache_stats[i].write_miss_timeline(
cache_type,
cache_size,
target_cf_name,
result_dir,
trace_start_time,
trace_end_time,
)
cache_stats[i].write_miss_ratio_timeline(
cache_type,
cache_size,
target_cf_name,
result_dir,
trace_start_time,
trace_end_time,
)
if not cache.is_ml_cache():
return
policy_stats = [cache.policy_stats, cache.per_hour_policy_stats]
for i in range(len(policy_stats)):
policy_stats[i].write_policy_timeline(
cache_type,
cache_size,
target_cf_name,
result_dir,
trace_start_time,
trace_end_time,
)
policy_stats[i].write_policy_ratio_timeline(
cache_type,
cache_size,
target_cf_name,
result_dir,
trace_start_time,
trace_end_time,
)
if __name__ == "__main__":
if len(sys.argv) <= 8:
print(
"Must provide 8 arguments.\n"
"1) Cache type (ts, linucb, arc, lru, opt, pylru, pymru, pylfu, "
"pyhb, gdsize, trace). One may evaluate the hybrid row_block cache "
"by appending '_hybrid' to a cache_type, e.g., ts_hybrid. "
"Note that hybrid is not supported with opt and trace. \n"
"2) Cache size (xM, xG, xT).\n"
"3) The sampling frequency used to collect the trace. (The "
"simulation scales down the cache size by the sampling frequency).\n"
"4) Warmup seconds (The number of seconds used for warmup).\n"
"5) Trace file path.\n"
"6) Result directory (A directory that saves generated results)\n"
"7) Max number of accesses to process\n"
"8) The target column family. (The simulation will only run "
"accesses on the target column family. If it is set to all, "
"it will run against all accesses.)"
)
exit(1)
print("Arguments: {}".format(sys.argv))
cache_type = sys.argv[1]
cache_size = parse_cache_size(sys.argv[2])
downsample_size = int(sys.argv[3])
warmup_seconds = int(sys.argv[4])
trace_file_path = sys.argv[5]
result_dir = sys.argv[6]
max_accesses_to_process = int(sys.argv[7])
target_cf_name = sys.argv[8]
cache = create_cache(cache_type, cache_size, downsample_size)
trace_start_time, trace_duration = run(
trace_file_path,
cache_type,
cache,
warmup_seconds,
max_accesses_to_process,
target_cf_name,
)
trace_end_time = trace_start_time + trace_duration
report_stats(
cache,
cache_type,
cache_size,
target_cf_name,
result_dir,
trace_start_time,
trace_end_time,
)
|
owl/owl/conv.py | jjzhang166/minerva | 561 | 11082537 | <filename>owl/owl/conv.py
#!/usr/bin/env python
""" This module contains operations for convolution, pooling and softmax
"""
import libowl as _owl
soft_op = _owl.softmax_algo
""" Same enum type as cudnn's ``cudnnSoftmaxMode_t``. Either ``soft_op.instance`` or ``soft_op.channel``.
"""
pool_op = _owl.pooling_algo
""" Same enum type as cudnn's ``cudnnPoolingMode_t``. Either ``pool_op.max`` or ``pool_op.avg``.
"""
def softmax(x, op = soft_op.instance):
""" Perform softmax on the given ndarray.
    Note that this function currently only performs softmax across instances, and the
    last dimension of ``x`` should represent instances. If ``x`` has four dimensions,
    the C++ routine is called directly. Otherwise, the number of dimensions is padded
    to four before the call.
:param owl.NArray x: the ndarray to be softmaxed
:param owl.conv.soft_op op: what type of softmax to perform
:return: the ndarray after being softmaxed and of the same shape
:rtype: owl.NArray
"""
if len(x.shape) == 4:
return _owl.NArray.softmax_forward(x, op)
else:
ori_shape = list(x.shape)
soft_shape = x.shape[0:-1] + [1 for i in range(4 - len(ori_shape))] + [x.shape[-1]]
return _owl.NArray.softmax_forward(x.reshape(soft_shape), op).reshape(ori_shape)
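# Reshape note for the non-4D branch above: a 2-D input of shape [m, n] with n
# instances is viewed as [m, 1, 1, n] so the 4-D softmax routine can be
# applied, and the result is reshaped back to the original shape afterwards.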
class Lrner:
""" Wrapper class for LRN.
    :ivar int local_size: the size of the LRN window across channels
    :ivar float alpha: LRN scaling parameter
    :ivar float beta: LRN exponent parameter
"""
def __init__(self, local_size, alpha, beta):
""" Constructor for Convolver class
:param int local_size: the size of lrn across channel
:param float alpha: lrn parameters
:param float beta: lrn parameters
"""
self.local_size = local_size
self.alpha = alpha
self.beta = beta
def ff(self, x, scale):
""" Feed-forward local response norm
:param owl.NArray x: input of the lrn
:param owl.NArray scale: auxiliary matrix to help computing
:return: result ndarray after forward lrn
:rtype: owl.NArray
"""
#print np.reshape(x.to_numpy(), np.prod(np.shape(x.to_numpy()))).tolist()[0:100]
return _owl.NArray.lrn_forward(x, scale, self.local_size, self.alpha, self.beta)
def bp(self, bottom_data, top_data, scale, top_diff):
""" Backward local response norm
:param owl.NArray bottom_data: activation before lrn
:param owl.NArray top_data: activation after lrn
:param owl.NArray scale: auxiliary matrix to help computing
:param owl.NArray top_diff: error derivative
:return: result ndarray after backward lrn
:rtype: owl.NArray
"""
return _owl.NArray.lrn_backward(bottom_data, top_data, scale, top_diff, self.local_size, self.alpha, self.beta)
class Convolver:
""" Wrapper class for convolution.
:ivar libowl.ConvInfo param: convolution parameters
"""
def __init__(self, pad_h, pad_w, stride_v, stride_h):
""" Constructor for Convolver class
:param int pad_h: padding height
:param int pad_w: padding width
:param int stride_v: vertical stride length
:param int stride_h: horizontal stride length
"""
ci = _owl.ConvInfo()
ci.pad_height = pad_h
ci.pad_width = pad_w
ci.stride_vertical = stride_v
ci.stride_horizontal = stride_h
self.param = ci
def ff(self, x, w, b):
""" Feed-forward convolution
:param owl.NArray x: input of the convolution
:param owl.NArray w: filters
:param owl.NArray b: bias of the convolution
:return: result ndarray after forward convolution
:rtype: owl.NArray
"""
return _owl.NArray.conv_forward(x, w, b, self.param)
def bp(self, y, x, w):
""" Backward convolution
:param owl.NArray y: error of the convolution usually passed by higher layers
:param owl.NArray x: bottom activation
:param owl.NArray w: filters
:return: result ndarray after backward convolution
:rtype: owl.NArray
"""
return _owl.NArray.conv_backward_data(y, x, w, self.param)
def weight_grad(self, y, x, w):
""" Compute the gradient of filters
:param owl.NArray y: error (sensitivity) passed by higher layer
:param owl.NArray x: input (activation) of lower layer
:param owl.NArray w: weight (used to get the filter dimension)
:return: the gradient of filters
:rtype: owl.NArray
"""
return _owl.NArray.conv_backward_filter(y, x, w, self.param)
def bias_grad(self, y):
""" Compute the gradient of bias
:param owl.NArray y: error (sensitivity) passed by higher layer
:return: the gradient of bias
:rtype: owl.NArray
"""
return _owl.NArray.conv_backward_bias(y)
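    # A hedged usage sketch (shapes follow the NCHW convention assumed by this
    # wrapper; construction of the input NArrays is omitted):
    #   conv = Convolver(pad_h=1, pad_w=1, stride_v=1, stride_h=1)
    #   y = conv.ff(x, w, b)             # forward activation
    #   dx = conv.bp(dy, x, w)           # gradient w.r.t. the input
    #   dw = conv.weight_grad(dy, x, w)  # gradient w.r.t. the filters
    #   db = conv.bias_grad(dy)          # gradient w.r.t. the bias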
class Pooler:
""" Wrapper class for pooling operations
:ivar libowl.PoolingInfo param: pooling parameters
"""
def __init__(self, h, w, stride_v, stride_h, pad_h = 0, pad_w = 0, op = pool_op.max):
""" Constructor for Pooler class
:param int h: pooling height
:param int w: pooling width
:param int stride_v: vertical stride length
:param int stride_h: horizontal stride length
:param int pad_h: padding height
:param int pad_w: padding width
:param owl.conv.pool_op op: pooling type
"""
pi = _owl.PoolingInfo()
pi.height = h
pi.width = w
pi.stride_vertical = stride_v
pi.stride_horizontal = stride_h
pi.pad_height = pad_h
pi.pad_width = pad_w
pi.algorithm = op
self.param = pi
def ff(self, x):
""" Forward propagation for pooling
:param owl.NArray x: input ndarray of pooling
:return: output ndarray after forward pooling
:rtype: owl.NArray
"""
#print "%d %d %d %d" % (self.param.height, self.param.width, self.param.stride_vertical, self.param.stride_horizontal)
return _owl.NArray.pooling_forward(x, self.param)
def bp(self, y, ff_y, ff_x):
""" Backward propagation for pooling
:param owl.NArray y: error (sensitivity) from higher-layer
:param owl.NArray ff_y: value after forward pooling
:param owl.NArray ff_x: value before forward pooling
:return: output after backward pooling
:rtype: owl.NArray
"""
return _owl.NArray.pooling_backward(y, ff_y, ff_x, self.param)
|
cdlib/test/test_static_benchmark.py | xing-lab-pitt/cdlib | 248 | 11082561 | import unittest
import cdlib
from cdlib import algorithms
from cdlib import benchmark
import networkx as nx
class STBenchTest(unittest.TestCase):
def test_LFR(self):
n = 250
tau1 = 3
tau2 = 1.5
mu = 0.1
G, coms = benchmark.LFR(n, tau1, tau2, mu, average_degree=5, min_community=20)
self.assertIsInstance(G, nx.Graph)
self.assertIsInstance(coms, cdlib.NodeClustering)
nodes_coms = sum([len(c) for c in coms.communities])
self.assertEqual(nodes_coms, G.number_of_nodes())
def test_xmark(self):
N = 2000
gamma = 3
beta = 2
m_cat = ["auto", "auto"]
theta = 0.3
mu = 0.5
avg_k = 10
min_com = 20
g, coms = benchmark.XMark(
n=N,
gamma=gamma,
beta=beta,
mu=mu,
m_cat=m_cat,
theta=theta,
avg_k=avg_k,
min_com=min_com,
type_attr="categorical",
)
set1 = nx.get_node_attributes(g, "label_0")
set2 = nx.get_node_attributes(g, "label_1")
self.assertIsInstance(g, nx.Graph)
self.assertIsInstance(coms, cdlib.NodeClustering)
self.assertEqual(len(set(set1.values())), len(coms.communities))
self.assertEqual(len(set(set2.values())), len(coms.communities))
def test_grp(self):
g, coms = benchmark.GRP(100, 10, 10, 0.25, 0.1)
self.assertIsInstance(g, nx.Graph)
self.assertIsInstance(coms, cdlib.NodeClustering)
def test_planted_partitions(self):
g, coms = benchmark.PP(4, 3, 0.5, 0.1, seed=42)
self.assertIsInstance(g, nx.Graph)
self.assertIsInstance(coms, cdlib.NodeClustering)
def test_RPG(self):
g, coms = benchmark.RPG([10, 10, 10], 0.25, 0.01)
self.assertIsInstance(g, nx.Graph)
self.assertIsInstance(coms, cdlib.NodeClustering)
def test_SBM(self):
sizes = [75, 75, 300]
probs = [[0.25, 0.05, 0.02], [0.05, 0.35, 0.07], [0.02, 0.07, 0.40]]
g, coms = benchmark.SBM(sizes, probs, seed=0)
self.assertIsInstance(g, nx.Graph)
self.assertIsInstance(coms, cdlib.NodeClustering)
|
OmniDB/OmniDB_app/tests.old/test_postgresql96.py | lejmr/OmniDB | 2,982 | 11082577 | <filename>OmniDB/OmniDB_app/tests.old/test_postgresql96.py
from django.test import TestCase, Client
from django.http import JsonResponse
import json
from datetime import datetime, timedelta
import OmniDB_app.include.Spartacus as Spartacus
import OmniDB_app.include.Spartacus.Database as Database
import OmniDB_app.include.Spartacus.Utils as Utils
import OmniDB_app.include.OmniDatabase as OmniDatabase
class PostgreSQL(TestCase):
@classmethod
def setUpClass(self):
super(PostgreSQL, self).setUpClass()
self.host = '127.0.0.1'
self.port = '5496'
self.service = 'omnidb_tests'
self.role = 'omnidb'
self.password = '<PASSWORD>'
self.database = OmniDatabase.Generic.InstantiateDatabase(
'postgresql',
self.host,
self.port,
self.service,
self.role,
0,
'OmniDB Tests'
)
self.database.v_connection.v_password = self.password
self.cn = Client()
self.cs = Client()
response = self.cs.post('/sign_in/', {'data': '{"p_username": "admin", "p_pwd": "<PASSWORD>"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 0 <= data['v_data']
session = self.cs.session
assert 'admin' == session['omnidb_session'].v_user_name
session['omnidb_session'].v_databases = [{
'database': self.database,
'prompt_password': False,
'prompt_timeout': datetime.now() + timedelta(0,60000)
}]
session['omnidb_session'].v_tab_connections = {0: self.database}
session.save()
@classmethod
def lists_equal(self, p_list_a, p_list_b):
equal = True
equal = len(p_list_a) == len(p_list_b)
k = 0
while k < len(p_list_a) and equal:
if p_list_a[k] != p_list_b[k]:
equal = False
k = k + 1
return equal
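    # The template tests below all follow the same pattern: POST to
    # /get_tree_info_postgresql/ with the authenticated client and assert that
    # the returned v_database_return dictionary contains the expected SQL
    # template for each PostgreSQL 9.6 object type.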
def test_get_tree_info_postgresql_nosession(self):
response = self.cn.post('/get_tree_info_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_tree_info_postgresql_session(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'database' == data['v_data']['v_mode']
def test_template_create_tablespace(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE TABLESPACE name
LOCATION 'directory'
--OWNER new_owner | CURRENT_USER | SESSION_USER
--WITH ( tablespace_option = value [, ... ] )
''' == data['v_data']['v_database_return']['create_tablespace']
def test_template_alter_tablespace(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLESPACE #tablespace_name#
--RENAME TO new_name
--OWNER TO { new_owner | CURRENT_USER | SESSION_USER }
--SET seq_page_cost = value
--RESET seq_page_cost
--SET random_page_cost = value
--RESET random_page_cost
--SET effective_io_concurrency = value
--RESET effective_io_concurrency
''' == data['v_data']['v_database_return']['alter_tablespace']
def test_template_drop_tablespace(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'DROP TABLESPACE #tablespace_name#' == data['v_data']['v_database_return']['drop_tablespace']
def test_template_create_role(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE ROLE name
--[ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'
--SUPERUSER | NOSUPERUSER
--CREATEDB | NOCREATEDB
--CREATEROLE | NOCREATEROLE
--INHERIT | NOINHERIT
--LOGIN | NOLOGIN
--REPLICATION | NOREPLICATION
--BYPASSRLS | NOBYPASSRLS
--CONNECTION LIMIT connlimit
--VALID UNTIL 'timestamp'
--IN ROLE role_name [, ...]
--IN GROUP role_name [, ...]
--ROLE role_name [, ...]
--ADMIN role_name [, ...]
--USER role_name [, ...]
--SYSID uid
''' == data['v_data']['v_database_return']['create_role']
def test_template_alter_role(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER ROLE #role_name#
--SUPERUSER | NOSUPERUSER
--CREATEDB | NOCREATEDB
--CREATEROLE | NOCREATEROLE
--INHERIT | NOINHERIT
--LOGIN | NOLOGIN
--REPLICATION | NOREPLICATION
--BYPASSRLS | NOBYPASSRLS
--CONNECTION LIMIT connlimit
--[ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'
--VALID UNTIL 'timestamp'
--RENAME TO new_name
--[ IN DATABASE database_name ] SET configuration_parameter TO { value | DEFAULT }
--[ IN DATABASE database_name ] SET configuration_parameter FROM CURRENT
--[ IN DATABASE database_name ] RESET configuration_parameter
--[ IN DATABASE database_name ] RESET ALL
''' == data['v_data']['v_database_return']['alter_role']
def test_template_drop_role(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'DROP ROLE #role_name#' == data['v_data']['v_database_return']['drop_role']
def test_template_create_database(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE DATABASE name
--OWNER user_name
--TEMPLATE template
--ENCODING encoding
--LC_COLLATE lc_collate
--LC_CTYPE lc_ctype
--TABLESPACE tablespace
--CONNECTION LIMIT connlimit
''' == data['v_data']['v_database_return']['create_database']
def test_template_alter_database(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER DATABASE #database_name#
--ALLOW_CONNECTIONS allowconn
--CONNECTION LIMIT connlimit
--IS_TEMPLATE istemplate
--RENAME TO new_name
--OWNER TO { new_owner | CURRENT_USER | SESSION_USER }
--SET TABLESPACE new_tablespace
--SET configuration_parameter TO { value | DEFAULT }
--SET configuration_parameter FROM CURRENT
--RESET configuration_parameter
--RESET ALL
''' == data['v_data']['v_database_return']['alter_database']
def test_template_drop_database(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'DROP DATABASE #database_name#' == data['v_data']['v_database_return']['drop_database']
def test_template_create_extension(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE EXTENSION name
--SCHEMA schema_name
--VERSION VERSION
--FROM old_version
''' == data['v_data']['v_database_return']['create_extension']
def test_template_alter_extension(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER EXTENSION #extension_name#
--UPDATE [ TO new_version ]
--SET SCHEMA new_schema
--ADD member_object
--DROP member_object
''' == data['v_data']['v_database_return']['alter_extension']
def test_template_drop_extension(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP EXTENSION #extension_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_extension']
def test_template_create_schema(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE SCHEMA schema_name
--AUTHORIZATION [ GROUP ] user_name | CURRENT_USER | SESSION_USER
''' == data['v_data']['v_database_return']['create_schema']
def test_template_alter_schema(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER SCHEMA #schema_name#
--RENAME TO new_name
--OWNER TO { new_owner | CURRENT_USER | SESSION_USER }
''' == data['v_data']['v_database_return']['alter_schema']
def test_template_drop_schema(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP SCHEMA #schema_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_schema']
def test_template_drop_table(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP TABLE #table_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_table']
def test_template_create_sequence(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE SEQUENCE #schema_name#.name
--INCREMENT BY increment
--MINVALUE minvalue | NO MINVALUE
--MAXVALUE maxvalue | NO MAXVALUE
--START WITH start
--CACHE cache
--CYCLE
--OWNED BY { table_name.column_name | NONE }
''' == data['v_data']['v_database_return']['create_sequence']
def test_template_alter_sequence(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER SEQUENCE #sequence_name#
--INCREMENT BY increment
--MINVALUE minvalue | NO MINVALUE
--MAXVALUE maxvalue | NO MAXVALUE
--START WITH start
--RESTART
--RESTART WITH restart
--CACHE cache
--CYCLE
--NO CYCLE
--OWNED BY { table_name.column_name | NONE }
--OWNER TO { new_owner | CURRENT_USER | SESSION_USER }
--RENAME TO new_name
--SET SCHEMA new_schema
''' == data['v_data']['v_database_return']['alter_sequence']
def test_template_drop_sequence(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP SEQUENCE #sequence_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_sequence']
def test_template_create_function(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE OR REPLACE FUNCTION #schema_name#.name
--(
-- [ argmode ] [ argname ] argtype [ { DEFAULT | = } default_expr ]
--)
--RETURNS rettype
--RETURNS TABLE ( column_name column_type )
LANGUAGE plpgsql
--IMMUTABLE | STABLE | VOLATILE
--STRICT
--SECURITY DEFINER
--COST execution_cost
--ROWS result_rows
AS
$function$
--DECLARE
-- variables
BEGIN
-- definition
END;
$function$
''' == data['v_data']['v_database_return']['create_function']
def test_template_drop_function(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP FUNCTION #function_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_function']
def test_template_create_triggerfunction(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE OR REPLACE FUNCTION #schema_name#.name()
RETURNS trigger
LANGUAGE plpgsql
--IMMUTABLE | STABLE | VOLATILE
--COST execution_cost
AS
$function$
--DECLARE
-- variables
BEGIN
-- definition
END;
$function$
''' == data['v_data']['v_database_return']['create_triggerfunction']
def test_template_drop_triggerfunction(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP FUNCTION #function_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_triggerfunction']
def test_template_create_view(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE OR REPLACE VIEW #schema_name#.name AS
SELECT ...
''' == data['v_data']['v_database_return']['create_view']
def test_template_drop_view(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP VIEW #view_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_view']
def test_template_create_mview(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE MATERIALIZED VIEW #schema_name#.name AS
SELECT ...
--WITH NO DATA
''' == data['v_data']['v_database_return']['create_mview']
def test_template_refresh_mview(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''REFRESH MATERIALIZED VIEW #view_name#
--CONCURRENTLY
--WITH NO DATA
''' == data['v_data']['v_database_return']['refresh_mview']
def test_template_drop_mview(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP MATERIALIZED VIEW #view_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_mview']
def test_template_create_column(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
ADD COLUMN name data_type
--COLLATE collation
--column_constraint [ ... ] ]
''' == data['v_data']['v_database_return']['create_column']
def test_template_alter_column(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
--ALTER COLUMN #column_name#
--RENAME COLUMN #column_name# TO new_column
--TYPE data_type [ COLLATE collation ] [ USING expression ]
--SET DEFAULT expression
--DROP DEFAULT
--SET NOT NULL
--DROP NOT NULL
--SET STATISTICS integer
--SET ( attribute_option = value [, ... ] )
--RESET ( attribute_option [, ... ] )
--SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }
''' == data['v_data']['v_database_return']['alter_column']
def test_template_drop_column(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
DROP COLUMN #column_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_column']
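    # Constraint templates: primary key, unique and foreign key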
def test_template_create_primarykey(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
ADD CONSTRAINT name
PRIMARY KEY ( column_name [, ... ] )
--WITH ( storage_parameter [= value] [, ... ] )
--WITH OIDS
--WITHOUT OIDS
--USING INDEX TABLESPACE tablespace_name
''' == data['v_data']['v_database_return']['create_primarykey']
def test_template_drop_primarykey(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
DROP CONSTRAINT #constraint_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_primarykey']
def test_template_create_unique(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
ADD CONSTRAINT name
UNIQUE ( column_name [, ... ] )
--WITH ( storage_parameter [= value] [, ... ] )
--WITH OIDS
--WITHOUT OIDS
--USING INDEX TABLESPACE tablespace_name
''' == data['v_data']['v_database_return']['create_unique']
def test_template_drop_unique(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
DROP CONSTRAINT #constraint_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_unique']
def test_template_create_foreignkey(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
ADD CONSTRAINT name
FOREIGN KEY ( column_name [, ... ] )
REFERENCES reftable [ ( refcolumn [, ... ] ) ]
--MATCH { FULL | PARTIAL | SIMPLE }
--ON DELETE { NO ACTION | RESTRICT | CASCADE | SET NULL | SET DEFAULT }
--ON UPDATE { NO ACTION | RESTRICT | CASCADE | SET NULL | SET DEFAULT }
--NOT VALID
''' == data['v_data']['v_database_return']['create_foreignkey']
def test_template_drop_foreignkey(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
DROP CONSTRAINT #constraint_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_foreignkey']
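    # Index templates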
def test_template_create_index(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] name
ON #table_name#
--USING method
( { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [, ...] )
--WITH ( storage_parameter = value [, ... ] )
--WHERE predicate
''' == data['v_data']['v_database_return']['create_index']
def test_template_alter_index(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER INDEX #index_name#
--RENAME to new_name
--SET TABLESPACE tablespace_name
--SET ( storage_parameter = value [, ... ] )
--RESET ( storage_parameter [, ... ] )
''' == data['v_data']['v_database_return']['alter_index']
def test_template_drop_index(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP INDEX [ CONCURRENTLY ] #index_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_index']
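    # Check and exclude constraint templates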
def test_template_create_check(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
ADD CONSTRAINT name
CHECK ( expression )
''' == data['v_data']['v_database_return']['create_check']
def test_template_drop_check(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
DROP CONSTRAINT #constraint_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_check']
def test_template_create_exclude(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
ADD CONSTRAINT name
--USING index_method
EXCLUDE ( exclude_element WITH operator [, ... ] )
--index_parameters
--WHERE ( predicate )
''' == data['v_data']['v_database_return']['create_exclude']
def test_template_drop_exclude(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
DROP CONSTRAINT #constraint_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_exclude']
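    # Rule templates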
def test_template_create_rule(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE RULE name
AS ON { SELECT | INSERT | UPDATE | DELETE }
TO #table_name#
--WHERE condition
--DO ALSO { NOTHING | command | ( command ; command ... ) }
--DO INSTEAD { NOTHING | command | ( command ; command ... ) }
''' == data['v_data']['v_database_return']['create_rule']
def test_template_alter_rule(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ALTER RULE #rule_name# ON #table_name# RENAME TO new_name' == data['v_data']['v_database_return']['alter_rule']
def test_template_drop_rule(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP RULE #rule_name# ON #table_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_rule']
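    # Trigger templates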
def test_template_create_trigger(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE TRIGGER name
--BEFORE { INSERT [ OR ] | UPDATE [ OF column_name [, ... ] ] [ OR ] | DELETE [ OR ] | TRUNCATE }
--AFTER { INSERT [ OR ] | UPDATE [ OF column_name [, ... ] ] [ OR ] | DELETE [ OR ] | TRUNCATE }
ON #table_name#
--FROM referenced_table_name
--NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED }
--FOR EACH ROW
--FOR EACH STATEMENT
--WHEN ( condition )
--EXECUTE PROCEDURE function_name ( arguments )
''' == data['v_data']['v_database_return']['create_trigger']
def test_template_create_view_trigger(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE TRIGGER name
--BEFORE { INSERT [ OR ] | UPDATE [ OF column_name [, ... ] ] [ OR ] | DELETE }
--AFTER { INSERT [ OR ] | UPDATE [ OF column_name [, ... ] ] [ OR ] | DELETE }
--INSTEAD OF { INSERT [ OR ] | UPDATE [ OF column_name [, ... ] ] [ OR ] | DELETE }
ON #table_name#
--FROM referenced_table_name
--NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED }
--FOR EACH ROW
--FOR EACH STATEMENT
--WHEN ( condition )
--EXECUTE PROCEDURE function_name ( arguments )
''' == data['v_data']['v_database_return']['create_view_trigger']
def test_template_alter_trigger(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ALTER TRIGGER #trigger_name# ON #table_name# RENAME TO new_name' == data['v_data']['v_database_return']['alter_trigger']
def test_template_enable_trigger(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ALTER TABLE #table_name# ENABLE TRIGGER #trigger_name#' == data['v_data']['v_database_return']['enable_trigger']
def test_template_disable_trigger(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ALTER TABLE #table_name# DISABLE TRIGGER #trigger_name#' == data['v_data']['v_database_return']['disable_trigger']
def test_template_drop_trigger(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP TRIGGER #trigger_name# ON #table_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_trigger']
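    # Table inheritance and partition templates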
def test_template_create_inherited(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE TABLE name (
CHECK ( condition )
) INHERITS (#table_name#)
''' == data['v_data']['v_database_return']['create_inherited']
def test_template_noinherit_partition(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ALTER TABLE #partition_name# NO INHERIT #table_name#' == data['v_data']['v_database_return']['noinherit_partition']
def test_template_drop_partition(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'DROP TABLE #partition_name#' == data['v_data']['v_database_return']['drop_partition']
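    # Maintenance templates (VACUUM, ANALYZE, TRUNCATE)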
def test_template_vacuum(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''VACUUM
--FULL
--FREEZE
--ANALYZE
''' == data['v_data']['v_database_return']['vacuum']
def test_template_vacuum_table(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''VACUUM
--FULL
--FREEZE
--ANALYZE
#table_name#
--(column_name, [, ...])
''' == data['v_data']['v_database_return']['vacuum_table']
def test_template_analyze(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ANALYZE' == data['v_data']['v_database_return']['analyze']
def test_template_analyze_table(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ANALYZE #table_name#
--(column_name, [, ...])
''' == data['v_data']['v_database_return']['analyze_table']
def test_template_truncate(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''TRUNCATE
--ONLY
#table_name#
--RESTART IDENTITY
--CASCADE
''' == data['v_data']['v_database_return']['truncate']
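    # Replication slot templates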
def test_template_create_physicalreplicationslot(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''SELECT * FROM pg_create_physical_replication_slot('slot_name')''' == data['v_data']['v_database_return']['create_physicalreplicationslot']
def test_template_drop_physicalreplicationslot(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''SELECT pg_drop_replication_slot('#slot_name#')''' == data['v_data']['v_database_return']['drop_physicalreplicationslot']
def test_template_create_logicalreplicationslot(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''SELECT * FROM pg_create_logical_replication_slot('slot_name', 'test_decoding')''' == data['v_data']['v_database_return']['create_logicalreplicationslot']
def test_template_drop_logicalreplicationslot(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''SELECT pg_drop_replication_slot('#slot_name#')''' == data['v_data']['v_database_return']['drop_logicalreplicationslot']
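    # Built-in logical replication templates (publications and subscriptions)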
def test_template_create_publication(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE PUBLICATION name
--FOR TABLE [ ONLY ] table_name [ * ] [, ...]
--FOR ALL TABLES
--WITH ( publish = 'insert, update, delete' )
''' == data['v_data']['v_database_return']['create_publication']
def test_template_alter_publication(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER PUBLICATION #pub_name#
--ADD TABLE [ ONLY ] table_name [ * ] [, ...]
--SET TABLE [ ONLY ] table_name [ * ] [, ...]
--DROP TABLE [ ONLY ] table_name [ * ] [, ...]
--SET ( publish = 'insert, update, delete' )
--OWNER TO { new_owner | CURRENT_USER | SESSION_USER }
--RENAME TO new_name
''' == data['v_data']['v_database_return']['alter_publication']
def test_template_drop_publication(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP PUBLICATION #pub_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_publication']
def test_template_add_pubtable(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ALTER PUBLICATION #pub_name# ADD TABLE table_name' == data['v_data']['v_database_return']['add_pubtable']
def test_template_drop_pubtable(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ALTER PUBLICATION #pub_name# DROP TABLE #table_name#' == data['v_data']['v_database_return']['drop_pubtable']
def test_template_create_subscription(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE SUBSCRIPTION name
CONNECTION 'conninfo'
PUBLICATION pub_name [, ...]
--WITH (
--copy_data = { true | false }
--, create_slot = { true | false }
--, enabled = { true | false }
--, slot_name = 'name'
--, synchronous_commit = { on | remote_apply | remote_write | local | off }
--, connect = { true | false }
--)
''' == data['v_data']['v_database_return']['create_subscription']
def test_template_alter_subscription(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER SUBSCRIPTION #sub_name#
--CONNECTION 'conninfo'
--SET PUBLICATION pub_name [, ...] [ WITH ( refresh = { true | false } ) ]
--REFRESH PUBLICATION [ WITH ( copy_data = { true | false } ) ]
--ENABLE
--DISABLE
--SET (
--slot_name = 'name'
--, synchronous_commit = { on | remote_apply | remote_write | local | off }
--)
--OWNER TO { new_owner | CURRENT_USER | SESSION_USER }
--RENAME TO new_name
''' == data['v_data']['v_database_return']['alter_subscription']
def test_template_drop_subscription(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP SUBSCRIPTION #sub_name#
--CASCADE
''' == data['v_data']['v_database_return']['drop_subscription']
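    # pglogical extension templates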
def test_template_pglogical_drop_node(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.drop_node(
node_name := '#node_name#',
ifexists := true
)''' == data['v_data']['v_database_return']['pglogical_drop_node']
def test_template_pglogical_add_interface(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.alter_node_add_interface(
node_name := '#node_name#',
interface_name := 'name',
dsn := 'host= port= dbname= user= password='
)''' == data['v_data']['v_database_return']['pglogical_add_interface']
def test_template_pglogical_drop_interface(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.alter_node_drop_interface(
node_name := '#node_name#',
interface_name := '#interface_name#'
)''' == data['v_data']['v_database_return']['pglogical_drop_interface']
def test_template_pglogical_create_repset(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.create_replication_set(
set_name := 'name',
replicate_insert := true,
replicate_update := true,
replicate_delete := true,
replicate_truncate := true
)''' == data['v_data']['v_database_return']['pglogical_create_repset']
def test_template_pglogical_alter_repset(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.alter_replication_set(
set_name := '#repset_name#',
replicate_insert := true,
replicate_update := true,
replicate_delete := true,
replicate_truncate := true
)''' == data['v_data']['v_database_return']['pglogical_alter_repset']
def test_template_pglogical_drop_repset(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.drop_replication_set(
set_name := '#repset_name#',
ifexists := true
)''' == data['v_data']['v_database_return']['pglogical_drop_repset']
def test_template_pglogical_repset_add_table(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.replication_set_add_table(
set_name := '#repset_name#',
relation := 'schema.table'::regclass,
synchronize_data := true,
columns := null,
row_filter := null
)''' == data['v_data']['v_database_return']['pglogical_repset_add_table']
def test_template_pglogical_repset_add_all_tables(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.replication_set_add_all_tables(
set_name := '#repset_name#',
schema_names := ARRAY['public'],
synchronize_data := true
)''' == data['v_data']['v_database_return']['pglogical_repset_add_all_tables']
def test_template_pglogical_repset_remove_table(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.replication_set_remove_table(
set_name := '#repset_name#',
relation := '#table_name#'::regclass
)''' == data['v_data']['v_database_return']['pglogical_repset_remove_table']
def test_template_pglogical_repset_add_seq(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.replication_set_add_sequence(
set_name := '#repset_name#',
relation := 'schema.sequence'::regclass,
synchronize_data := true
)''' == data['v_data']['v_database_return']['pglogical_repset_add_seq']
def test_template_pglogical_repset_add_all_seqs(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.replication_set_add_all_sequences(
set_name := '#repset_name#',
schema_names := ARRAY['public'],
synchronize_data := true
)''' == data['v_data']['v_database_return']['pglogical_repset_add_all_seqs']
def test_template_pglogical_repset_remove_seq(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.replication_set_remove_sequence(
set_name := '#repset_name#',
relation := '#sequence_name#'::regclass
)''' == data['v_data']['v_database_return']['pglogical_repset_remove_seq']
def test_template_pglogical_create_sub(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.create_subscription(
subscription_name := 'sub_name',
provider_dsn := 'host= port= dbname= user= password=',
replication_sets := array['default','default_insert_only','ddl_sql'],
synchronize_structure := true,
synchronize_data := true,
forward_origins := array['all'],
apply_delay := '0 seconds'::interval
)''' == data['v_data']['v_database_return']['pglogical_create_sub']
def test_template_pglogical_enable_sub(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.alter_subscription_enable(
subscription_name := '#sub_name#',
immediate := true
)''' == data['v_data']['v_database_return']['pglogical_enable_sub']
def test_template_pglogical_disable_sub(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.alter_subscription_disable(
subscription_name := '#sub_name#',
immediate := true
)''' == data['v_data']['v_database_return']['pglogical_disable_sub']
def test_template_pglogical_sync_sub(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.alter_subscription_synchronize(
subscription_name := '#sub_name#',
truncate := true
)''' == data['v_data']['v_database_return']['pglogical_sync_sub']
def test_template_pglogical_drop_sub(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.drop_subscription(
subscription_name := '#sub_name#',
ifexists := true
)''' == data['v_data']['v_database_return']['pglogical_drop_sub']
def test_template_pglogical_sub_add_repset(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.alter_subscription_add_replication_set(
subscription_name := '#sub_name#',
replication_set := 'set_name'
)''' == data['v_data']['v_database_return']['pglogical_sub_add_repset']
def test_template_pglogical_sub_remove_repset(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select pglogical.alter_subscription_remove_replication_set(
subscription_name := '#sub_name#',
replication_set := '#set_name#'
)''' == data['v_data']['v_database_return']['pglogical_sub_remove_repset']
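    # BDR extension templates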
def test_template_bdr_join_wait(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'select bdr.bdr_node_join_wait_for_ready()' == data['v_data']['v_database_return']['bdr_join_wait']
def test_template_bdr_pause(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'select bdr.bdr_apply_pause()' == data['v_data']['v_database_return']['bdr_pause']
def test_template_bdr_resume(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'select bdr.bdr_apply_resume()' == data['v_data']['v_database_return']['bdr_resume']
def test_template_bdr_replicate_ddl_command(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert "select bdr.bdr_replicate_ddl_command('DDL command here...')" == data['v_data']['v_database_return']['bdr_replicate_ddl_command']
def test_template_bdr_part_node(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert "select bdr.bdr_part_by_node_names('{#node_name#}')" == data['v_data']['v_database_return']['bdr_part_node']
def test_template_bdr_insert_repset(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''INSERT INTO bdr.bdr_replication_set_config (set_name, replicate_inserts, replicate_updates, replicate_deletes)
VALUES ('set_name', 't', 't', 't')
''' == data['v_data']['v_database_return']['bdr_insert_repset']
def test_template_bdr_update_repset(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''UPDATE bdr.bdr_replication_set_config SET
--replicate_inserts = { 't' | 'f' }
--, replicate_updates = { 't' | 'f' }
--, replicate_deletes = { 't' | 'f' }
WHERE set_name = '#set_name#'
''' == data['v_data']['v_database_return']['bdr_update_repset']
def test_template_bdr_delete_repset(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DELETE
FROM bdr.bdr_replication_set_config
WHERE set_name = '#set_name#'
''' == data['v_data']['v_database_return']['bdr_delete_repset']
def test_template_bdr_set_repsets(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert "select bdr.table_set_replication_sets('#table_name#', '{repset1,repset2,...}')" == data['v_data']['v_database_return']['bdr_set_repsets']
def test_template_bdr_create_confhand(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE OR REPLACE FUNCTION #table_name#_fnc_conflict_handler (
row1 #table_name#,
row2 #table_name#,
table_name text,
table_regclass regclass,
conflict_type bdr.bdr_conflict_type, /* [insert_insert | insert_update | update_update | update_delete | delete_delete | unhandled_tx_abort] */
OUT row_out #table_name#,
OUT handler_action bdr.bdr_conflict_handler_action) /* [IGNORE | ROW | SKIP] */
RETURNS record AS
$BODY$
BEGIN
raise warning 'conflict detected for #table_name#, old_row: %, incoming_row: %', row1, row2;
-- sample code to choose the output row or to merge values
row_out := row1;
handler_action := 'ROW';
END;
$BODY$
LANGUAGE plpgsql;
-- after writing the handler procedure we also need to register it as an handler
select *
from bdr.bdr_create_conflict_handler(
ch_rel := '#table_name#',
ch_name := '#table_name#_conflict_handler',
ch_proc := '#table_name#_fnc_conflict_handler(#table_name#, #table_name#, text, regclass, bdr.bdr_conflict_type)',
ch_type := 'insert_insert' /* [insert_insert | insert_update | update_update | update_delete | delete_delete | unhandled_tx_abort] */
)
''' == data['v_data']['v_database_return']['bdr_create_confhand']
def test_template_bdr_drop_confhand(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert "select bdr.bdr_drop_conflict_handler('#table_name#', '#ch_name#')" == data['v_data']['v_database_return']['bdr_drop_confhand']
    # The following templates are only available in BDR >= 1
def test_template_bdr_terminate_apply(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert "select bdr.terminate_apply_workers('{#node_name#}')" == data['v_data']['v_database_return']['bdr_terminate_apply']
def test_template_bdr_terminate_walsender(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert "select bdr.terminate_walsender_workers('{#node_name#}')" == data['v_data']['v_database_return']['bdr_terminate_walsender']
def test_template_bdr_remove(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''select bdr.remove_bdr_from_local_node(
force := False
, convert_global_sequences := True
)
''' == data['v_data']['v_database_return']['bdr_remove']
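    # Postgres-XL cluster templates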
def test_template_xl_pause_cluster(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'PAUSE CLUSTER' == data['v_data']['v_database_return']['xl_pause_cluster']
def test_template_xl_unpause_cluster(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'UNPAUSE CLUSTER' == data['v_data']['v_database_return']['xl_unpause_cluster']
def test_template_xl_clean_connection(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CLEAN CONNECTION TO
--COORDINATOR ( nodename [, ... ] )
--NODE ( nodename [, ... ] )
--ALL
--ALL FORCE
--FOR DATABASE database_name
--TO USER role_name
''' == data['v_data']['v_database_return']['xl_clean_connection']
def test_template_xl_execute_direct(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''EXECUTE DIRECT ON (#node_name#)
'SELECT ...'
''' == data['v_data']['v_database_return']['xl_execute_direct']
def test_template_xl_pool_reload(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
        assert "EXECUTE DIRECT ON (#node_name#) 'SELECT pgxc_pool_reload()'" == data['v_data']['v_database_return']['xl_pool_reload']
def test_template_xl_altertable_distribution(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name# DISTRIBUTE BY
--REPLICATION
--ROUNDROBIN
--HASH ( column_name )
--MODULO ( column_name )
''' == data['v_data']['v_database_return']['xl_altertable_distribution']
def test_template_xl_altertable_location(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''ALTER TABLE #table_name#
TO NODE ( nodename [, ... ] )
--TO GROUP ( groupname [, ... ] )
''' == data['v_data']['v_database_return']['xl_altertable_location']
def test_template_xl_altertable_addnode(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ALTER TABLE #table_name# ADD NODE (node_name)' == data['v_data']['v_database_return']['xl_altertable_addnode']
def test_template_xl_altertable_deletenode(self):
response = self.cs.post('/get_tree_info_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'ALTER TABLE #table_name# DELETE NODE (#node_name#)' == data['v_data']['v_database_return']['xl_altertable_deletenode']
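    # Tree metadata retrieval endpoints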
def test_get_tables_postgresql_nosession(self):
response = self.cn.post('/get_tables_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_tables_postgresql_session(self):
response = self.cs.post('/get_tables_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], [
'categories',
'cust_hist',
'customers',
'inventory',
'orderlines',
'orders',
'products',
'reorder'
])
def test_get_schemas_postgresql_nosession(self):
response = self.cn.post('/get_schemas_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_schemas_postgresql_session(self):
response = self.cs.post('/get_schemas_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], [
'public',
'pg_catalog',
'information_schema'
])
def test_get_columns_postgresql_nosession(self):
response = self.cn.post('/get_columns_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_columns_postgresql_session(self):
response = self.cs.post('/get_columns_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "orders"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_column_name'] for a in data['v_data']], [
'orderid',
'orderdate',
'customerid',
'netamount',
'tax',
'totalamount'
])
def test_get_pk_postgresql_nosession(self):
response = self.cn.post('/get_pk_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_pk_postgresql_session(self):
response = self.cs.post('/get_pk_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "orders"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['orders_pkey'])
def test_get_pk_columns_postgresql_nosession(self):
response = self.cn.post('/get_pk_columns_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_pk_columns_postgresql_session(self):
response = self.cs.post('/get_pk_columns_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_key": "orders_pkey", "p_schema": "public", "p_table": "orders"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['orderid'])
def test_get_fks_postgresql_nosession(self):
response = self.cn.post('/get_fks_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_fks_postgresql_session(self):
response = self.cs.post('/get_fks_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "orders"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['fk_customerid'])
def test_get_fks_columns_postgresql_nosession(self):
response = self.cn.post('/get_fks_columns_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_fks_columns_postgresql_session(self):
response = self.cs.post('/get_fks_columns_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_fkey": "fk_customerid", "p_schema": "public", "p_table": "orders"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[3] for a in data['v_data']], ['customerid'])
def test_get_uniques_postgresql_nosession(self):
response = self.cn.post('/get_uniques_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_uniques_postgresql_session(self):
self.database.v_connection.Execute('alter table public.categories add constraint un_test unique (categoryname)')
response = self.cs.post('/get_uniques_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "categories"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['un_test'])
self.database.v_connection.Execute('alter table public.categories drop constraint un_test')
def test_get_uniques_columns_postgresql_nosession(self):
response = self.cn.post('/get_uniques_columns_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_uniques_columns_postgresql_session(self):
self.database.v_connection.Execute('alter table public.categories add constraint un_test unique (categoryname)')
response = self.cs.post('/get_uniques_columns_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_unique": "un_test", "p_schema": "public", "p_table": "categories"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['categoryname'])
self.database.v_connection.Execute('alter table public.categories drop constraint un_test')
def test_get_indexes_postgresql_nosession(self):
response = self.cn.post('/get_indexes_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_indexes_postgresql_session(self):
response = self.cs.post('/get_indexes_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "orders"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['ix_order_custid', 'orders_pkey'])
def test_get_indexes_columns_postgresql_nosession(self):
response = self.cn.post('/get_indexes_columns_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_indexes_columns_postgresql_session(self):
response = self.cs.post('/get_indexes_columns_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_index": "ix_order_custid", "p_schema": "public", "p_table": "orders"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['customerid'])
def test_get_functions_postgresql_nosession(self):
response = self.cn.post('/get_functions_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_functions_postgresql_session(self):
response = self.cs.post('/get_functions_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], ['new_customer'])
def test_get_function_fields_postgresql_nosession(self):
response = self.cn.post('/get_function_fields_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_function_fields_postgresql_session(self):
response = self.cs.post('/get_function_fields_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_function": "new_customer(character varying, character varying, character varying, character varying, character varying, character varying, integer, character varying, integer, character varying, character varying, integer, character varying, character varying, character varying, character varying, integer, integer, character varying)"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], [
'firstname_in character varying',
'lastname_in character varying',
'address1_in character varying',
'address2_in character varying',
'city_in character varying',
'state_in character varying',
'zip_in integer',
'country_in character varying',
'region_in integer',
'email_in character varying',
'phone_in character varying',
'creditcardtype_in integer',
'creditcard_in character varying',
'creditcardexpiration_in character varying',
'username_in character varying',
'password_in character varying',
'age_in integer',
'income_in integer',
'gender_in character varying',
'OUT customerid_out integer'
])
def test_get_function_definition_postgresql_nosession(self):
response = self.cn.post('/get_function_definition_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_function_definition_postgresql_session(self):
response = self.cs.post('/get_function_definition_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_function": "new_customer(character varying, character varying, character varying, character varying, character varying, character varying, integer, character varying, integer, character varying, character varying, integer, character varying, character varying, character varying, character varying, integer, integer, character varying)"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE OR REPLACE FUNCTION public.new_customer(firstname_in character varying, lastname_in character varying, address1_in character varying, address2_in character varying, city_in character varying, state_in character varying, zip_in integer, country_in character varying, region_in integer, email_in character varying, phone_in character varying, creditcardtype_in integer, creditcard_in character varying, creditcardexpiration_in character varying, username_in character varying, password_in character varying, age_in integer, income_in integer, gender_in character varying, OUT customerid_out integer)
RETURNS integer
LANGUAGE plpgsql''' in data['v_data']
def test_get_sequences_postgresql_nosession(self):
response = self.cn.post('/get_sequences_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_sequences_postgresql_session(self):
response = self.cs.post('/get_sequences_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_sequence_name'] for a in data['v_data']], [
'categories_category_seq',
'customers_customerid_seq',
'orders_orderid_seq',
'products_prod_id_seq'
])
def test_get_views_postgresql_nosession(self):
response = self.cn.post('/get_views_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_views_postgresql_session(self):
self.database.v_connection.Execute('create or replace view vw_omnidb_test as select c.customerid, c.firstname, c.lastname, sum(o.totalamount) as totalamount from customers c inner join orders o on o.customerid = c.customerid group by c.customerid, c.firstname, c.lastname')
response = self.cs.post('/get_views_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], ['vw_omnidb_test'])
self.database.v_connection.Execute('drop view vw_omnidb_test')
def test_get_views_columns_postgresql_nosession(self):
response = self.cn.post('/get_views_columns_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_views_columns_postgresql_session(self):
self.database.v_connection.Execute('create or replace view vw_omnidb_test as select c.customerid, c.firstname, c.lastname, sum(o.totalamount) as totalamount from customers c inner join orders o on o.customerid = c.customerid group by c.customerid, c.firstname, c.lastname')
response = self.cs.post('/get_views_columns_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "vw_omnidb_test"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_column_name'] for a in data['v_data']], [
'customerid',
'firstname',
'lastname',
'totalamount'
])
self.database.v_connection.Execute('drop view vw_omnidb_test')
def test_get_view_definition_postgresql_nosession(self):
response = self.cn.post('/get_view_definition_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_view_definition_postgresql_session(self):
self.database.v_connection.Execute('create or replace view vw_omnidb_test as select c.customerid, c.firstname, c.lastname, sum(o.totalamount) as totalamount from customers c inner join orders o on o.customerid = c.customerid group by c.customerid, c.firstname, c.lastname')
response = self.cs.post('/get_view_definition_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_view": "vw_omnidb_test"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE OR REPLACE VIEW public.vw_omnidb_test AS
SELECT c.customerid,
c.firstname,
c.lastname,
sum(o.totalamount) AS totalamount
FROM (customers c
JOIN orders o ON ((o.customerid = c.customerid)))
GROUP BY c.customerid, c.firstname, c.lastname''' in data['v_data']
self.database.v_connection.Execute('drop view vw_omnidb_test')
def test_get_databases_postgresql_nosession(self):
response = self.cn.post('/get_databases_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_databases_postgresql_session(self):
response = self.cs.post('/get_databases_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.service in [a['v_name'] for a in data['v_data']]
def test_get_tablespaces_postgresql_nosession(self):
response = self.cn.post('/get_tablespaces_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_tablespaces_postgresql_session(self):
response = self.cs.post('/get_tablespaces_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 'pg_default' in [a['v_name'] for a in data['v_data']]
def test_get_roles_postgresql_nosession(self):
response = self.cn.post('/get_roles_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_roles_postgresql_session(self):
response = self.cs.post('/get_roles_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.role in [a['v_name'] for a in data['v_data']]
def test_get_checks_postgresql_nosession(self):
response = self.cn.post('/get_checks_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_checks_postgresql_session(self):
self.database.v_connection.Execute("alter table public.categories add constraint ch_test check ( position(' ' in categoryname) = 0 )")
response = self.cs.post('/get_checks_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "categories"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['ch_test'])
self.database.v_connection.Execute('alter table public.categories drop constraint ch_test')
def test_get_excludes_postgresql_nosession(self):
response = self.cn.post('/get_excludes_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_excludes_postgresql_session(self):
self.database.v_connection.Execute('alter table public.categories add constraint ex_test exclude (categoryname with = )')
response = self.cs.post('/get_excludes_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "categories"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['ex_test'])
self.database.v_connection.Execute('alter table public.categories drop constraint ex_test')
def test_get_rules_postgresql_nosession(self):
response = self.cn.post('/get_rules_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_rules_postgresql_session(self):
self.database.v_connection.Execute('create rule ru_test as on delete to public.categories do instead nothing')
response = self.cs.post('/get_rules_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "categories"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['ru_test'])
self.database.v_connection.Execute('drop rule ru_test on public.categories')
def test_get_rule_definition_postgresql_nosession(self):
response = self.cn.post('/get_rule_definition_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_rule_definition_postgresql_session(self):
self.database.v_connection.Execute('create rule ru_test as on delete to public.categories do instead nothing')
response = self.cs.post('/get_rule_definition_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "categories", "p_rule": "ru_test"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE OR REPLACE RULE ru_test AS
ON DELETE TO public.categories DO INSTEAD NOTHING;''' in data['v_data']
self.database.v_connection.Execute('drop rule ru_test on public.categories')
def test_get_triggerfunctions_postgresql_nosession(self):
response = self.cn.post('/get_triggerfunctions_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_triggerfunctions_postgresql_session(self):
self.database.v_connection.Execute("create or replace function public.tg_ins_category() returns trigger language plpgsql as $function$begin new.categoryname := old.categoryname || ' modified'; end;$function$")
response = self.cs.post('/get_triggerfunctions_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], ['tg_ins_category'])
self.database.v_connection.Execute('drop function tg_ins_category()')
def test_get_triggerfunction_definition_postgresql_nosession(self):
response = self.cn.post('/get_triggerfunction_definition_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_triggerfunction_definition_postgresql_session(self):
self.database.v_connection.Execute("create or replace function public.tg_ins_category() returns trigger language plpgsql as $function$begin new.categoryname := old.categoryname || ' modified'; end;$function$")
response = self.cs.post('/get_triggerfunction_definition_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_function": "public.tg_ins_category()"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''CREATE OR REPLACE FUNCTION public.tg_ins_category()
RETURNS trigger
LANGUAGE plpgsql
AS $function$begin new.categoryname := old.categoryname || ' modified'; end;$function$''' in data['v_data']
self.database.v_connection.Execute('drop function tg_ins_category()')
def test_get_triggers_postgresql_nosession(self):
response = self.cn.post('/get_triggers_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_triggers_postgresql_session(self):
self.database.v_connection.Execute("create or replace function public.tg_ins_category() returns trigger language plpgsql as $function$begin new.categoryname := old.categoryname || ' modified'; end;$function$")
self.database.v_connection.Execute('create trigger tg_ins before insert on public.categories for each statement execute procedure public.tg_ins_category()')
response = self.cs.post('/get_triggers_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "categories"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['tg_ins'])
self.database.v_connection.Execute('drop trigger tg_ins on public.categories')
self.database.v_connection.Execute('drop function public.tg_ins_category()')
def test_get_inheriteds_postgresql_nosession(self):
response = self.cn.post('/get_inheriteds_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_inheriteds_postgresql_session(self):
self.database.v_connection.Execute('create table public.categories_p1 (check ( category < 100 )) inherits (public.categories)')
response = self.cs.post('/get_inheriteds_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "categories"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a[0] for a in data['v_data']], ['public.categories_p1'])
self.database.v_connection.Execute('alter table public.categories_p1 no inherit public.categories')
self.database.v_connection.Execute('drop table public.categories_p1')
def test_get_mviews_postgresql_nosession(self):
response = self.cn.post('/get_mviews_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_mviews_postgresql_session(self):
self.database.v_connection.Execute('create materialized view public.mvw_omnidb_test as select c.customerid, c.firstname, c.lastname, sum(o.totalamount) as totalamount from customers c inner join orders o on o.customerid = c.customerid group by c.customerid, c.firstname, c.lastname')
response = self.cs.post('/get_mviews_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], ['mvw_omnidb_test'])
self.database.v_connection.Execute('drop materialized view public.mvw_omnidb_test')
def test_get_mviews_columns_postgresql_nosession(self):
response = self.cn.post('/get_mviews_columns_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_mviews_columns_postgresql_session(self):
self.database.v_connection.Execute('create materialized view public.mvw_omnidb_test as select c.customerid, c.firstname, c.lastname, sum(o.totalamount) as totalamount from customers c inner join orders o on o.customerid = c.customerid group by c.customerid, c.firstname, c.lastname')
response = self.cs.post('/get_mviews_columns_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_table": "mvw_omnidb_test"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_column_name'] for a in data['v_data']], [
'customerid',
'firstname',
'lastname',
'totalamount'
])
self.database.v_connection.Execute('drop materialized view public.mvw_omnidb_test')
def test_get_mview_definition_postgresql_nosession(self):
response = self.cn.post('/get_mview_definition_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_mview_definition_postgresql_session(self):
self.database.v_connection.Execute('create materialized view public.mvw_omnidb_test as select c.customerid, c.firstname, c.lastname, sum(o.totalamount) as totalamount from customers c inner join orders o on o.customerid = c.customerid group by c.customerid, c.firstname, c.lastname')
response = self.cs.post('/get_mview_definition_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0, "p_schema": "public", "p_view": "mvw_omnidb_test"}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert '''DROP MATERIALIZED VIEW public.mvw_omnidb_test;
CREATE MATERIALIZED VIEW public.mvw_omnidb_test AS
SELECT c.customerid,
c.firstname,
c.lastname,
sum(o.totalamount) AS totalamount
FROM (customers c
JOIN orders o ON ((o.customerid = c.customerid)))
GROUP BY c.customerid, c.firstname, c.lastname;
''' in data['v_data']
self.database.v_connection.Execute('drop materialized view public.mvw_omnidb_test')
def test_get_extensions_postgresql_nosession(self):
response = self.cn.post('/get_extensions_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_extensions_postgresql_session(self):
response = self.cs.post('/get_extensions_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], ['plpgsql'])
def test_get_physicalreplicationslots_postgresql_nosession(self):
response = self.cn.post('/get_physicalreplicationslots_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_physicalreplicationslots_postgresql_session(self):
self.database.v_connection.Execute("select * from pg_create_physical_replication_slot('test_slot')")
response = self.cs.post('/get_physicalreplicationslots_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], ['test_slot'])
self.database.v_connection.Execute("select pg_drop_replication_slot('test_slot')")
def test_get_logicalreplicationslots_postgresql_nosession(self):
response = self.cn.post('/get_logicalreplicationslots_postgresql/')
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert 1 == data['v_error_id']
def test_get_logicalreplicationslots_postgresql_session(self):
self.database.v_connection.Execute("select * from pg_create_logical_replication_slot('test_slot', 'test_decoding')")
response = self.cs.post('/get_logicalreplicationslots_postgresql/', {'data': '{"p_database_index": 0, "p_tab_id": 0}'})
assert 200 == response.status_code
data = json.loads(response.content.decode())
assert self.lists_equal([a['v_name'] for a in data['v_data']], ['test_slot'])
self.database.v_connection.Execute("select pg_drop_replication_slot('test_slot')")
|
tools/CleanJournalFiles.py | talrasha/Dynamo | 1,202 | 11082582 | <filename>tools/CleanJournalFiles.py<gh_stars>1000+
import os
import fnmatch
import string
def main():
testsPath = os.path.join(os.path.dirname(__file__), "..\\test\\revit")
# find all files to be cleaned
journals = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(testsPath)
for f in fnmatch.filter(files, '*.txt')]
for journal in journals:
# generate a new file name
new_journal = journal.replace('.txt', '_clean.TEST')
with open(new_journal, 'w') as j_new:
with open(journal, 'r') as j:
lines = j.readlines()
start = False
lastline = '_'
for line in lines:
if "Dim Jrn" in line:
start = True
if start is not True:
#write all lines until we
#start processing
j_new.write(line)
continue;
if line == "":
continue
# remove leading whitespace
new_line = line.lstrip()
# remove trailing underscore
# new_line = new_line.rstrip('_')
# remove the line feed
# new_line = new_line.rstrip('\n')
if new_line == "":
continue
if new_line[0] != "'":
j_new.write(new_line)
lastline = line
j.closed
j_new.closed
if __name__ == "__main__":
main() |
pinax/blog/tests/tests.py | shadytradesman/pinax-blog | 315 | 11082598 | from __future__ import absolute_import
import random
from django.core.exceptions import ValidationError
from django.http.request import HttpRequest
from django.urls import reverse
from django.utils.text import slugify
from test_plus import TestCase
from ..context_processors import scoped
from ..models import Blog, Post, Section
ascii_lowercase = "abcdefghijklmnopqrstuvwxyz"
def randomword(length):
return "".join(random.choice(ascii_lowercase) for i in range(length))
class TestBlog(TestCase):
def setUp(self):
"""
Create default Sections and Posts.
"""
self.blog = Blog.objects.first()
self.apples = Section.objects.create(name="Apples", slug="apples")
self.oranges = Section.objects.create(name="Oranges", slug="oranges")
self.user = self.make_user("patrick")
self.markup = "markdown"
# Create two published Posts, one in each section.
self.orange_title = "Orange You Wonderful"
self.orange_slug = slugify(self.orange_title)
self.orange_post = Post.objects.create(blog=self.blog,
section=self.oranges,
title=self.orange_title,
slug=self.orange_slug,
author=self.user,
markup=self.markup,
state=Post.STATE_CHOICES[-1][0])
self.apple_title = "Apple of My Eye"
self.apple_slug = slugify(self.apple_title)
self.apple_post = Post.objects.create(blog=self.blog,
section=self.apples,
title=self.apple_title,
slug=self.apple_slug,
author=self.user,
markup=self.markup,
state=Post.STATE_CHOICES[-1][0])
class TestViewGetSection(TestBlog):
def test_invalid_section_slug(self):
"""
Ensure invalid section slugs do not cause site crash.
"""
invalid_slug = "bananas"
url = reverse("pinax_blog:blog_section", kwargs={"section": invalid_slug})
try:
response = self.get(url)
except Section.DoesNotExist:
self.fail("section '{}' does not exist".format(invalid_slug))
self.assertEqual(response.status_code, 404)
def test_valid_section_slug(self):
"""
Verify that existing section slug works fine
"""
valid_slug = "oranges"
url = reverse("pinax_blog:blog_section", kwargs={"section": valid_slug})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class TestViewGetPosts(TestBlog):
def test_section_posts(self):
"""
Verify only the expected Post is in context for section "orange".
"""
url = reverse("pinax_blog:blog_section", kwargs={"section": "oranges"})
response = self.client.get(url)
self.assertIn(self.orange_post, response.context_data["post_list"])
self.assertNotIn(self.apple_post, response.context_data["post_list"])
def test_all_posts(self):
"""
Verify all Posts are in context for All.
"""
url = reverse("pinax_blog:blog")
response = self.client.get(url)
self.assertEqual(response.context_data["post_list"].count(), 2)
self.assertIn(self.orange_post, response.context_data["post_list"])
self.assertIn(self.apple_post, response.context_data["post_list"])
class TestModelFieldValidation(TestBlog):
def test_overlong_slug(self):
title_len = Post._meta.get_field("title").max_length
title = randomword(title_len)
slug_len = Post._meta.get_field("slug").max_length
slug = randomword(slug_len + 1)
slug_post = Post(blog=self.blog,
section=self.apples,
title=title,
slug=slug,
author=self.user,
state=Post.STATE_CHOICES[-1][0])
with self.assertRaises(ValidationError) as context_manager:
slug_post.save()
the_exception = context_manager.exception
self.assertIn(
"Ensure this value has at most {} characters (it has {})."
.format(slug_len, len(slug)),
the_exception.messages
)
class TestContextProcessors(TestBlog):
def test_no_resolver_match(self):
"""
Ensure no problem when `request.resolver_match` is None
"""
request = HttpRequest()
self.assertEqual(request.resolver_match, None)
result = scoped(request)
self.assertEqual(result, {"scoper_lookup": ""})
class TestViews(TestBlog):
def test_manage_post_create_get(self):
"""
Ensure template with external URL references renders properly
for user with proper credentials.
"""
with self.login(self.user):
response = self.client.get("pinax_blog:manage_post_create")
self.assertEqual(response.status_code, 404)
self.user.is_staff = True
self.user.save()
with self.login(self.user):
self.get("pinax_blog:manage_post_create")
self.response_200()
self.assertTemplateUsed("pinax/blog/manage_post_create")
pinax_images_upload_url = reverse("pinax_images:imageset_new_upload")
self.assertResponseContains(pinax_images_upload_url, html=False)
def test_manage_post_create_post(self):
"""
        Ensure post creation succeeds and redirects properly
        for user with proper credentials.
"""
self.user.is_staff = True
self.user.save()
post_title = "You'll never believe what happened next!"
post_data = dict(
section=self.apples.pk,
title=post_title,
teaser="teaser",
content="content",
description="description",
state=Post.STATE_CHOICES[-1][0],
)
with self.login(self.user):
self.post("pinax_blog:manage_post_create", data=post_data)
self.assertRedirects(self.last_response, reverse("pinax_blog:manage_post_list"))
self.assertTrue(Post.objects.get(title=post_title))
|
PythonMiddleware/instance.py | chandlerette/Python-Middleware | 101 | 11082627 | <filename>PythonMiddleware/instance.py
import PythonMiddleware as gph
_shared_graphene_instance = None
def shared_graphene_instance():
""" This method will initialize ``_shared_graphene_instance`` and return it.
        The purpose of this method is to offer a single default
        graphene instance that can be reused by multiple classes.
"""
global _shared_graphene_instance
if not _shared_graphene_instance:
_shared_graphene_instance = gph.Graphene()
return _shared_graphene_instance
def set_shared_graphene_instance(graphene_instance):
""" This method allows us to override default graphene instance for all users of
``_shared_graphene_instance``.
:param graphene.Graphene graphene_instance: Graphene instance
"""
global _shared_graphene_instance
_shared_graphene_instance = graphene_instance
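# Illustrative usage sketch (added for documentation, not part of the original module):
#
#   from PythonMiddleware.instance import shared_graphene_instance, set_shared_graphene_instance
#
#   gph_instance = shared_graphene_instance()      # lazily creates and caches the default gph.Graphene()
#   set_shared_graphene_instance(custom_instance)  # later callers now receive `custom_instance`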
|
tests/test_config.py | deisenroth/prysm | 110 | 11082656 | """Tests verifying the functionality of the global prysm config."""
import pytest
import numpy as np
from prysm.conf import config
PRECISIONS = {
32: np.float32,
64: np.float64,
}
PRECISIONS_COMPLEX = {
32: np.complex64,
64: np.complex128
}
@pytest.mark.parametrize('precision', [32, 64])
def test_set_precision(precision):
config.precision = precision
assert config.precision == PRECISIONS[precision]
assert config.precision_complex == PRECISIONS_COMPLEX[precision]
def test_rejects_bad_precision():
with pytest.raises(ValueError):
config.precision = 1
|
aiosocks/__init__.py | nibrag/aiosocks | 126 | 11082683 | import asyncio
from .errors import (
SocksError, NoAcceptableAuthMethods, LoginAuthenticationFailed,
SocksConnectionError, InvalidServerReply, InvalidServerVersion
)
from .helpers import (
SocksAddr, Socks4Addr, Socks5Addr, Socks4Auth, Socks5Auth
)
from .protocols import Socks4Protocol, Socks5Protocol, DEFAULT_LIMIT
__version__ = '0.2.6'
__all__ = ('Socks4Protocol', 'Socks5Protocol', 'Socks4Auth',
'Socks5Auth', 'Socks4Addr', 'Socks5Addr', 'SocksError',
'NoAcceptableAuthMethods', 'LoginAuthenticationFailed',
'SocksConnectionError', 'InvalidServerVersion',
'InvalidServerReply', 'create_connection', 'open_connection')
async def create_connection(protocol_factory, proxy, proxy_auth, dst, *,
remote_resolve=True, loop=None, ssl=None, family=0,
proto=0, flags=0, sock=None, local_addr=None,
server_hostname=None, reader_limit=DEFAULT_LIMIT):
assert isinstance(proxy, SocksAddr), (
'proxy must be Socks4Addr() or Socks5Addr() tuple'
)
assert proxy_auth is None or isinstance(proxy_auth,
(Socks4Auth, Socks5Auth)), (
'proxy_auth must be None or Socks4Auth() '
'or Socks5Auth() tuple', proxy_auth
)
assert isinstance(dst, (tuple, list)) and len(dst) == 2, (
'invalid dst format, tuple("dst_host", dst_port))'
)
if (isinstance(proxy, Socks4Addr) and not
(proxy_auth is None or isinstance(proxy_auth, Socks4Auth))):
raise ValueError(
"proxy is Socks4Addr but proxy_auth is not Socks4Auth"
)
if (isinstance(proxy, Socks5Addr) and not
(proxy_auth is None or isinstance(proxy_auth, Socks5Auth))):
raise ValueError(
"proxy is Socks5Addr but proxy_auth is not Socks5Auth"
)
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# read details: asyncio.create_connection
server_hostname = dst[0]
loop = loop or asyncio.get_event_loop()
waiter = asyncio.Future(loop=loop)
def socks_factory():
if isinstance(proxy, Socks4Addr):
socks_proto = Socks4Protocol
else:
socks_proto = Socks5Protocol
return socks_proto(proxy=proxy, proxy_auth=proxy_auth, dst=dst,
app_protocol_factory=protocol_factory,
waiter=waiter, remote_resolve=remote_resolve,
loop=loop, ssl=ssl, server_hostname=server_hostname,
reader_limit=reader_limit)
try:
transport, protocol = await loop.create_connection(
socks_factory, proxy.host, proxy.port, family=family,
proto=proto, flags=flags, sock=sock, local_addr=local_addr)
except OSError as exc:
raise SocksConnectionError(
'[Errno %s] Can not connect to proxy %s:%d [%s]' %
(exc.errno, proxy.host, proxy.port, exc.strerror)) from exc
try:
await waiter
except: # noqa
transport.close()
raise
return protocol.app_transport, protocol.app_protocol
async def open_connection(proxy, proxy_auth, dst, *, remote_resolve=True,
loop=None, limit=DEFAULT_LIMIT, **kwds):
_, protocol = await create_connection(
None, proxy, proxy_auth, dst, reader_limit=limit,
remote_resolve=remote_resolve, loop=loop, **kwds)
return protocol.reader, protocol.writer
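# Illustrative usage sketch (not part of the original module; assumes a reachable SOCKS5
# proxy at 127.0.0.1:1080 and valid credentials):
#
#   reader, writer = await open_connection(
#       proxy=Socks5Addr('127.0.0.1', 1080),
#       proxy_auth=Socks5Auth('login', 'password'),
#       dst=('example.com', 80))
#   writer.write(b'GET / HTTP/1.1\r\nHost: example.com\r\n\r\n')
#   data = await reader.read(1024)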
|
bsuite/bsuite/logging/csv_load.py | hbutsuak95/iv_rl | 1,337 | 11082684 | # python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Read functionality for local csv-based experiments."""
import glob
import os
from typing import List, Tuple
from bsuite import sweep
from bsuite.logging import csv_logging
from bsuite.logging import logging_utils
import pandas as pd
def load_one_result_set(results_dir: str) -> pd.DataFrame:
"""Returns a pandas DataFrame of bsuite results stored in results_dir."""
data = []
for file_path in glob.glob(os.path.join(results_dir, '*.csv')):
_, name = os.path.split(file_path)
# Rough and ready error-checking for only bsuite csv files.
if not name.startswith(csv_logging.BSUITE_PREFIX):
print('Warning - we recommend you use a fresh folder for bsuite results.')
continue
# Then we will assume that the file is actually a bsuite file
df = pd.read_csv(file_path)
file_bsuite_id = name.strip('.csv').split(csv_logging.INITIAL_SEPARATOR)[1]
bsuite_id = file_bsuite_id.replace(csv_logging.SAFE_SEPARATOR,
sweep.SEPARATOR)
df['bsuite_id'] = bsuite_id
df['results_dir'] = results_dir
data.append(df)
df = pd.concat(data, sort=False)
return logging_utils.join_metadata(df)
def load_bsuite(
results_dirs: logging_utils.PathCollection
) -> Tuple[pd.DataFrame, List[str]]:
"""Returns a pandas DataFrame of bsuite results."""
return logging_utils.load_multiple_runs(
path_collection=results_dirs,
single_load_fn=load_one_result_set,
)
|
sceptre/stack_status.py | dennisconrad/sceptre | 493 | 11082692 | # -*- coding: utf-8 -*-
"""
sceptre.stack_status
This module implements structs for simplified Stack status and simplified
ChangeSet status values.
"""
class StackStatus(object):
"""
StackStatus stores simplified Stack statuses.
"""
COMPLETE = "complete"
FAILED = "failed"
IN_PROGRESS = "in progress"
PENDING = "pending"
class StackChangeSetStatus(object):
"""
StackChangeSetStatus stores simplified ChangeSet statuses.
"""
PENDING = "pending"
READY = "ready"
DEFUNCT = "defunct"
|
strategies/gekko-japonicus-master/promoterz/sequence/__init__.py | tobby2002/tradyai-api | 229 | 11082695 | #!/bin/python
from .locale import standard_loop
from .world import parallel_world
|
sdk/python/pulumi_gcp/monitoring/get_uptime_check_i_ps.py | sisisin/pulumi-gcp | 121 | 11082706 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetUptimeCheckIPsResult',
'AwaitableGetUptimeCheckIPsResult',
'get_uptime_check_i_ps',
]
@pulumi.output_type
class GetUptimeCheckIPsResult:
"""
A collection of values returned by getUptimeCheckIPs.
"""
def __init__(__self__, id=None, uptime_check_ips=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if uptime_check_ips and not isinstance(uptime_check_ips, list):
raise TypeError("Expected argument 'uptime_check_ips' to be a list")
pulumi.set(__self__, "uptime_check_ips", uptime_check_ips)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="uptimeCheckIps")
def uptime_check_ips(self) -> Sequence['outputs.GetUptimeCheckIPsUptimeCheckIpResult']:
"""
A list of uptime check IPs used by Stackdriver Monitoring. Each `uptime_check_ip` contains:
"""
return pulumi.get(self, "uptime_check_ips")
class AwaitableGetUptimeCheckIPsResult(GetUptimeCheckIPsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetUptimeCheckIPsResult(
id=self.id,
uptime_check_ips=self.uptime_check_ips)
def get_uptime_check_i_ps(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUptimeCheckIPsResult:
"""
Returns the list of IP addresses that checkers run from. For more information see
the [official documentation](https://cloud.google.com/monitoring/uptime-checks#get-ips).
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
ips = gcp.monitoring.get_uptime_check_i_ps()
pulumi.export("ipList", ips.uptime_check_ips)
```
"""
__args__ = dict()
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:monitoring/getUptimeCheckIPs:getUptimeCheckIPs', __args__, opts=opts, typ=GetUptimeCheckIPsResult).value
return AwaitableGetUptimeCheckIPsResult(
id=__ret__.id,
uptime_check_ips=__ret__.uptime_check_ips)
|
src/utils/casino/table/holdem/numberlizeCards.py | lin483/Funny-Nations | 126 | 11082715 | <gh_stars>100-1000
from typing import List
import copy
from src.utils.poker.Card import Card
import src.utils.casino.table.holdem.checkHandValue as checkHandValue
from src.utils.casino.table.holdem.sortCards import sortCards
"""
For numberlization of cards
There are 11 digits
10101010101
^
First digit represent hand value
0 for high card
1 for pair
2 for two pair
3 for three of a kind
4 for straight
5 for flush
6 for full house
7 for four of a kind
8 for straight flush
9 for Royal flush
10101010101
^^
Second and third digit represent first card rank value
10101010101
^^
4th and 5th digit represent second card rank value
10101010101
^^
6th and 7th digit represent third card rank value
10101010101
^^
8th and 9th digit represent 4th card rank value
10101010101
^^
10th and 11th digit represent 5th card rank value
"""
def numberlizeCards(cards: List[Card]):
sortCards(cards)
handValue = getHandValue(cards)
numberlizedCards = 0
times = 1
for i in range(0, 5):
numberlizedCards += (getCardValue(cards[i]) * times)
times *= 100
numberlizedCards += (handValue * times)
return numberlizedCards
def getCardValue(card: Card):
if card.rank == 1:
return 14
return card.rank
def getHandValue(cards: List[Card]) -> int:
if checkHandValue.isRoyalFlush(cards):
return 9
if checkHandValue.isStraightFlush(cards):
return 8
if checkHandValue.isFourOfAKind(cards):
return 7
if checkHandValue.isFullHouse(cards):
return 6
if checkHandValue.isFlush(cards):
return 5
if checkHandValue.isStraight(cards):
return 4
if checkHandValue.isThreeOfAKind(cards):
return 3
if checkHandValue.isTwoPair(cards):
return 2
if checkHandValue.isPair(cards):
return 1
return 0
|
tests/r/test_kakadu.py | hajime9652/observations | 199 | 11082728 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.kakadu import kakadu
def test_kakadu():
"""Test module kakadu.py by downloading
kakadu.csv and testing shape of
extracted data has 1827 rows and 22 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = kakadu(test_path)
try:
assert x_train.shape == (1827, 22)
except:
shutil.rmtree(test_path)
    raise
|
examples/listener.py | BradleyKirton/circus | 820 | 11082729 | from circus.consumer import CircusConsumer
import json
ZMQ_ENDPOINT = 'tcp://127.0.0.1:5556'
topic = 'show:'
for message, message_topic in CircusConsumer(topic, endpoint=ZMQ_ENDPOINT):
response = json.dumps(dict(message=message, topic=message_topic))
print(response)
|
setup.py | SimlaBurcu/QPyTorch | 172 | 11082738 | <gh_stars>100-1000
import re
from setuptools import setup, find_packages
try:
import torch
has_dev_pytorch = "dev" in torch.__version__
except ImportError:
has_dev_pytorch = False
# Base requirements
install_requires = [
"torch>=1.5.0",
]
if has_dev_pytorch: # Remove the PyTorch requirement
install_requires = [
install_require for install_require in install_requires if "torch" != re.split(r"(=|<|>)", install_require)[0]
]
setup(
name="qtorch",
version="0.2.0",
description="Low-Precision Arithmetic Simulation in Pytorch",
long_description=open("README.md").read(),
author="<NAME>, <NAME>, <NAME>, <NAME>",
author_email="<EMAIL>",
project_urls={
"Documentation": "https://qpytorch.readthedocs.io",
"Source": "https://github.com/Tiiiger/QPyTorch/graphs/contributors",
},
packages=find_packages(),
include_package_data=True,
license="MIT",
python_requires=">=3.6",
install_requires=install_requires,
)
|
software/glasgow/arch/mips/core.py | electroniceel/Glasgow | 1,014 | 11082771 | # Ref: MIPS® Architecture For Programmers Vol. III: MIPS32® / microMIPS32™ Privileged Resource Architecture
# Document Number: MD00090 Revision 6.02
# Accession: G00020
from collections import defaultdict
from ...support.bitstruct import *
__all__ = [
# Address space
"KUSEG_addr", "KSEG0_addr", "KSEG1_addr", "KSEG2_addr", "KSEG3_addr", "KSEGx_mask",
# CP0
"CP0_BadVAddr_addr", "CP0_SR_addr", "CP0_Cause_addr", "CP0_Config_addr", "CP0_Config1_addr",
"CP0_Config2_addr", "CP0_Config3_addr", "CP0_Debug_addr", "CP0_Debug2_addr", "CP0_DEPC_addr",
"CP0_DESAVE_addr",
"CP0_Config", "CP0_Config_Kx_values", "CP0_Config_MT_values", "CP0_Config_AR_values",
"CP0_Config_AT_values", "CP0_Config_BE_values",
"CP0_Config1",
"CP0_Debug", "CP0_Debug2",
]
# Address space
KUSEG_addr = 0x0000_0000_0000_0000
KSEG0_addr = 0xffff_ffff_8000_0000
KSEG1_addr = 0xffff_ffff_a000_0000
KSEG2_addr = 0xffff_ffff_c000_0000
KSEG3_addr = 0xffff_ffff_e000_0000
KSEGx_mask = 0xffff_ffff_e000_0000
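# Illustrative check (added for clarity, not part of the original module): KSEGx_mask keeps
# the sign-extended top three segment bits, so a 64-bit address `a` can be classified with
#   (a & KSEGx_mask) == KSEG1_addr   # e.g. 0xffff_ffff_a123_4567 falls in KSEG1 (uncached)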
# CP0 addresses
CP0_BadVAddr_addr = ( 8, 0)
CP0_SR_addr = (12, 0)
CP0_Cause_addr = (13, 0)
CP0_Config_addr = (16, 0)
CP0_Config1_addr = (16, 1)
CP0_Config2_addr = (16, 2)
CP0_Config3_addr = (16, 3)
CP0_Debug_addr = (23, 0)
CP0_Debug2_addr = (23, 6)
CP0_DEPC_addr = (24, 0)
CP0_DESAVE_addr = (31, 0)
# CP0 Config layout
CP0_Config = bitstruct("CP0_Config", 32, [
("K0", 3),
(None, 4),
("MT", 3),
("AR", 3),
("AT", 2),
("BE", 1),
(None, 9),
("KU", 3),
("K23", 3),
("M", 1),
])
CP0_Config_Kx_values = defaultdict(lambda: "unknown", {
# Values 0/1 not defined in MIPS reference, but seem consistent among vendors
0: "write-through write-no-allocate",
1: "write-through write-allocate",
2: "uncached",
3: "write-back write-allocate",
})
CP0_Config_MT_values = defaultdict(lambda: "unknown", {
0: "absent",
1: "standard TLB",
2: "standard BAT",
3: "standard fixed",
})
CP0_Config_AR_values = defaultdict(lambda: "unknown", {
0: "R1",
1: "R2",
})
CP0_Config_AT_values = defaultdict(lambda: "unknown", {
0: "MIPS32",
1: "MIPS64 32-bit",
2: "MIPS64 64-bit",
})
CP0_Config_BE_values = {
0: "little",
1: "big",
}
# CP0 Config1 layout
CP0_Config1 = bitstruct("CP0_Config1", 32, [
("FP", 1),
("EP", 1),
("CA", 1),
("WR", 1),
("PC", 1),
("MD", 1),
("C2", 1),
("DA", 3),
("DL", 3),
("DS", 3),
("IA", 3),
("IL", 3),
("IS", 3),
("MMUSize_m1", 6),
("M", 1),
])
# CP0 Debug layout
CP0_Debug = bitstruct("CP0_Debug", 32, [
("DSS", 1),
("DBp", 1),
("DDBL", 1),
("DDBS", 1),
("DIB", 1),
("DINT", 1),
("DIBImpr", 1),
("OffLine", 1),
("SSt", 1),
("NoSSt", 1),
("DExcCode", 5),
("EJTAGver", 3),
("DDBLImpr", 1),
("DDBSImpr", 1),
("IEXI", 1),
("DBusEP", 1),
("CacheEP", 1),
("MCheckP", 1),
("IBusEP", 1),
("CountDM", 1),
("Halt", 1),
("Doze", 1),
("LSNM", 1),
("NoDCR", 1),
("DM", 1),
("DBD", 1),
])
# CP0 Debug2 layout
CP0_Debug2 = bitstruct("CP0_Debug2", 32, [
("PaCo", 1),
("Tup", 1),
("DQ", 1),
("Prm", 1),
(None, 28),
])
|
tests/nnapi/specs/V1_1/space_to_batch_float_1_nnfw.mod.py | bogus-sudo/ONE-1 | 255 | 11082799 | batch = 2
rows = 3
cols = 4
depth = 5
block_size_height = 2
block_size_width = 3
padding_size_height_top = 1
padding_size_height_bottom = 2
padding_size_width_left = 3
padding_size_width_right = 2
out_batch = batch * block_size_height * block_size_width
out_rows = (int)((rows + padding_size_height_top + padding_size_height_bottom) / block_size_height)
out_cols = (int)((cols + padding_size_width_left + padding_size_width_right) / block_size_width)
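# With the constants above: out_batch = 2 * 2 * 3 = 12, out_rows = (3 + 1 + 2) / 2 = 3 and
# out_cols = (4 + 3 + 2) / 3 = 3, so the generated example below has output shape {12, 3, 3, 5}.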
input_table = [x for x in range(batch * rows * cols * depth)]
stride_b_in = rows * cols * depth
stride_h_in = cols * depth
stride_w_in = depth
output_table = [0 for x in range(out_batch * out_rows * out_cols * depth)]
stride_b_out = out_rows * out_cols * depth
stride_h_out = out_cols * depth
stride_w_out = depth
for b in range(batch):
for h in range(rows + padding_size_height_top + padding_size_height_bottom):
for w in range(cols + padding_size_width_left + padding_size_width_right):
for d in range(depth):
out_d = d;
out_h = (int)(h / block_size_height);
out_w = (int)(w / block_size_width);
out_b = b + ((h % block_size_height) * block_size_width + w % block_size_width) * batch;
if ((h >= padding_size_height_top) and (h < (rows + padding_size_height_top)) and (w >= padding_size_width_left) and (w < (cols + padding_size_width_left))):
output_table[out_b * stride_b_out + out_h * stride_h_out + out_w * stride_w_out + out_d] = input_table[b * stride_b_in + (h - padding_size_height_top) * stride_h_in + (w - padding_size_width_left) * stride_w_in + d];
i1 = Input("input", "TENSOR_FLOAT32", "{%d, %d, %d, %d}" % (batch, rows, cols, depth))
block = Parameter("block_size", "TENSOR_INT32", "{2}", [block_size_height, block_size_width])
paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [padding_size_height_top, padding_size_height_bottom, padding_size_width_left, padding_size_width_right])
output = Output("output", "TENSOR_FLOAT32", "{%d, %d, %d, %d}" % (out_batch, out_rows, out_cols, depth))
model = Model()
model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
input_table}
output0 = {output: # output 0
output_table}
# Instantiate an example
Example((input0, output0))
|
jactorch/functional/__init__.py | dapatil211/Jacinle | 114 | 11082806 | <reponame>dapatil211/Jacinle<gh_stars>100-1000
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : __init__.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 01/24/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
from .arith import *
from .grad import *
from .indexing import *
from .kernel import *
from .linalg import *
from .loglinear import *
from .mask import *
from .meshgrid import *
from .probability import *
from .quantization import *
from .sample import *
from .shape import *
|
profiles/api/views/user_api_views.py | LaudateCorpus1/squest | 112 | 11082812 | from django.contrib.auth.models import User
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.permissions import IsAdminUser
from profiles.api.serializers.user_serializers import UserSerializer
class UserDetails(RetrieveUpdateDestroyAPIView):
serializer_class = UserSerializer
permission_classes = [IsAdminUser]
queryset = User.objects.all()
class UserListCreate(ListCreateAPIView):
serializer_class = UserSerializer
permission_classes = [IsAdminUser]
queryset = User.objects.all()
|
src/gtk/toga_gtk/widgets/box.py | luizoti/toga | 1,261 | 11082858 | from ..libs import Gdk, Gtk
from .base import Widget
class TogaBox(Gtk.Fixed):
def __init__(self, impl):
super().__init__()
self._impl = impl
self.interface = self._impl.interface
def do_get_preferred_width(self):
# Calculate the minimum and natural width of the container.
# print("GET PREFERRED WIDTH", self._impl.native)
width = self._impl.interface.layout.width
min_width = 0 if self._impl.min_width is None else self._impl.min_width
for widget in self.get_children():
if (
min_width
<= widget.interface.layout.absolute_content_right
+ widget.interface.style.padding_right
):
min_width = (
widget.interface.layout.absolute_content_right
+ widget.interface.style.padding_right
)
if min_width > width:
width = min_width
return min_width, width
def do_get_preferred_height(self):
# Calculate the minimum and natural height of the container.
# print("GET PREFERRED HEIGHT", self._impl.native)
height = self._impl.interface.layout.height
min_height = 0 if self._impl.min_height is None else self._impl.min_height
for widget in self.get_children():
if (
min_height
<= widget.interface.layout.absolute_content_bottom
+ widget.interface.style.padding_bottom
):
min_height = (
widget.interface.layout.absolute_content_bottom
+ widget.interface.style.padding_bottom
)
if min_height > height:
height = min_height
return min_height, height
def do_size_allocate(self, allocation):
# print(self._impl, "Container layout",
# allocation.width, 'x', allocation.height,
# ' @ ', allocation.x, 'x', allocation.y
# )
if self._impl.viewport is not None:
self.set_allocation(allocation)
self.interface.refresh()
# WARNING! This list of children is *not* the same
# as the list provided by the interface!
# For GTK's layout purposes, all widgets in the tree
# are children of the *container* - that is, the impl
# object of the root object in the tree of widgets.
for widget in self.get_children():
if not widget.get_visible():
# print("CHILD NOT VISIBLE", widget.interface)
pass
else:
# print("update ", widget.interface, widget.interface.layout)
widget.interface._impl.rehint()
widget_allocation = Gdk.Rectangle()
widget_allocation.x = widget.interface.layout.absolute_content_left + allocation.x
widget_allocation.y = widget.interface.layout.absolute_content_top + allocation.y
widget_allocation.width = widget.interface.layout.content_width
widget_allocation.height = widget.interface.layout.content_height
widget.size_allocate(widget_allocation)
class Box(Widget):
def create(self):
self.min_width = None
self.min_height = None
self.native = TogaBox(self)
|
tools/gitallsecrets/thog/setup.py | ismailbozkurt/kubebot | 171 | 11082899 | from setuptools import setup
setup(
name='truffleHog',
version='1.0.2',
description='Searches through git repositories for high entropy strings, digging deep into commit history.',
url='https://github.com/dxa4481/truffleHog',
author='<NAME>',
author_email='<EMAIL>',
license='GNU',
packages =['truffleHog'],
install_requires=[
'GitPython == 2.1.1'
],
entry_points = {
'console_scripts': ['trufflehog = truffleHog.truffleHog:main'],
},
)
|
dl_lib/network/head/centernet_head.py | harrylin-hyl/CenterNet-better | 543 | 11082916 | <filename>dl_lib/network/head/centernet_head.py
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
class SingleHead(nn.Module):
def __init__(self, in_channel, out_channel, bias_fill=False, bias_value=0):
super(SingleHead, self).__init__()
self.feat_conv = nn.Conv2d(in_channel, in_channel, kernel_size=3, padding=1)
self.relu = nn.ReLU()
self.out_conv = nn.Conv2d(in_channel, out_channel, kernel_size=1)
if bias_fill:
self.out_conv.bias.data.fill_(bias_value)
def forward(self, x):
x = self.feat_conv(x)
x = self.relu(x)
x = self.out_conv(x)
return x
class CenternetHead(nn.Module):
"""
The head used in CenterNet for object classification and box regression.
    It has three subnets, with a common structure but separate parameters.
"""
def __init__(self, cfg):
super(CenternetHead, self).__init__()
self.cls_head = SingleHead(
64,
cfg.MODEL.CENTERNET.NUM_CLASSES,
bias_fill=True,
bias_value=cfg.MODEL.CENTERNET.BIAS_VALUE,
)
self.wh_head = SingleHead(64, 2)
self.reg_head = SingleHead(64, 2)
def forward(self, x):
cls = self.cls_head(x)
cls = torch.sigmoid(cls)
wh = self.wh_head(x)
reg = self.reg_head(x)
pred = {
'cls': cls,
'wh': wh,
'reg': reg
}
return pred
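# Illustrative shape sketch (added for documentation; the `cfg` layout is assumed only from the
# attributes referenced above): for a feature map `x` of shape [N, 64, H, W],
# CenternetHead(cfg)(x) returns a dict where pred['cls'] has shape
# [N, cfg.MODEL.CENTERNET.NUM_CLASSES, H, W] (sigmoid scores) and pred['wh'] and pred['reg']
# each have shape [N, 2, H, W].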
|
boto3_type_annotations/boto3_type_annotations/cloud9/paginator.py | cowboygneox/boto3_type_annotations | 119 | 11082943 | from typing import Dict
from typing import List
from botocore.paginate import Paginator
class DescribeEnvironmentMemberships(Paginator):
def paginate(self, userArn: str = None, environmentId: str = None, permissions: List = None, PaginationConfig: Dict = None) -> Dict:
pass
class ListEnvironments(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
|
monk/tf_keras_1/schedulers/common.py | take2rohit/monk_v1 | 542 | 11082955 | <gh_stars>100-1000
from monk.tf_keras_1.schedulers.imports import *
from monk.system.imports import *
# Code inspired from: https://www.pyimagesearch.com/2019/07/22/keras-learning-rate-schedules-and-decay/
#Level - 1 - Helper classes - Keras
class LearningRateDecay:
def plot(self, epochs, title="Learning Rate Schedule"):
# compute the set of learning rates for each corresponding
# epoch
lrs = [self(i) for i in epochs]
        # plot the learning rate schedule
plt.style.use("ggplot")
plt.figure()
plt.plot(epochs, lrs)
plt.title(title)
plt.xlabel("Epoch #")
plt.ylabel("Learning Rate")
class StepDecay(LearningRateDecay):
def __init__(self, initAlpha=0.01, factor=0.25, dropEvery=10):
# store the base initial learning rate, drop factor, and
# epochs to drop every
self.initAlpha = initAlpha
self.factor = factor
self.dropEvery = dropEvery
def __call__(self, epoch):
# compute the learning rate for the current epoch
exp = np.floor((1 + epoch) / self.dropEvery)
alpha = self.initAlpha * (self.factor ** exp)
# return the learning rate
print("New Lr: ", float(alpha))
return float(alpha)
class PolynomialDecay(LearningRateDecay):
def __init__(self, maxEpochs=100, initAlpha=0.01, power=1.0):
# store the maximum number of epochs, base learning rate,
# and power of the polynomial
self.maxEpochs = maxEpochs
self.initAlpha = initAlpha
self.power = power
def __call__(self, epoch):
# compute the new learning rate based on polynomial decay
decay = (1 - (epoch / float(self.maxEpochs))) ** self.power
alpha = self.initAlpha * decay
# return the learning rate
print("New Lr: ", float(alpha))
return float(alpha)
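# Illustrative usage sketch (added for documentation; assumes a compiled Keras model and that
# the wildcard imports above provide `keras`): both schedules are plain callables of the epoch
# index, so they can be wired into training via a LearningRateScheduler callback, e.g.
#
#   schedule = StepDecay(initAlpha=1e-2, factor=0.25, dropEvery=10)
#   model.fit(x_train, y_train, epochs=50,
#             callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
#   schedule.plot(range(50))   # visualize the resulting decay curve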
|
third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder_unittest.py | zealoussnow/chromium | 14,668 | 11082967 | <reponame>zealoussnow/chromium
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
import unittest
from blinkpy.common import path_finder
from blinkpy.common.host_mock import MockHost
from blinkpy.web_tests.controllers import web_test_finder
from blinkpy.web_tests.models import test_expectations
import mock
class WebTestFinderTests(unittest.TestCase):
def test_skip_tests_expectations(self):
"""Tests that tests are skipped based on to expectations and options."""
host = MockHost()
port = host.port_factory.get('test-win-win7', None)
all_tests = [
'fast/css/passes.html',
'fast/css/fails.html',
'fast/css/times_out.html',
'fast/css/skip.html',
]
# Patch port.tests() to return our tests
port.tests = lambda paths: paths or all_tests
options = optparse.Values({
'no_expectations': False,
'enable_sanitizer': False,
'skipped': 'default',
'skip_timeouts': False,
'skip_failing_tests': False,
})
finder = web_test_finder.WebTestFinder(port, options)
expectations = test_expectations.TestExpectations(port)
expectations.merge_raw_expectations(
('# results: [ Failure Timeout Skip ]'
'\nfast/css/fails.html [ Failure ]'
'\nfast/css/times_out.html [ Timeout ]'
'\nfast/css/skip.html [ Skip ]'))
# When run with default settings, we only skip the tests marked Skip.
tests = finder.skip_tests([], all_tests, expectations)
self.assertEqual(tests, set(['fast/css/skip.html']))
# Specify test on the command line; by default should not skip.
tests = finder.skip_tests(['fast/css/skip.html'], all_tests,
expectations)
self.assertEqual(tests, set())
# Specify test on the command line, but always skip.
finder._options.skipped = 'always'
tests = finder.skip_tests(['fast/css/skip.html'], all_tests,
expectations)
self.assertEqual(tests, set(['fast/css/skip.html']))
finder._options.skipped = 'default'
# Only run skip tests, aka skip all non-skipped tests.
finder._options.skipped = 'only'
tests = finder.skip_tests([], all_tests, expectations)
self.assertEqual(
tests,
set([
'fast/css/passes.html', 'fast/css/fails.html',
'fast/css/times_out.html'
]))
finder._options.skipped = 'default'
# Ignore any skip entries, aka never skip anything.
finder._options.skipped = 'ignore'
tests = finder.skip_tests([], all_tests, expectations)
self.assertEqual(tests, set())
finder._options.skipped = 'default'
# Skip tests that are marked TIMEOUT.
finder._options.skip_timeouts = True
tests = finder.skip_tests([], all_tests, expectations)
self.assertEqual(
tests, set(['fast/css/times_out.html', 'fast/css/skip.html']))
finder._options.skip_timeouts = False
# Skip tests that are marked FAILURE
finder._options.skip_failing_tests = True
tests = finder.skip_tests([], all_tests, expectations)
self.assertEqual(tests,
set(['fast/css/fails.html', 'fast/css/skip.html']))
finder._options.skip_failing_tests = False
# Disable expectations entirely; nothing should be skipped by default.
finder._options.no_expectations = True
tests = finder.skip_tests([], all_tests, None)
self.assertEqual(tests, set())
def test_skip_tests_idlharness(self):
"""Tests that idlharness tests are skipped on MSAN/ASAN runs.
See https://crbug.com/856601
"""
host = MockHost()
port = host.port_factory.get('test-win-win7', None)
non_idlharness_test = 'external/wpt/dir1/dir2/foo.html'
idlharness_test_1 = 'external/wpt/dir1/dir2/idlharness.any.html'
idlharness_test_2 = 'external/wpt/dir1/dir2/idlharness.any.worker.html'
all_tests = [
non_idlharness_test,
idlharness_test_1,
idlharness_test_2,
]
# Patch port.tests() to return our tests
port.tests = lambda paths: paths or all_tests
options = optparse.Values({
'no_expectations': False,
'enable_sanitizer': False,
'skipped': 'default',
'skip_timeouts': False,
'skip_failing_tests': False,
})
finder = web_test_finder.WebTestFinder(port, options)
# Default case; not MSAN/ASAN so should not skip anything.
expectations = test_expectations.TestExpectations(port)
tests = finder.skip_tests([], all_tests, expectations)
self.assertEqual(tests, set())
for test in all_tests:
self.assertTrue(
expectations.get_expectations(test).is_default_pass)
# MSAN/ASAN, with no paths specified explicitly, so should skip both
# idlharness tests.
expectations = test_expectations.TestExpectations(port)
finder._options.enable_sanitizer = True
tests = finder.skip_tests([], all_tests, expectations)
self.assertEqual(tests, set([idlharness_test_1, idlharness_test_2]))
self.assertTrue(
expectations.get_expectations(non_idlharness_test).is_default_pass)
self.assertEquals(
expectations.get_expectations(idlharness_test_1).results, {'SKIP'})
self.assertEquals(
expectations.get_expectations(idlharness_test_2).results, {'SKIP'})
# Disable expectations entirely; we should still skip the idlharness
# tests but shouldn't touch the expectations parameter.
finder._options.no_expectations = True
tests = finder.skip_tests([], all_tests, None)
self.assertEqual(tests, set([idlharness_test_1, idlharness_test_2]))
# MSAN/ASAN, with one of the tests specified explicitly (and
# --skipped=default), so should skip only the unspecified test.
expectations = test_expectations.TestExpectations(port)
tests = finder.skip_tests([idlharness_test_1], all_tests, expectations)
self.assertEqual(tests, set([idlharness_test_2]))
# Although we will run the test because it was specified explicitly, it
# is still *expected* to Skip. This is consistent with how entries in
# TestExpectations work.
self.assertTrue(
expectations.get_expectations(non_idlharness_test).is_default_pass)
self.assertEquals(
expectations.get_expectations(idlharness_test_1).results, {'SKIP'})
self.assertEquals(
expectations.get_expectations(idlharness_test_2).results, {'SKIP'})
def test_find_fastest_tests(self):
host = MockHost()
port = host.port_factory.get('test-win-win7', None)
all_tests = [
'path/test.html',
'new/test.html',
'fast/css/1.html',
'fast/css/2.html',
'fast/css/3.html',
'fast/css/skip1.html',
'fast/css/skip2.html',
'fast/css/skip3.html',
'fast/css/skip4.html',
'fast/css/skip5.html',
]
port.tests = lambda paths: paths or all_tests
finder = web_test_finder.WebTestFinder(port, {})
finder._times_trie = lambda: {
'fast': {
'css': {
'1.html': 1,
'2.html': 2,
'3.html': 3,
'skip1.html': 0,
'skip2.html': 0,
'skip3.html': 0,
'skip4.html': 0,
'skip5.html': 0,
}
},
'path': {
'test.html': 4,
}
}
tests = finder.find_tests(fastest_percentile=50, args=[])
self.assertEqual(
set(tests[1]),
set(['fast/css/1.html', 'fast/css/2.html', 'new/test.html']))
tests = finder.find_tests(
fastest_percentile=50, args=['path/test.html'])
self.assertEqual(
set(tests[1]),
set([
'fast/css/1.html', 'fast/css/2.html', 'path/test.html',
'new/test.html'
]))
tests = finder.find_tests(args=[])
self.assertEqual(tests[1], all_tests)
tests = finder.find_tests(args=['path/test.html'])
self.assertEqual(tests[1], ['path/test.html'])
def test_find_fastest_tests_excludes_deleted_tests(self):
host = MockHost()
port = host.port_factory.get('test-win-win7', None)
all_tests = [
'fast/css/1.html',
'fast/css/2.html',
]
port.tests = lambda paths: paths or all_tests
finder = web_test_finder.WebTestFinder(port, {})
finder._times_trie = lambda: {
'fast': {
'css': {
'1.html': 1,
'2.html': 2,
'non-existant.html': 1,
}
},
}
tests = finder.find_tests(fastest_percentile=90, args=[])
self.assertEqual(set(tests[1]), set(['fast/css/1.html']))
def test_split_chunks(self):
split = web_test_finder.WebTestFinder._split_into_chunks # pylint: disable=protected-access
tests = ['1', '2', '3', '4']
self.assertEqual(['1', '2', '3', '4'], split(tests, 0, 1))
self.assertEqual(['3', '4'], split(tests, 0, 2))
self.assertEqual(['1', '2'], split(tests, 1, 2))
self.assertEqual(['1', '2', '4'], split(tests, 0, 3))
self.assertEqual([], split(tests, 1, 3))
self.assertEqual(['3'], split(tests, 2, 3))
tests = ['1', '2', '3', '4', '5']
self.assertEqual(['1', '2', '3', '4', '5'], split(tests, 0, 1))
self.assertEqual(['3', '4'], split(tests, 0, 2))
self.assertEqual(['1', '2', '5'], split(tests, 1, 2))
self.assertEqual(['1', '2', '4'], split(tests, 0, 3))
self.assertEqual(['5'], split(tests, 1, 3))
self.assertEqual(['3'], split(tests, 2, 3))
tests = ['1', '2', '3', '4', '5', '6']
self.assertEqual(['1', '2', '3', '4', '5', '6'], split(tests, 0, 1))
self.assertEqual(['3', '4'], split(tests, 0, 2))
self.assertEqual(['1', '2', '5', '6'], split(tests, 1, 2))
self.assertEqual(['1', '2', '4'], split(tests, 0, 3))
self.assertEqual(['5', '6'], split(tests, 1, 3))
self.assertEqual(['3'], split(tests, 2, 3))
class FilterTestsTests(unittest.TestCase):
simple_test_list = ['a/a1.html', 'a/a2.html', 'b/b1.html']
def check(self, tests, filters, expected_tests):
self.assertEqual(expected_tests,
web_test_finder.filter_tests(tests, filters))
def test_no_filters(self):
self.check(self.simple_test_list, [], self.simple_test_list)
def test_empty_glob_is_rejected(self):
self.assertRaises(ValueError, self.check, self.simple_test_list,
[['']], [])
self.assertRaises(ValueError, self.check, self.simple_test_list,
[['-']], [])
def test_one_all_positive_filter(self):
self.check(self.simple_test_list, [['a*']], ['a/a1.html', 'a/a2.html'])
self.check(self.simple_test_list, [['a*', 'b*']],
self.simple_test_list)
def test_one_all_negative_filter(self):
self.check(self.simple_test_list, [['-c*']], self.simple_test_list)
def test_one_mixed_filter(self):
self.check(self.simple_test_list, [['a*', '-c*']],
['a/a1.html', 'a/a2.html'])
def test_two_all_positive_filters(self):
self.check(self.simple_test_list, [['a*'], ['b*']], [])
def test_two_all_negative_filters(self):
self.check(self.simple_test_list, [['-a*'], ['-b*']], [])
self.check(self.simple_test_list, [['-a*'], ['-c*']], ['b/b1.html'])
def test_two_mixed_filters(self):
self.check(self.simple_test_list, [['a*'], ['-b*']],
['a/a1.html', 'a/a2.html'])
def test_longest_glob_wins(self):
# These test that if two matching globs are specified as
# part of the same filter expression, the longest matching
# glob wins (takes precedence). The order of the two globs
# must not matter.
self.check(self.simple_test_list, [['a/a*', '-a/a2*']], ['a/a1.html'])
self.check(self.simple_test_list, [['-a/a*', 'a/a2*']], ['a/a2.html'])
# In this test, the positive and negative globs are in
# separate filter expressions, so a2 should be filtered out
# and nothing should run (tests should only be run if they
# would be run by every filter individually).
self.check(self.simple_test_list, [['-a/a*'], ['a/a2*']], [])
def test_only_trailing_globs_work(self):
self.check(self.simple_test_list, [['a*']], ['a/a1.html', 'a/a2.html'])
# These test that if you have a glob that contains a "*" that isn't
# at the end, it is rejected; only globs at the end should work.
self.assertRaises(ValueError, self.check, self.simple_test_list,
[['*1.html']], [])
self.assertRaises(ValueError, self.check, self.simple_test_list,
[['a*.html']], [])
class NegativeFilterTestsNoGlobTests(unittest.TestCase):
simple_test_list = ['a/a1.html', 'a/a2.html', 'b/b1.html']
def check(self, tests, filters, expected_tests):
self.assertEqual(
expected_tests,
web_test_finder.filter_out_exact_negative_matches(tests, filters))
def test_no_filters(self):
self.check(self.simple_test_list, [], self.simple_test_list)
def test_one_all_negative_filter(self):
self.check(self.simple_test_list, ['-' + self.simple_test_list[0]],
self.simple_test_list[1:])
|
tools/render_videos.py | ford442/oglplu2 | 103 | 11082973 | #!/usr/bin/python3
# coding=utf-8
# Copyright <NAME>.
# Distributed under the Boost Software License, Version 1.0.
# See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt
#
import os
import re
import sys
import shutil
import subprocess
# ------------------------------------------------------------------------------
# The fallback console GUI
class FallbackUI:
def __init__(self, options):
self.title = options.example_name
def __enter__(self): return self
def __exit__(self, type, value, traceback): pass
# Simple progress class for quick actions
class SimpleProgress:
def __init__(self, title): sys.stdout.write(title + " ... ")
def __enter__(self): return self
def __exit__(self, type, value, traceback):
sys.stdout.write("Done\n")
def simple_action(self, title):
return FallbackUI.SimpleProgress(title)
# Progress class for the framedump action
class FramedumpProgress:
def __init__(self, title): print(title)
def __enter__(self): return self
def __exit__(self, type, value, traceback):
print("Done")
def update(self, frame_no, frame_path):
print("Rendered frame: %d" % frame_no)
def framedump(self, title):
return FallbackUI.FramedumpProgress(title)
# Progress class for the video encoding action
class VideoEncProgress:
def __init__(self, title): print(title)
def __enter__(self): return self
def __exit__(self, type, value, traceback):
print("Done")
def update(self, message):
print(message)
def videoenc(self, title):
return FallbackUI.VideoEncProgress(title)
# ------------------------------------------------------------------------------
# wxPython-based GUI
try:
class wxPyGUI:
import wx, threading
# The main frame of the GUI
class MainFrame(wx.Frame):
def __init__(self, options):
import wx
wx.Frame.__init__(
self,
None,
wx.ID_ANY,
"Rendering video of '%s'" %
options.example_name,
wx.DefaultPosition,
wx.Size(400, 170),
wx.CAPTION | wx.CLIP_CHILDREN
)
border_sizer = wx.BoxSizer(wx.HORIZONTAL)
border_sizer.AddSpacer(8)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddSpacer(16);
sizer.Add(
wx.StaticText(
self,
wx.ID_ANY,
"Work directory: '%s'" %
options.work_dir
), 0, wx.EXPAND
)
sizer.Add(
wx.StaticText(
self,
wx.ID_ANY,
"Bin directory: '%s'" %
options.bin_dir
), 0, wx.EXPAND
)
sizer.Add(
wx.StaticText(
self,
wx.ID_ANY,
"Example: '%s'" %
options.example
), 0, wx.EXPAND
)
sizer.Add(
wx.StaticText(
self,
wx.ID_ANY,
"Frame size: %dx%d" %
(options.width, options.height)
), 0, wx.EXPAND
)
sizer.AddSpacer(4)
self.gauge = wx.Gauge(self, wx.ID_ANY)
sizer.Add(self.gauge, 0, wx.EXPAND)
sizer.AddSpacer(4)
self.description = wx.StaticText(self)
sizer.Add(self.description, 0, wx.EXPAND)
border_sizer.Add(sizer, 1, wx.EXPAND)
border_sizer.AddSpacer(8)
self.SetSizer(border_sizer)
self.status_bar = wx.StatusBar(self)
self.SetStatusBar(self.status_bar)
self.status_bar.SetStatusText("Starting")
def AcceptProgress(self, progress):
progress.register(self)
# The thread for the GUI
class GUIThread(threading.Thread):
def __init__(self, options):
import wx, threading
threading.Thread.__init__(self)
self.initialized = threading.Event()
self.options = options
self.app = wx.App(False)
def run(self):
import wx
self.main_frame = wxPyGUI.MainFrame(self.options)
self.main_frame.Show()
wx.SafeYield()
self.initialized.set()
self.app.MainLoop()
def set_progress(self, progress):
import wx
self.initialized.wait()
wx.CallAfter(
self.main_frame.AcceptProgress,
progress
)
def finish(self):
import wx
wx.CallAfter(self.main_frame.Destroy)
def __init__(self, options):
self.gui_thread = wxPyGUI.GUIThread(options)
def __enter__(self):
self.gui_thread.start()
return self
def __exit__(self, type, value, traceback):
self.gui_thread.finish()
self.gui_thread.join()
# Simple progress class for quick actions
class SimpleProgress:
def __init__(self, ui, title):
self.title = title
ui.gui_thread.set_progress(self)
def __enter__(self): return self
def __exit__(self, type, value, traceback): pass
def register(self, main_frame):
import wx
wx.CallAfter(main_frame.gauge.Pulse)
wx.CallAfter(
main_frame.status_bar.SetStatusText,
self.title
)
wx.CallAfter(main_frame.gauge.Pulse)
def simple_action(self, title):
return wxPyGUI.SimpleProgress(self, title)
# Progress class for the framedump action
class FramedumpProgress(SimpleProgress):
def __init__(self, ui, title):
self.base = wxPyGUI.SimpleProgress
self.base.__init__(self, ui, title)
def register(self, main_frame):
self.main_frame = main_frame;
self.base.register(self, main_frame)
def update(self, frame_no, frame_path):
import wx
wx.CallAfter(self.main_frame.gauge.Pulse)
wx.CallAfter(
self.main_frame.description.SetLabel,
"Frame number: %d" % frame_no
)
wx.CallAfter(self.main_frame.gauge.Pulse)
def framedump(self, title):
return wxPyGUI.FramedumpProgress(self, title)
# Progress class for the video encoding action
class VideoEncProgress(SimpleProgress):
def __init__(self, ui, title):
self.base = wxPyGUI.SimpleProgress
self.base.__init__(self, ui, title)
def register(self, main_frame):
self.main_frame = main_frame;
self.base.register(self, main_frame)
def update(self, message):
import wx
wx.CallAfter(self.main_frame.gauge.Pulse)
wx.CallAfter(
self.main_frame.description.SetLabel,
message
)
wx.CallAfter(self.main_frame.gauge.Pulse)
def videoenc(self, title):
return wxPyGUI.VideoEncProgress(self, title)
except ImportError: pass
# ------------------------------------------------------------------------------
# Creates a user interface
def create_ui(options):
try:
return wxPyGUI(options)
except NameError:
return FallbackUI(options)
# ------------------------------------------------------------------------------
def remove_dir(work_dir):
shutil.rmtree(work_dir)
# ------------------------------------------------------------------------------
def make_work_dir():
from tempfile import mkdtemp
return mkdtemp()
# ------------------------------------------------------------------------------
# returns a normalized path to the project root directory
def get_root_dir():
import sys
return os.path.normpath(os.path.dirname(sys.argv[0]))
# ------------------------------------------------------------------------------
# returns the path to the default build directory
def get_default_build_dir():
try:
try:
if os.environ['BUILD_DIR']:
return os.path.join(os.environ['BUILD_DIR'], 'oglplu2')
if os.environ['BINARY_DIR']:
return os.path.join(os.environ['BINARY_DIR'], 'oglplu2')
except: pass
with open(os.path.join(get_root_dir(), "BINARY_DIR"), "rt") as bdf:
return bdf.read()
    except: return os.path.join(get_root_dir(), "_build")
# ------------------------------------------------------------------------------
def parse_args(args):
import argparse
import datetime
def OutputSizeType(arg):
        if re.fullmatch("[0-9]+[kMG]?", arg):
return arg
msg = "'%s' is not a valid file size specification" % str(arg)
raise argparse.ArgumentTypeError(msg)
def FrameDimType(arg):
try: dims = [int(dim) for dim in arg.split('x')]
except: dims = list()
def valid_coord(dim):
return isinstance(dim, int) and dim > 0
if (len(dims) == 2) and all(valid_coord(dim) for dim in dims):
return dims
msg = "'%s' is not a valid frame dimension specification" % str(arg)
raise argparse.ArgumentTypeError(msg)
argparser = argparse.ArgumentParser(
prog=os.path.basename(args[0]),
description="""Script for rendering videos from OGLplus examples""",
epilog="""
Copyright (c) 2008 - %(year)d <NAME>.
Permission is granted to copy, distribute and/or modify this document
under the terms of the Boost Software License, Version 1.0.
(See a copy at http://www.boost.org/LICENSE_1_0.txt)
""" % { "year": datetime.datetime.now().year }
)
argparser.add_argument(
"--build-dir",
help="""The name of the build directory""",
default=get_default_build_dir(),
action="store",
dest="build_dir"
)
argparser.add_argument(
"--scale",
help="""
Scaling factor for the render frame.
""",
type=int,
dest="render_scale",
action="store",
default="1"
)
argparser.add_argument(
"--size",
help="""
The dimensions in pixels of the video frame,
specified as WxH where W and H are positive integers.
""",
type=FrameDimType,
dest="frame_size",
action="store",
default="852x480"
)
argparser.add_argument(
"--half-hd",
help="""Sets the dimensions of the output to 640x360.""",
dest="frame_size",
action="store_const",
const=[640,360]
)
argparser.add_argument(
"--hd",
help="""Sets the dimensions of the output to 1280x720.""",
dest="frame_size",
action="store_const",
const=[1280,720]
)
argparser.add_argument(
"--full-hd",
help="""Sets the dimensions of the output to 1920x1080.""",
dest="frame_size",
action="store_const",
const=[1920,1080]
)
argparser.add_argument(
"--fps",
help="""Number of frames per second""",
type=int,
default="30",
action="store",
dest="fps"
)
argparser.add_argument(
"--samples",
help="""Number of multisampling samples""",
type=int,
default="0",
action="store",
dest="samples"
)
argparser.add_argument(
"--max-bytes",
help="""
Maximum output video size in bytes.
""",
type=OutputSizeType,
dest="max_bytes",
action="store",
default=None
)
argparser.add_argument(
"--gif",
help="""Render output into GIF format""",
default=False,
action="store_true",
dest="gif_output"
)
argparser.add_argument(
"--twitter-gif",
help="""Render a GIF for Twitter""",
default=False,
action="store_true",
dest="twitter_gif"
)
argparser.add_argument(
"--sample-label",
help="""Example label string.""",
dest="sample_label",
action="store",
default=None
)
argparser.add_argument(
"--version-label",
help="""Use version number as label string.""",
default=False,
action="store_true",
dest="version_label"
)
argparser.add_argument(
"example_args",
help="""
List of example arguments to use to render.
The example should be specified as path to the example executable
relative to the build directory.
""",
nargs="*"
)
return argparser.parse_args()
# ------------------------------------------------------------------------------
def fix_options(options):
if options.twitter_gif:
options.gif_output = True
options.frame_size[0] = 420
options.frame_size[1] = 240
options.fps = 24
options.render_scale = 2
options.max_bytes = "14500k"
options.width = options.frame_size[0]
    options.height = options.frame_size[1]
return options
# ------------------------------------------------------------------------------
# checks if we have everything we need to run the example
def check_example(root_dir, example):
if not os.path.isdir(root_dir):
msg = "Could not find directory '%s'." % root_dir
raise Exception(msg)
example_path = os.path.join(
root_dir,
"example",
"combined",
example,
"combined-%s" % example
)
if not os.path.isfile(example_path) or not os.access(example_path, os.X_OK):
msg = "Could not find example '%s'." % example
raise Exception(msg)
return example_path
# ------------------------------------------------------------------------------
# runs imagemagick convert
def run_convert(work_dir, args):
cmd_line = ['convert'] + args
    ret = subprocess.call(cmd_line, cwd=work_dir)
if ret < 0:
raise RuntimeError("Convert killed by signal %d" % -ret)
elif ret > 0:
raise RuntimeError("Convert failed with code %d" % ret)
# ------------------------------------------------------------------------------
# runs the example, dumps the frames, renders the video
def render_video(
example_path,
example_args,
main_label_file,
sample_label_file,
logo_file,
options,
ui
):
prefix = os.path.join(options.work_dir, 'frame')
try:
cmd_line = [example_path] + example_args + [
'--framedump', '%s-'%prefix,
'--width', str(options.width * options.render_scale),
'--height', str(options.height * options.render_scale),
'--fixed-fps', str(options.fps),
]
if options.samples > 0:
cmd_line = cmd_line + [
'--samples', str(options.samples)
]
proc = subprocess.Popen(
cmd_line,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=None
)
proc.stdin.write(('%s-\n' % prefix).encode('utf-8'))
proc.stdin.flush()
with ui.framedump('Rendering frames') as progress:
frame_no = 0
prev_frame_path_pic = str()
while True:
frame_path_raw = proc.stdout.readline()
if not frame_path_raw:
break
proc.stdin.write(frame_path_raw)
proc.stdin.flush()
frame_path_raw = frame_path_raw.decode('utf-8').rstrip()
frame_path_pic = frame_path_raw.replace('.rgba', '.png')
try:
run_convert(options.work_dir, [
'-size', '%dx%d' % (
options.width * options.render_scale,
options.height * options.render_scale
),
'-depth', '8',
frame_path_raw,
'-flip',
'-scale', '%dx%d' % (options.width, options.height),
'-alpha', 'Off',
'-gravity', 'SouthEast',
main_label_file,
'-composite',
'-gravity', 'SouthEast',
sample_label_file,
'-composite',
'-gravity', 'SouthEast',
logo_file,
'-composite',
'-quality', '100',
frame_path_pic
])
except RuntimeError:
shutil.copy2(prev_frame_path_pic, frame_path_pic)
if os.path.isfile(frame_path_raw):
os.remove(frame_path_raw)
prev_frame_path_pic = frame_path_pic
progress.update(frame_no, frame_path_pic)
frame_no += 1
except OSError as os_error:
print("Failed to execute '%(cmd)s': %(error)s" % {
"cmd": str(' ').join(cmd_line),
"error": os_error
})
sys.exit(1)
try:
cmd_line = ['ffmpeg',
'-loglevel', 'error',
'-y', '-f', 'image2',
'-i', prefix+'-%06d.png',
'-r', str(options.fps)
]
if options.max_bytes:
cmd_line += ['-fs', options.max_bytes]
if options.gif_output:
cmd_line += [
prefix+'.gif'
]
else:
cmd_line += [
'-vcodec', 'mpeg4',
'-b', '8000k',
prefix+'.avi'
]
proc = subprocess.Popen(
cmd_line,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
with ui.videoenc('Encoding video') as progress:
while True:
message = proc.stdout.readline().decode('utf-8').rstrip()
if not message: break
progress.update(message)
except OSError as os_error:
print("Failed to execute '%(cmd)s': %(error)s" % {
"cmd": str(' ').join(cmd_line),
"error": os_error
})
sys.exit(2)
if options.gif_output:
shutil.move(prefix+'.gif', 'oglplus-'+options.example_name+'.gif')
else:
shutil.move(prefix+'.avi', 'oglplus-'+options.example_name+'.avi')
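# Added commentary (illustrative, not in the original script): for the default
# AVI output at 30 FPS the code above effectively assembles a command of the
# form
#   ffmpeg -loglevel error -y -f image2 -i <work_dir>/frame-%06d.png -r 30 \
#          -vcodec mpeg4 -b 8000k <work_dir>/frame.avi
# (plus '-fs <max_bytes>' when a size limit is set) and then renames the
# result to oglplus-<example_name>.avi.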
# ------------------------------------------------------------------------------
# renders a video for a single example
def render_example(root_dir, example, example_args, options):
options.work_dir = make_work_dir()
options.root_dir = root_dir
options.bin_dir = options.build_dir
options.example = example
options.example_name = os.path.basename(example)
if options.sample_label is None:
if options.version_label:
version_path = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
"VERSION"
)
with open(version_path, "rt") as vfd:
options.sample_label = vfd.readline()
else:
options.sample_label = options.example_name
example_path = check_example(options.bin_dir, example)
main_label = "http://oglplus.org"
main_label_file = os.path.join(options.work_dir, 'main_label.png')
sample_label_file = os.path.join(options.work_dir, 'sample_label.png')
logo_file = os.path.join(options.work_dir, 'logo.png')
with create_ui(options) as ui:
# render the main text label
with ui.simple_action('Rendering main label') as progress:
run_convert(options.work_dir, [
'-size', '%dx24'%(len(main_label)*12 + 144), 'xc:none',
'-background', 'none',
'-pointsize', '24',
'-gravity', 'center',
'-stroke', 'black',
'-strokewidth', '7',
'-annotate', '0', main_label,
'-blur', '0x4',
'-shadow', '%dx5+1+1' % int(options.width * 0.5),
'+repage',
'-stroke', 'none',
'-strokewidth', '1',
'-fill', 'white',
'-annotate', '0', main_label,
main_label_file
])
# render the example name label
with ui.simple_action('Rendering example label') as progress:
run_convert(options.work_dir, [
'-size', '%dx70'%(len(options.sample_label)*12 + 144), 'xc:none',
'-background', 'none',
'-pointsize', '16',
'-gravity', 'center',
'-stroke', 'black',
'-strokewidth', '3',
'-annotate', '0', options.sample_label,
'-blur', '0x4',
'-shadow', '%dx4+1+1' % int(options.width * 0.6),
'+repage',
'-stroke', 'none',
'-strokewidth', '1',
'-fill', 'white',
'-annotate', '0', options.sample_label,
sample_label_file
])
# render the logo
with ui.simple_action('Rendering logo') as progress:
run_convert(options.work_dir, [
'-size', '144x144', 'xc:none',
'-background', 'white',
'-gravity', 'center',
'-stroke', 'white',
'-fill', 'white',
'-draw', 'circle 72,72, 72,144',
'-blur', '2x2',
'-shadow', '%dx6' % options.width,
'+repage',
os.path.join(options.root_dir,'doc','logo','oglplus_circular.png'),
'-composite',
'-adaptive-resize', '72x72',
'-border', '16x0',
logo_file
])
# run the example and dump the frames
render_video(
example_path,
example_args,
main_label_file,
sample_label_file,
logo_file,
options,
ui
)
remove_dir(options.work_dir)
# ------------------------------------------------------------------------------
def main():
options = fix_options(parse_args(sys.argv))
render_example(
os.path.abspath(
os.path.join(
os.path.dirname(sys.argv[0]),
os.path.pardir
)
),
os.path.splitext(options.example_args[0])[0],
options.example_args,
options
)
# ------------------------------------------------------------------------------
# run the main function
if __name__ == "__main__": main()
|
imagenet/lip_resnet.py | sebgao/LIP | 209 | 11082985 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet50': './lip_resnet-50.pth',
'resnet101': './lip_resnet-101.pth',
}
BOTTLENECK_WIDTH = 128
COEFF = 12.0
def lip2d(x, logit, kernel=3, stride=2, padding=1):
weight = logit.exp()
return F.avg_pool2d(x*weight, kernel, stride, padding)/F.avg_pool2d(weight, kernel, stride, padding)
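# Added illustration (not part of the original LIP code): lip2d realises the
# LIP weighted average, sum(exp(logit) * x) / sum(exp(logit)) per pooling
# window, by composing two average-pooling passes. The sketch below checks
# that equivalence on a single 3x3 window; the shapes are arbitrary example
# values and the helper is not used anywhere else in this file.
def _lip2d_single_window_check():
    x = torch.randn(1, 1, 3, 3)
    logit = torch.randn(1, 1, 3, 3)
    pooled = lip2d(x, logit, kernel=3, stride=1, padding=0)
    weight = logit.exp()
    expected = (x * weight).sum() / weight.sum()
    return torch.allclose(pooled.squeeze(), expected)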
class SoftGate(nn.Module):
def __init__(self):
super(SoftGate, self).__init__()
def forward(self, x):
return torch.sigmoid(x).mul(COEFF)
class BottleneckShared(nn.Module):
def __init__(self, channels):
super(BottleneckShared, self).__init__()
rp = BOTTLENECK_WIDTH
self.logit = nn.Sequential(
OrderedDict((
('conv1', conv1x1(channels, rp)),
('bn1', nn.InstanceNorm2d(rp, affine=True)),
('relu1', nn.ReLU(inplace=True)),
('conv2', conv3x3(rp, rp)),
('bn2', nn.InstanceNorm2d(rp, affine=True)),
('relu2', nn.ReLU(inplace=True)),
))
)
def init_layer(self):
pass
def forward(self, x):
return self.logit(x)
class BottleneckLIP(nn.Module):
def __init__(self, channels):
super(BottleneckLIP, self).__init__()
rp = BOTTLENECK_WIDTH
self.postprocessing = nn.Sequential(
OrderedDict((
('conv', conv1x1(rp, channels)),
('bn', nn.InstanceNorm2d(channels, affine=True)),
('gate', SoftGate()),
))
)
def init_layer(self):
self.postprocessing[0].weight.data.fill_(0.0)
pass
def forward_with_shared(self, x, shared):
frac = lip2d(x, self.postprocessing(shared))
return frac
class SimplifiedLIP(nn.Module):
def __init__(self, channels):
super(SimplifiedLIP, self).__init__()
rp = channels
self.logit = nn.Sequential(
OrderedDict((
('conv', nn.Conv2d(channels, channels, 3, padding=1, bias=False)),
('bn', nn.InstanceNorm2d(channels, affine=True)),
('gate', SoftGate()),
))
)
def init_layer(self):
self.logit[0].weight.data.fill_(0.0)
def forward(self, x):
frac = lip2d(x, self.logit(x))
return frac
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
# class BasicBlock(nn.Module):
# expansion = 1
# def __init__(self, inplanes, planes, stride=1, downsample=None):
# super(BasicBlock, self).__init__()
# self.conv1 = conv3x3(inplanes, planes, stride)
# self.bn1 = nn.BatchNorm2d(planes)
# self.relu = nn.ReLU(inplace=True)
# self.conv2 = conv3x3(planes, planes)
# self.bn2 = nn.BatchNorm2d(planes)
# self.downsample = downsample
# self.stride = stride
# def init_layer(self):
# self.bn2.weight.data.zero_()
# def forward(self, x):
# residual = x
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
# out = self.conv2(out)
# out = self.bn2(out)
# if self.downsample is not None:
# residual = self.downsample(x)
# out += residual
# out = self.relu(out)
# return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
if stride == 2:
kplanes = planes
self.bottleneck_shared = BottleneckShared(inplanes)
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Sequential(
BottleneckLIP(planes),
conv1x1(planes, planes),
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
else:
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def init_layer(self):
self.bn3.weight.data.zero_()
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
if self.stride == 2:
shared = self.bottleneck_shared(x)
out = self.conv2[0].forward_with_shared(out, shared)
for layer in self.conv2[1:]:
out = layer(out)
else:
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
if self.stride == 1:
residual = self.downsample(x)
else:
residual = self.downsample[0].forward_with_shared(x, shared)
for layer in self.downsample[1:]:
residual = layer(residual)
out += residual
out = self.relu(out)
return out
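# Added commentary: in the stride-2 branch above a single BottleneckShared
# module computes the importance logits once from the block input; both the
# LIP pooling inside conv2 and the LIP pooling in the downsample path reuse
# them via forward_with_shared(), so the logit sub-network runs only once per
# block and the two poolings stay consistent.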
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
super(ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = SimplifiedLIP(64)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.fc.weight.data.normal_(0, 0.01)
self.fc.bias.data.zero_()
for m in self.modules():
if hasattr(m, 'init_layer'):
m.init_layer()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if stride != 1:
downsample = OrderedDict([
('lip', BottleneckLIP(self.inplanes)),
('conv', conv1x1(self.inplanes, planes * block.expansion)),
('bn', nn.BatchNorm2d(planes * block.expansion)),
])
else:
downsample = OrderedDict([
('conv', conv1x1(self.inplanes, planes * block.expansion, stride)),
('bn', nn.BatchNorm2d(planes * block.expansion)),
])
downsample = nn.Sequential(downsample)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
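# Note (added commentary): the resnet18/resnet34 constructors below reference
# BasicBlock, which is commented out above, so only the Bottleneck-based
# variants (resnet50/resnet101/resnet152) are usable as the file stands, and
# model_urls only provides local weight paths for resnet50 and resnet101.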
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
#model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
model.load_state_dict(torch.load(model_urls['resnet50'], map_location='cpu'))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
#model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
model.load_state_dict(torch.load(model_urls['resnet101'], map_location='cpu'))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
if __name__ == '__main__':
model = resnet50()
print(model)
model(torch.randn((1, 3, 224, 224)))
|
pyocd/target/builtin/target_M251.py | majorlin/pyOCD | 276 | 11082994 | # pyOCD debugger
# Copyright (c) 2019 Nuvoton
# Copyright (c) 2021 <NAME>
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...coresight.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
FLASH_ALGO_AP_256 = {
'load_address' : 0x20000000,
'instructions': [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0xb088b5b0, 0x460c4613, 0x90064605, 0x92049105, 0x90032064, 0x1000f240, 0x0000f2c4, 0x60012159,
0x60012116, 0x60012188, 0x21016800, 0x93024208, 0x95009401, 0xe7ffd103, 0x90072001, 0xf240e038,
0xf2c42000, 0x68010000, 0x43112204, 0xf2406001, 0xf2c42004, 0x68010000, 0x60014311, 0x9803e7ff,
0x91031e41, 0xd0012800, 0xe7f8e7ff, 0x0000f24c, 0x0000f2c4, 0x222d6801, 0x60014311, 0x011cf24c,
0x0100f2c4, 0x2301680a, 0x600a431a, 0x42186800, 0xe7ffd103, 0x90072001, 0xf24ce00a, 0xf2c40000,
0x68010000, 0x43112240, 0x20006001, 0xe7ff9007, 0xb0089807, 0xb082bdb0, 0x90014601, 0xe7ff9100,
0x0010f24c, 0x0000f2c4, 0x21016800, 0xd0014208, 0xe7f5e7ff, 0x0000f24c, 0x0000f2c4, 0x222d6801,
0x60014391, 0x001cf24c, 0x0000f2c4, 0x22016801, 0x60014391, 0xb0022000, 0xb0854770, 0x4603460a,
0xa8029003, 0x92017001, 0xe7ff9300, 0x0010f24c, 0x0000f2c4, 0x21016800, 0xd0014208, 0xe7f5e7ff,
0x0000f24c, 0x0000f2c4, 0x22406801, 0x60014311, 0x000cf24c, 0x0000f2c4, 0x60012122, 0xf24c9803,
0xf2c40104, 0x60080100, 0x7800a802, 0xd1082800, 0x2000e7ff, 0xf24c43c0, 0xf2c40108, 0x60080100,
0xf24ce009, 0xf2c40008, 0xf64a0000, 0xf2c02103, 0x60010155, 0xf24ce7ff, 0xf2c40010, 0x21010000,
0xf3bf6001, 0xe7ff8f6f, 0x0010f24c, 0x0000f2c4, 0x21016800, 0xd0014208, 0xe7f5e7ff, 0x0000f24c,
0x0000f2c4, 0x21406800, 0xd00b4208, 0xf24ce7ff, 0xf2c40000, 0x68010000, 0x43112240, 0x20016001,
0xe0029004, 0x90042000, 0x9804e7ff, 0x4770b005, 0xb084b580, 0x90024601, 0x220f9802, 0x40100512,
0x05522201, 0x91014290, 0xe7ffd10b, 0xf2409802, 0xf6cf0100, 0x184071e0, 0xf7ff2101, 0x9003ff7e,
0x9802e005, 0xf7ff2100, 0x9003ff78, 0x9803e7ff, 0xbd80b004, 0xb088b580, 0x4603460a, 0x91059006,
0x90042000, 0x93019202, 0x9804e7ff, 0x42889905, 0xe7ffd210, 0x99049806, 0x92041c4a, 0x58400089,
0xffc6f7ff, 0x28009003, 0xe7ffd003, 0x90079803, 0xe7eae003, 0x90072000, 0x9807e7ff, 0xbd80b008,
0xb08ab5b0, 0x460c4613, 0x90084605, 0x92069107, 0x90042000, 0x93029003, 0x95009401, 0xf24ce7ff,
0xf2c40010, 0x68000000, 0x42082101, 0xe7ffd001, 0xf24ce7f5, 0xf2c40000, 0x68010000, 0x43112240,
0x98076001, 0x21031cc0, 0x90074388, 0x9807e7ff, 0xd04c2800, 0xa808e7ff, 0x06407800, 0xd10f2800,
0x9807e7ff, 0xd30b2880, 0x2080e7ff, 0x98089005, 0x9a069905, 0x18d29b04, 0xf83ff000, 0xe0229003,
0x7800a808, 0x28000640, 0xe7ffd111, 0x28109807, 0xe7ffd30d, 0x210f9807, 0x90054388, 0x99059808,
0x9b049a06, 0xf00018d2, 0x9003f828, 0x9807e00a, 0x98089005, 0x9a069905, 0x18d29b04, 0xf8e0f000,
0xe7ff9003, 0x9805e7ff, 0x18089908, 0x98059008, 0x18089904, 0x98059004, 0x1a089907, 0x98039007,
0xd0032800, 0x2001e7ff, 0xe0039009, 0x2000e7af, 0xe7ff9009, 0xb00a9809, 0xb5b0bdb0, 0x4613b088,
0x4605460c, 0x91069007, 0x20009205, 0x98059003, 0x98069004, 0x210f300f, 0x90064388, 0x94019302,
0xe7ff9500, 0x0010f24c, 0x0000f2c4, 0x21016800, 0xd0014208, 0xe7f5e7ff, 0x0000f24c, 0x0000f2c4,
0x22406801, 0x60014311, 0x210f9807, 0xf24c4388, 0xf2c40104, 0x60080100, 0x000cf24c, 0x0000f2c4,
0x60012127, 0x99039804, 0x92031c4a, 0x58400089, 0x0180f24c, 0x0100f2c4, 0x98046008, 0x1c4a9903,
0x00899203, 0xf24c5840, 0xf2c40184, 0x60080100, 0x99039804, 0x92031c4a, 0x58400089, 0x0188f24c,
0x0100f2c4, 0x98046008, 0x1c4a9903, 0x00899203, 0xf24c5840, 0xf2c4018c, 0x60080100, 0x0010f24c,
0x0000f2c4, 0x60012101, 0x38109806, 0xe7ff9006, 0x28009806, 0xe7ffd046, 0xf24ce7ff, 0xf2c400c0,
0x68000000, 0x42082130, 0xe7ffd001, 0x9804e7f5, 0x1c4a9903, 0x00899203, 0xf24c5840, 0xf2c40180,
0x60080100, 0x99039804, 0x92031c4a, 0x58400089, 0x0184f24c, 0x0100f2c4, 0xe7ff6008, 0x00c0f24c,
0x0000f2c4, 0x21c06800, 0xd0014208, 0xe7f5e7ff, 0x99039804, 0x92031c4a, 0x58400089, 0x0188f24c,
0x0100f2c4, 0x98046008, 0x1c4a9903, 0x00899203, 0xf24c5840, 0xf2c4018c, 0x60080100, 0x38109806,
0xe7b59006, 0xf24ce7ff, 0xf2c40010, 0x68000000, 0x42082101, 0xe7ffd001, 0x2000e7f5, 0xbdb0b008,
0xb087b5b0, 0x460c4613, 0x90054605, 0x92039104, 0x1cc09804, 0x43882103, 0x93029004, 0x95009401,
0xf24ce7ff, 0xf2c40010, 0x68000000, 0x42082101, 0xe7ffd001, 0xf24ce7f5, 0xf2c40000, 0x68010000,
0x43112240, 0xf24c6001, 0xf2c4000c, 0x21210000, 0xe7ff6001, 0x28009804, 0xe7ffd040, 0x21039805,
0xf24c4388, 0xf2c40104, 0x60080100, 0x68009803, 0x0108f24c, 0x0100f2c4, 0xf24c6008, 0xf2c40010,
0x21010000, 0xf3bf6001, 0xe7ff8f6f, 0x0010f24c, 0x0000f2c4, 0x21016800, 0xd0014208, 0xe7f5e7ff,
0x0000f24c, 0x0000f2c4, 0x21406800, 0xd00b4208, 0xf24ce7ff, 0xf2c40000, 0x68010000, 0x43112240,
0x20016001, 0xe00c9006, 0x1d009805, 0x98039005, 0x90031d00, 0x1f009804, 0xe7bb9004, 0x90062000,
0x9806e7ff, 0xbdb0b007, 0xb087b5b0, 0x460c4613, 0x90054605, 0x92039104, 0x1cc09804, 0x43882103,
0x93029004, 0x95009401, 0xf24ce7ff, 0xf2c40010, 0x68000000, 0x42082101, 0xe7ffd001, 0xf24ce7f5,
0xf2c40000, 0x68010000, 0x43112240, 0xf24c6001, 0xf2c4000c, 0x21000000, 0xe7ff6001, 0x28009804,
0xe7ffd04c, 0x21039805, 0xf24c4388, 0xf2c40104, 0x60080100, 0x0008f24c, 0x0000f2c4, 0x60012100,
0x0010f24c, 0x0000f2c4, 0x60012101, 0x8f6ff3bf, 0xf24ce7ff, 0xf2c40010, 0x68000000, 0x42082101,
0xe7ffd001, 0xf24ce7f5, 0xf2c40000, 0x68000000, 0x42082140, 0xe7ffd00b, 0x0000f24c, 0x0000f2c4,
0x22406801, 0x60014311, 0x90062001, 0xf24ce019, 0xf2c40008, 0x68000000, 0x68099903, 0xd0034288,
0x2001e7ff, 0xe00c9006, 0x1d009805, 0x98039005, 0x90031d00, 0x1f009804, 0xe7af9004, 0x90062000,
0x9806e7ff, 0xbdb0b007, 0x00000000
],
# Relative function addresses
'pc_init': 0x20000021,
'pc_unInit': 0x200000d7,
'pc_program_page': 0x20000281,
'pc_erase_sector': 0x200001f1,
'pc_eraseAll': 0x0,
'static_base' : 0x20000000 + 0x00000020 + 0x000006c8,
'begin_stack' : 0x20000900,
'begin_data' : 0x20000000 + 0x1000,
'page_size' : 0x200,
'analyzer_supported' : False,
'analyzer_address' : 0x00000000,
'page_buffers' : [0x20001000, 0x20001200], # Enable double buffering
'min_program_length' : 0x200,
# Flash information
'flash_start': 0x0,
'flash_size': 0x40000,
'sector_sizes': (
(0x0, 0x200),
)
}
FLASH_ALGO_LD_4 = {
'load_address' : 0x20000000,
'instructions': [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0xb089b5b0, 0x460c4613, 0x90074605, 0x92059106, 0x90032064, 0x1000f240, 0x0000f2c4, 0x60012159,
0x60012116, 0x60012188, 0x21016800, 0x93024208, 0x95009401, 0xe7ffd103, 0x90082001, 0xf240e044,
0xf2c42000, 0x68010000, 0x43112204, 0xf2406001, 0xf2c42004, 0x68010000, 0x60014311, 0x9803e7ff,
0x91031e41, 0xd0012800, 0xe7f8e7ff, 0x0000f24c, 0x0000f2c4, 0x222d6801, 0x60014311, 0x011cf24c,
0x0100f2c4, 0x2301680a, 0x600a431a, 0x42186800, 0xe7ffd103, 0x90082001, 0xf24ce016, 0xf2c40000,
0x68000000, 0x42082120, 0xe7ffd103, 0x90082001, 0xf24ce00a, 0xf2c40000, 0x68010000, 0x43112240,
0x20006001, 0xe7ff9008, 0xb0099808, 0xb082bdb0, 0x90014601, 0xe7ff9100, 0x0010f24c, 0x0000f2c4,
0x21016800, 0xd0014208, 0xe7f5e7ff, 0x0000f24c, 0x0000f2c4, 0x222d6801, 0x60014391, 0x001cf24c,
0x0000f2c4, 0x22016801, 0x60014391, 0xb0022000, 0xb5b04770, 0x4613b086, 0x4605460c, 0x91049005,
0x7002a803, 0x93022001, 0x95009401, 0xbdb0b006, 0x460ab085, 0x90034603, 0x7001a802, 0x93009201,
0xf24ce7ff, 0xf2c40010, 0x68000000, 0x42082101, 0xe7ffd001, 0xf24ce7f5, 0xf2c40000, 0x68010000,
0x43112240, 0xf24c6001, 0xf2c4000c, 0x21220000, 0x98036001, 0x0104f24c, 0x0100f2c4, 0xa8026008,
0x28007800, 0xe7ffd108, 0x43c02000, 0x0108f24c, 0x0100f2c4, 0xe0096008, 0x0008f24c, 0x0000f2c4,
0x2103f64a, 0x0155f2c0, 0xe7ff6001, 0x0010f24c, 0x0000f2c4, 0x60012101, 0x8f6ff3bf, 0xf24ce7ff,
0xf2c40010, 0x68000000, 0x42082101, 0xe7ffd001, 0xf24ce7f5, 0xf2c40000, 0x68000000, 0x42082140,
0xe7ffd00b, 0x0000f24c, 0x0000f2c4, 0x22406801, 0x60014311, 0x90042001, 0x2000e002, 0xe7ff9004,
0xb0059804, 0xb5804770, 0x4601b084, 0x98029002, 0x0512220f, 0x22014010, 0x42900552, 0xd10b9101,
0x9802e7ff, 0x0100f240, 0x71e0f6cf, 0x21011840, 0xff7ef7ff, 0xe0059003, 0x21009802, 0xff78f7ff,
0xe7ff9003, 0xb0049803, 0xb580bd80, 0x460ab088, 0x90064603, 0x20009105, 0x92029004, 0xe7ff9301,
0x99059804, 0xd2104288, 0x9806e7ff, 0x1c4a9904, 0x00899204, 0xf7ff5840, 0x9003ffc6, 0xd0032800,
0x9803e7ff, 0xe0039007, 0x2000e7ea, 0xe7ff9007, 0xb0089807, 0xb5b0bd80, 0x4613b08a, 0x4605460c,
0x91079008, 0x20009206, 0x90039004, 0x94019302, 0xe7ff9500, 0x0010f24c, 0x0000f2c4, 0x21016800,
0xd0014208, 0xe7f5e7ff, 0x0000f24c, 0x0000f2c4, 0x22406801, 0x60014311, 0x1cc09807, 0x43882103,
0xe7ff9007, 0x28009807, 0xe7ffd04c, 0x7800a808, 0x28000640, 0xe7ffd10f, 0x28809807, 0xe7ffd30b,
0x90052080, 0x99059808, 0x9b049a06, 0xf00018d2, 0x9003f83f, 0xa808e022, 0x06407800, 0xd1112800,
0x9807e7ff, 0xd30d2810, 0x9807e7ff, 0x4388210f, 0x98089005, 0x9a069905, 0x18d29b04, 0xf828f000,
0xe00a9003, 0x90059807, 0x99059808, 0x9b049a06, 0xf00018d2, 0x9003f8e0, 0xe7ffe7ff, 0x99089805,
0x90081808, 0x99049805, 0x90041808, 0x99079805, 0x90071a08, 0x28009803, 0xe7ffd003, 0x90092001,
0xe7afe003, 0x90092000, 0x9809e7ff, 0xbdb0b00a, 0xb088b5b0, 0x460c4613, 0x90074605, 0x92059106,
0x90032000, 0x90049805, 0x300f9806, 0x4388210f, 0x93029006, 0x95009401, 0xf24ce7ff, 0xf2c40010,
0x68000000, 0x42082101, 0xe7ffd001, 0xf24ce7f5, 0xf2c40000, 0x68010000, 0x43112240, 0x98076001,
0x4388210f, 0x0104f24c, 0x0100f2c4, 0xf24c6008, 0xf2c4000c, 0x21270000, 0x98046001, 0x1c4a9903,
0x00899203, 0xf24c5840, 0xf2c40180, 0x60080100, 0x99039804, 0x92031c4a, 0x58400089, 0x0184f24c,
0x0100f2c4, 0x98046008, 0x1c4a9903, 0x00899203, 0xf24c5840, 0xf2c40188, 0x60080100, 0x99039804,
0x92031c4a, 0x58400089, 0x018cf24c, 0x0100f2c4, 0xf24c6008, 0xf2c40010, 0x21010000, 0x98066001,
0x90063810, 0x9806e7ff, 0xd0462800, 0xe7ffe7ff, 0x00c0f24c, 0x0000f2c4, 0x21306800, 0xd0014208,
0xe7f5e7ff, 0x99039804, 0x92031c4a, 0x58400089, 0x0180f24c, 0x0100f2c4, 0x98046008, 0x1c4a9903,
0x00899203, 0xf24c5840, 0xf2c40184, 0x60080100, 0xf24ce7ff, 0xf2c400c0, 0x68000000, 0x420821c0,
0xe7ffd001, 0x9804e7f5, 0x1c4a9903, 0x00899203, 0xf24c5840, 0xf2c40188, 0x60080100, 0x99039804,
0x92031c4a, 0x58400089, 0x018cf24c, 0x0100f2c4, 0x98066008, 0x90063810, 0xe7ffe7b5, 0x0010f24c,
0x0000f2c4, 0x21016800, 0xd0014208, 0xe7f5e7ff, 0xb0082000, 0xb5b0bdb0, 0x4613b087, 0x4605460c,
0x91049005, 0x98049203, 0x21031cc0, 0x90044388, 0x94019302, 0xe7ff9500, 0x0010f24c, 0x0000f2c4,
0x21016800, 0xd0014208, 0xe7f5e7ff, 0x0000f24c, 0x0000f2c4, 0x22406801, 0x60014311, 0x000cf24c,
0x0000f2c4, 0x60012121, 0x9804e7ff, 0xd0402800, 0x9805e7ff, 0x43882103, 0x0104f24c, 0x0100f2c4,
0x98036008, 0xf24c6800, 0xf2c40108, 0x60080100, 0x0010f24c, 0x0000f2c4, 0x60012101, 0x8f6ff3bf,
0xf24ce7ff, 0xf2c40010, 0x68000000, 0x42082101, 0xe7ffd001, 0xf24ce7f5, 0xf2c40000, 0x68000000,
0x42082140, 0xe7ffd00b, 0x0000f24c, 0x0000f2c4, 0x22406801, 0x60014311, 0x90062001, 0x9805e00c,
0x90051d00, 0x1d009803, 0x98049003, 0x90041f00, 0x2000e7bb, 0xe7ff9006, 0xb0079806, 0xb5b0bdb0,
0x4613b087, 0x4605460c, 0x91049005, 0x98049203, 0x21031cc0, 0x90044388, 0x94019302, 0xe7ff9500,
0x0010f24c, 0x0000f2c4, 0x21016800, 0xd0014208, 0xe7f5e7ff, 0x0000f24c, 0x0000f2c4, 0x22406801,
0x60014311, 0x000cf24c, 0x0000f2c4, 0x60012100, 0x9804e7ff, 0xd04c2800, 0x9805e7ff, 0x43882103,
0x0104f24c, 0x0100f2c4, 0xf24c6008, 0xf2c40008, 0x21000000, 0xf24c6001, 0xf2c40010, 0x21010000,
0xf3bf6001, 0xe7ff8f6f, 0x0010f24c, 0x0000f2c4, 0x21016800, 0xd0014208, 0xe7f5e7ff, 0x0000f24c,
0x0000f2c4, 0x21406800, 0xd00b4208, 0xf24ce7ff, 0xf2c40000, 0x68010000, 0x43112240, 0x20016001,
0xe0199006, 0x0008f24c, 0x0000f2c4, 0x99036800, 0x42886809, 0xe7ffd003, 0x90062001, 0x9805e00c,
0x90051d00, 0x1d009803, 0x98049003, 0x90041f00, 0x2000e7af, 0xe7ff9006, 0xb0079806, 0x0000bdb0,
0x00000000
],
# Relative function addresses
'pc_init': 0x20000021,
'pc_unInit': 0x200000ef,
'pc_program_page': 0x200002b7,
'pc_erase_sector': 0x20000227,
'pc_eraseAll': 0x0,
'static_base' : 0x20000000 + 0x00000020 + 0x00000700,
'begin_stack' : 0x20000a00,
'begin_data' : 0x20000000 + 0x1000,
'page_size' : 0x200,
'analyzer_supported' : False,
'analyzer_address' : 0x00000000,
'page_buffers' : [0x20001000, 0x20001200], # Enable double buffering
'min_program_length' : 0x200,
# Flash information
'flash_start': 0x100000,
'flash_size': 0x1000,
'sector_sizes': (
(0x0, 0x200),
)
}
class M252KG6AE(CoreSightTarget):
VENDOR = "Nuvoton"
MEMORY_MAP = MemoryMap(
FlashRegion( start=0x00000000, length=0x40000, sector_size=0x0200,
page_size=0x0200,
is_boot_memory=True,
algo=FLASH_ALGO_AP_256),
FlashRegion( start=0x00100000, length=0x1000, sector_size=0x0200,
page_size=0x0200,
algo=FLASH_ALGO_LD_4),
RamRegion( start=0x20000000, length=0x8000)
)
def __init__(self, session):
super(M252KG6AE, self).__init__(session, self.MEMORY_MAP)
self._svd_location = SVDFile.from_builtin("M251_v1.svd")
|
examples/cp/basic/golomb_ruler_all_solutions.py | yukarinoki/docplex-examples | 302 | 11083005 | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
In mathematics, a Golomb ruler is a set of marks at integer positions along
an imaginary ruler such that no two pairs of marks are the same distance apart.
The number of marks on the ruler is its order, and the largest distance
between two of its marks is its length.
This implementation differs from the 'basic' implementation, given in the
example module golomb_ruler.py, because it calls the solver twice:
* First time to know the minimal size of the ruler for the required order,
* A second time to list all possible rulers for this optimal size
See https://en.wikipedia.org/wiki/Golomb_ruler for more information.
For order 5: 2 solutions: 0 1 4 9 11
0 2 7 8 11
For order 7: 6 solutions: 0 1 4 10 18 23 25
0 1 7 11 20 23 25
0 1 11 16 19 23 25
0 2 3 10 16 21 25
0 2 7 13 21 22 25
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import CpoModel
from docplex.cp.utils import CpoNotSupportedException
from sys import stdout
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# Number of marks on the ruler
ORDER = 7
#-----------------------------------------------------------------------------
# Prepare the data for modeling
#-----------------------------------------------------------------------------
# Estimate an upper bound to the ruler length
MAX_LENGTH = (ORDER - 1) ** 2
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# Create array of variables corresponding to position rule marks
marks = mdl.integer_var_list(ORDER, 0, MAX_LENGTH, "M")
# Create marks distances that should be all different
dist = [marks[i] - marks[j] for i in range(1, ORDER) for j in range(0, i)]
mdl.add(mdl.all_diff(dist))
# Avoid symmetric solutions by ordering marks
mdl.add(marks[0] == 0)
for i in range(1, ORDER):
mdl.add(marks[i] > marks[i - 1])
# Avoid mirror solution
mdl.add((marks[1] - marks[0]) < (marks[ORDER - 1] - marks[ORDER - 2]))
# Minimize ruler size (position of the last mark)
minexpr = mdl.minimize(marks[ORDER - 1])
mdl.add(minexpr)
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
# First solve the model to find the smallest ruler length
msol = mdl.solve(TimeLimit=100)
if not msol:
print("No Golomb ruler available for order " + str(ORDER))
else:
rsize = msol[marks[ORDER - 1]]
print("Shortest ruler for order " + str(ORDER) + " has length " + str(rsize))
# Remove minimization from the model
mdl.remove(minexpr)
# Force position of last mark
mdl.add(marks[ORDER - 1] == rsize)
# Request all solutions
print("List of all possible rulers for length {}:".format(rsize))
siter = mdl.start_search(SearchType='DepthFirst', Workers=1, TimeLimit=100) # Parameters needed to avoid duplicate solutions
try:
for i, msol in enumerate(siter):
stdout.write(str(i + 1) + ": ")
for v in marks:
stdout.write(" " + str(msol[v]))
stdout.write("\n")
except CpoNotSupportedException:
print("This instance of the solver does not support solution iteration.")
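# Added for illustration (not used by the model above): a small checker that
# confirms a list of marks is a valid Golomb ruler, i.e. all pairwise
# distances are distinct.
def is_golomb_ruler(mark_values):
    dists = [b - a for i, a in enumerate(mark_values) for b in mark_values[i + 1:]]
    return len(dists) == len(set(dists))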
|
Tests/image_tests/helpers.py | gonsolo/Falcor | 1,615 | 11083025 | def render_frames(m, name, frames=[1], framerate=60, resolution=[1280,720]):
m.resizeSwapChain(*resolution)
m.ui = False
m.clock.framerate = framerate
m.clock.time = 0
m.clock.pause()
m.frameCapture.baseFilename = name
frame = 0
for capture_frame in frames:
while frame < capture_frame:
frame += 1
m.clock.frame = frame
m.renderFrame()
m.frameCapture.capture()
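# Hedged usage sketch (added for illustration): in a Falcor image test script
# 'm' is the scripting context provided by the test framework and the render
# graph is loaded elsewhere; the call below only shows the intended calling
# convention of this helper, with invented example values.
#
#   render_frames(m, 'MyRenderGraph', frames=[16, 64, 128], resolution=[1920, 1080])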
|
src/encoded/tests/test_upgrade_platform.py | procha2/encoded | 102 | 11083040 | import pytest
def test_platform_upgrade(upgrader, platform_1):
value = upgrader.upgrade('platform', platform_1, target_version='2')
assert value['schema_version'] == '2'
assert 'encode2_dbxrefs' not in value
assert 'geo_dbxrefs' not in value
assert value['dbxrefs'] == ['UCSC-ENCODE-cv:AB_SOLiD_3.5', 'GEO:GPL9442']
def test_platform_upgrade_status(upgrader, platform_2):
value = upgrader.upgrade('platform', platform_2, target_version='3')
assert value['schema_version'] == '3'
assert value['status'] == 'current'
def test_platform_upgrade_6_7(upgrader, platform_6):
value = upgrader.upgrade('platform', platform_6, current_version='6', target_version='7')
assert value['schema_version'] == '7'
assert value['status'] == 'released'
platform_6['status'] = 'disabled'
platform_6['schema_version'] = '6'
value = upgrader.upgrade('platform', platform_6, current_version='6', target_version='7')
assert value['schema_version'] == '7'
assert value['status'] == 'deleted'
|
tests/import/pkg_m/pkg2/__init__.py | Euromance/pycopy | 663 | 11083058 | print("pkg_m.pkg2:", __name__)
|
jira_agile_metrics/utils.py | arulvelkumar/jira-agile-metrics | 206 | 11083070 | import datetime
import os.path
import numpy as np
import pandas as pd
import seaborn as sns
class StatusTypes:
backlog = "backlog"
accepted = "accepted"
complete = "complete"
def extend_dict(d, e):
r = d.copy()
r.update(e)
return r
def to_json_string(value):
if isinstance(value, pd.Timestamp):
return value.strftime("%Y-%m-%d")
if value in (None, np.NaN, pd.NaT):
return ""
try:
return str(value)
except TypeError:
return value
def get_extension(filename):
return os.path.splitext(filename)[1].lower()
def to_days_since_epoch(d):
return (d - datetime.date(1970, 1, 1)).days
def set_chart_context(context):
sns.set_context(context)
def set_chart_style(style="whitegrid", despine=True):
sns.set_style(style)
if despine:
sns.despine()
def breakdown_by_month(
df,
start_column,
end_column,
key_column,
value_column,
output_columns=None,
aggfunc="count",
):
"""If `df` is a DataFrame of items that are valid/active between the
timestamps stored in `start_column` and `end_column`, and where each item
is uniquely identified by `key_column` and has a categorical value in
`value_column`, return a new DataFrame counting the number of items in
each month broken down by each unique value in `value_column`. To restrict
(and order) the value columns, pass a list of valid values as
`output_columns`.
"""
def build_df(t):
start_date = getattr(t, start_column)
end_date = getattr(t, end_column)
key = getattr(t, key_column)
value = getattr(t, value_column)
if end_date is pd.NaT:
end_date = pd.Timestamp.today()
first_month = (
start_date.normalize().to_period("M").to_timestamp("D", "S")
)
last_month = end_date.normalize().to_period("M").to_timestamp("D", "S")
index = pd.date_range(first_month, last_month, freq="MS")
return pd.DataFrame(index=index, data=[[key]], columns=[value])
breakdown = (
pd.concat([build_df(t) for t in df.itertuples()], sort=True)
.resample("MS")
.agg(aggfunc)
)
if output_columns:
breakdown = breakdown[
[s for s in output_columns if s in breakdown.columns]
]
return breakdown
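# Hedged usage sketch (added for illustration; the column names below are
# invented example data, not a schema required by this module):
def _breakdown_by_month_example():
    cycle_data = pd.DataFrame([
        {"key": "A-1", "start": pd.Timestamp("2021-01-10"),
         "end": pd.Timestamp("2021-03-02"), "type": "Story"},
        {"key": "A-2", "start": pd.Timestamp("2021-02-01"),
         "end": pd.NaT, "type": "Bug"},
    ])
    # One row per calendar month, one column per issue type, counting the
    # items active in that month.
    return breakdown_by_month(cycle_data, "start", "end", "key", "type")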
def breakdown_by_month_sum_days(
df,
start_column,
end_column,
value_column,
output_columns=None,
aggfunc="sum",
):
"""If `df` is a DataFrame of items that are valid/active between the
timestamps stored in `start_column` and `end_column`, and where each has a
categorical value in `value_column`, return a new DataFrame summing the
overlapping days of items in each month broken down by each unique value in
`value_column`. To restrict (and order) the value columns, pass a list of
valid values as `output_columns`.
"""
def build_df(t):
start_date = getattr(t, start_column)
end_date = getattr(t, end_column)
value = getattr(t, value_column)
if end_date is pd.NaT:
end_date = pd.Timestamp.today()
days_range = pd.date_range(start_date, end_date, freq="D")
first_month = (
start_date.normalize().to_period("M").to_timestamp("D", "S")
)
last_month = end_date.normalize().to_period("M").to_timestamp("D", "S")
index = pd.date_range(first_month, last_month, freq="MS")
return pd.DataFrame(
index=index,
data=[
[
len(
pd.date_range(
month_start,
month_start + pd.tseries.offsets.MonthEnd(1),
freq="D",
).intersection(days_range)
)
]
for month_start in index
],
columns=[value],
)
breakdown = (
pd.concat([build_df(t) for t in df.itertuples()], sort=True)
.resample("MS")
.agg(aggfunc)
)
if output_columns:
breakdown = breakdown[
[s for s in output_columns if s in breakdown.columns]
]
return breakdown
def to_bin(value, edges):
"""Pass a list of numbers in `edges` and return which of them `value` falls
between. If < the first item, return (0, <first>). If > last item, return
(<last>, None).
"""
previous = 0
for v in edges:
if previous <= value <= v:
return (previous, v)
previous = v
return (previous, None)
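# Added examples (illustrative only): to_bin returns the pair of consecutive
# edges that bracket the value.
#
#   to_bin(5, [3, 6, 9])   -> (3, 6)
#   to_bin(1, [3, 6, 9])   -> (0, 3)
#   to_bin(12, [3, 6, 9])  -> (9, None)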
|
fpn_dcr/symbols/__init__.py | weiyc/Decoupled-Classification-Refinement | 148 | 11083080 | import resnet_v1_101_fpn_rcnn_dcr_res2
import resnet_v1_101_fpn_dcn_rcnn_dcr_res2
|
libs/sqlobject/tests/test_SQLMultipleJoin.py | scambra/HTPC-Manager | 422 | 11083088 | from sqlobject import *
from sqlobject.tests.dbtest import *
class Race(SQLObject):
name = StringCol()
fightersAsList = MultipleJoin('RFighter', joinColumn="rf_id")
fightersAsSResult = SQLMultipleJoin('RFighter', joinColumn="rf_id")
class RFighter(SQLObject):
name = StringCol()
race = ForeignKey('Race', dbName="rf_id")
power = IntCol()
def createAllTables():
setupClass([Race, RFighter])
def test_1():
createAllTables()
# create some races
human=Race(name='human')
saiyajin=Race(name='saiyajin')
hibrid=Race(name='hibrid (human with sayajin)')
namek=Race(name='namekuseijin')
# create some fighters
gokou=RFighter(name='Gokou (Kakaruto)', race=saiyajin, power=10)
vegeta=RFighter(name='Vegeta', race=saiyajin, power=9)
krilim=RFighter(name='Krilim', race=human, power=3)
yancha=RFighter(name='Yancha', race=human, power=2)
jackiechan=RFighter(name='<NAME>', race=human, power=2)
gohan=RFighter(name='Gohan', race=hibrid, power=8)
goten=RFighter(name='Goten', race=hibrid, power=7)
trunks=RFighter(name='Trunks', race=hibrid, power=8)
picollo=RFighter(name='Picollo', race=namek, power=6)
neil=RFighter(name='Neil', race=namek, power=5)
# testing the SQLMultipleJoin stuff
for i, j in zip(human.fightersAsList, human.fightersAsSResult):
assert i is j # the 2 ways should give the same result
assert namek.fightersAsSResult.count() == len(namek.fightersAsList)
assert saiyajin.fightersAsSResult.max('power') == 10
assert trunks in hibrid.fightersAsSResult
assert picollo not in hibrid.fightersAsSResult
assert str(hibrid.fightersAsSResult.sum('power')) == '23'
def test_multiple_join_transaction():
if not supports('transactions'):
return
createAllTables()
trans = Race._connection.transaction()
try:
namek=Race(name='namekuseijin', connection=trans)
gokou=RFighter(name='Gokou (Kakaruto)', race=namek, power=10, connection=trans)
assert namek.fightersAsSResult.count() == 1
assert namek.fightersAsSResult[0]._connection == trans
finally:
trans.commit(True)
Race._connection.autoCommit = True
|
src/ppl/nn/engines/cuda/impls/src/nn/conv/int8_idxn/gen_idxn_int8_kernel.py | tangyanf/ppl.nn | 764 | 11083096 | #!/usr/bin/env python3
"""generate idxn conv kernels dynamically
"""
import os
import sys
import hashlib
class KernelInfo:
def __init__(self, path, s_size, k_num, cta_y_num, cta_x_num, warp_y, warp_x):
self.path = path
self.s_size = s_size
self.k_num = k_num
self.k_size = self.k_num * self.s_size
self.cta_y_num = cta_y_num
self.cta_x_num = cta_x_num
self.warp_y = warp_y
self.warp_x = warp_x
self.cta_y = self.cta_y_num * self.warp_y
self.cta_x = self.cta_x_num * self.warp_x
self.kconfig = "_b" + str(self.cta_y) + "x" + str(self.cta_x) + \
"_w" + str(self.warp_y) + "x" + str(self.warp_x) + \
"_k" + str(self.k_size) + "_s" + str(self.s_size)
self.kname = "nvIdxnConv_imma8816_nhwc" + self.kconfig + "_nosmem"
self.fname = "kernels" + "/int8_idxn" + self.kconfig + ".cu"
self.WARP_SIZE = 32
self.MMA_Y = 8
self.MMA_X = 8
        self.MMA_Y_HALF = 8  # self.MMA_Y / 2
        self.cta_num = cta_y_num * cta_x_num
        self.cta_size = self.cta_num * self.WARP_SIZE
        # Integer tile counts per warp; floor division keeps them as ints for
        # the %d-formatted macro selection and kernel names below.
        self.dAvn_size = self.warp_y // self.MMA_Y_HALF
        self.dBvn_size = self.warp_x // self.MMA_X
def GenKernel(self):
f = open(os.path.join(self.path, self.fname), "w")
f.write("#define TILE_N_PER_CTA %d\n" % self.cta_x)
f.write("#define TILE_M_PER_CTA %d\n\n" % self.cta_y)
f.write("#define TILE_N_PER_WARP %d\n" % self.warp_x)
f.write("#define TILE_M_PER_WARP %d\n\n" % self.warp_y)
f.write("#define TILE_K_PER_CTA %d\n" % self.k_size)
f.write("#define TILE_K_PER_STEP %d\n\n" % self.s_size)
f.write("#define KERNEL_NAME %s\n\n" % self.kname)
f.write("#include <cuda_fp16.h>\n\n")
f.write("#include \"int8_idxn/common/const_macros.h\"\n\n")
if self.s_size == 16:
f.write("#include \"int8_idxn/common/dmem_i1_macros.h\"\n\n")
f.write("#include \"int8_idxn/common/imma_i1_macros.h\"\n\n")
f.write("#define LOAD_dAv1(_regA, _dAv1, _in_id, _in_off) LOAD_dAv1_SIZE%d(_regA, _dAv1, _in_id, _in_off)\n" % self.dAvn_size)
f.write("#define LOAD_dBv1(_regB, _dBv1, _dBv1_off) LOAD_dBv1_SIZE%d(_regB, _dBv1, _dBv1_off)\n\n" % self.dBvn_size)
f.write("#define MMA_INSTS(_C, _A, _B) MMA_INST_1INT_%dx%d(_C, _A, _B)\n\n" % (self.dAvn_size, self.dBvn_size))
elif self.s_size == 32:
f.write("#include \"int8_idxn/common/dmem_i2_macros.h\"\n\n")
f.write("#include \"int8_idxn/common/imma_i2_macros.h\"\n\n")
f.write("#define LOAD_dAv2(_regA, _dAv2, _in_id, _in_off) LOAD_dAv2_SIZE%d(_regA, _dAv2, _in_id, _in_off)\n" % self.dAvn_size)
f.write("#define LOAD_dBv2(_regB, _dBv2, _dBv2_off) LOAD_dBv2_SIZE%d(_regB, _dBv2, _dBv2_off)\n\n" % self.dBvn_size)
f.write("#define MMA_INSTS(_C, _A, _B) MMA_INST_2INT_%dx%d(_C, _A, _B)\n\n" % (self.dAvn_size, self.dBvn_size))
elif self.s_size == 64:
f.write("#include \"int8_idxn/common/dmem_i4_macros.h\"\n\n")
f.write("#include \"int8_idxn/common/imma_i4_macros.h\"\n\n")
f.write("#define LOAD_dAv4(_regA, _dAv4, _in_id, _in_off) LOAD_dAv4_SIZE%d(_regA, _dAv4, _in_id, _in_off)\n" % self.dAvn_size)
f.write("#define LOAD_dBv4(_regB, _dBv4, _dBv4_off) LOAD_dBv4_SIZE%d(_regB, _dBv4, _dBv4_off)\n\n" % self.dBvn_size)
f.write("#define MMA_INSTS(_C, _A, _B) MMA_INST_4INT_%dx%d(_C, _A, _B)\n\n" % (self.dAvn_size, self.dBvn_size))
f.write("#include \"int8_idxn/common/quant_macros.h\"\n\n")
f.write("#include \"int8_idxn/common/output_macros.h\"\n\n")
f.write("#include \"int8_idxn/common/main_body.h\"\n\n")
f.write("#include \"int8_idxn/common/uni_undefs.h\"\n\n")
class IdxSourceFile:
def __init__(self, path):
self.path = path
self.fname = "int8_idxn_kernels.cu"
self.f = open(os.path.join(self.path, self.fname), "w")
self.f.write("#include \"int8_idxn/idxn_kernels.h\"\n\n")
self.f.write("#define ENABLE_FUSE\n\n")
def AppendKernel(self, fname):
self.f.write("#include \"int8_idxn/%s\"\n" % fname)
def Close(self):
self.f.close()
class IdxHeaderFile:
def __init__(self, path):
self.path = path
self.fname = "idxn_kernels.h"
self.f = open(os.path.join(self.path, self.fname), "w")
self.f.write("#ifndef __PPLCUDA_IDXN_KERNELS_H__\n")
self.f.write("#define __PPLCUDA_IDXN_KERNELS_H__\n")
self.f.write("\n\n#include \"kernel_type.h\"\n\n")
def AppendKernel(self, kname):
self.f.write("__global__ int8_idx_kernel_t %s;\n" % kname)
def Close(self):
self.f.write("\n\n#endif\n")
self.f.close()
class InitFile:
def __init__(self, path):
self.path = path
self.fname = "init_idxn_kernels.cu"
self.f = open(os.path.join(self.path, self.fname), "w")
self.f.write("#include \"conv_common.h\"\n\n")
self.f.write("#include \"int8_idxn/idxn_kernels.h\"\n\n")
self.f.write("void InitializeInt8IdxnConvKernelContainer(std::vector<kernel_info_t> & kernel_container)\n{\n")
def AppendKernel(self, s_size, kname):
if s_size == 16:
self.f.write("\tADD_KERNEL(CONV_IDXN_C2, \"%s\", NULL, NULL, &%s);\n" % (kname, kname))
elif s_size == 32:
self.f.write("\tADD_KERNEL(CONV_IDXN_C4, \"%s\", NULL, NULL, &%s);\n" % (kname, kname))
elif s_size == 64:
self.f.write("\tADD_KERNEL(CONV_IDXN_C32, \"%s\", NULL, NULL, &%s);\n" % (kname, kname))
else:
exit(1)
def Close(self):
self.f.write("\n}\n")
self.f.close()
class HashFile:
def __init__(self, path, hash_path):
self.path = path
self.fname = ".hash_file.txt"
self.current_hash = dict()
for root, dirs, files in os.walk(hash_path):
for file in files:
fname = os.path.join(root, file)
fhash = hashlib.md5(open(fname, 'rb').read()).hexdigest()
self.current_hash[fname] = fhash
def CheckFileExist(self):
return os.path.isfile(os.path.join(self.path, self.fname))
def CompareWithPreviousHash(self):
previous_hash = dict()
for line in open(os.path.join(self.path, self.fname), "r"):
fname, fhash = line.split()
previous_hash[fname] = fhash
return previous_hash == self.current_hash
def WriteCurrentHash(self):
self.f = open(os.path.join(self.path, self.fname), "w")
for fname, fhash in self.current_hash.items():
self.f.write("%s\t%s\n" % (fname, fhash))
def Close(self):
self.f.close()
def GenAllKernels(parent_path):
idx_header_file = IdxHeaderFile(parent_path)
idx_source_file = IdxSourceFile(parent_path)
init_file = InitFile(parent_path)
path = parent_path + '/kernels'
if not os.path.exists(path):
os.makedirs(path)
for s_size in [16, 32, 64]:
for k_num in [1, 2]:
#for warp_y in [16, 32, 64]:
for warp_y in [16, 32]:
for warp_x in [8, 16, 32]:
for cta_y_num in [1, 2, 4]:
for cta_x_num in [1, 2, 4]:
if cta_y_num == 4 and cta_x_num == 4:
continue
kernel = KernelInfo(parent_path, s_size, k_num, cta_y_num, cta_x_num, warp_y, warp_x)
kernel.GenKernel()
idx_header_file.AppendKernel(kernel.kname)
idx_source_file.AppendKernel(kernel.fname)
init_file.AppendKernel(s_size, kernel.kname)
idx_header_file.Close()
idx_source_file.Close()
init_file.Close()
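# Added commentary (not in the original generator): each kernel file and name
# is derived from its tile configuration. For instance, the combination
# s_size=16, k_num=1, cta_y_num=2, cta_x_num=2, warp_y=16, warp_x=8 yields the
# kernel name
#   nvIdxnConv_imma8816_nhwc_b32x16_w16x8_k16_s16_nosmem
# written to kernels/int8_idxn_b32x16_w16x8_k16_s16.cu.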
if __name__ == '__main__':
if len(sys.argv) != 2:
print(__doc__)
sys.exit(1)
path = sys.argv[1]
if not os.path.exists(path):
os.makedirs(path)
hash_file = HashFile(path, os.path.dirname(os.path.abspath(__file__)))
if not hash_file.CheckFileExist() or not hash_file.CompareWithPreviousHash():
GenAllKernels(path)
hash_file.WriteCurrentHash()
hash_file.Close()
|
cantoolz/module.py | The-Cracker-Technology/CANToolz | 194 | 11083119 | import codecs
import threading
import traceback
import collections
class Command(object):
"""Command class helper to easily identify command attributes."""
def __init__(self, description, num_params, desc_params, callback, is_enabled, index=None):
#: str -- Description of what the command does.
self.description = description
#: int -- Number of parameters.
self.num_params = num_params
#: str -- Description of the parameters if any.
self.desc_params = desc_params
#: function -- Function to call when executing the command.
self.callback = callback
#: bool -- Command is enabled (or not).
self.is_enabled = is_enabled
#: int -- Call command by index instead of parameter. None if using parameter.
self.index = index
class CANModule:
"""Generic class for all modules.
Defines all default behaviors that must/should be overriden by other modules.
"""
#: Name of the module.
name = 'Abstract CANSploit module'
#: Help of the module (can be multiline).
help = 'No help available'
#: ID of the module.
id = 0
#: Version of the module.
version = 0.0
#: Debug flag for verbose output.
DEBUG = 0
_active = True # True if module is enabled. False otherwise.
_timeout = 3 # Maximum timeout value when running thread
thr_block = threading.Event() # Blocking mode (using events)
def __init__(self, params):
"""Initialize the module.
:param dict params: Parameters of the module.
"""
self.DEBUG = int(params.get('debug', 0))
self._bus = params.get('bus', self.__class__.__name__)
self._active = False if params.get('active') in ["False", "false", "0", "-1"] else True
#: List of commands supported by the module.
self.commands = collections.OrderedDict() # Command list (doInit section)
self.commands['S'] = Command('Current status', 0, '', self.get_status, True)
self.commands['s'] = Command('Stop/Activate current module', 0, '', self.do_activate, True)
self._status = 0
self._error_text = ""
self.do_init(params)
@staticmethod
def get_hex(bytes_in):
"""Get hexadecimal representation of `bytes_in`."""
return (codecs.encode(bytes_in, 'hex_codec')).decode("ISO-8859-1")
@property
def is_active(self):
"""Module is active or not.
:returns: boolean -- `True` if module is active, `False` otherwise.
"""
return self._active
def get_status(self):
"""Human-representation of the module status.
:returns: str -- Human-readable status of the module.
"""
return 'Current status: {0}'.format(self._active)
def do_activate(self, mode=-1):
"""Force the status of the module to `mode`.
:param int mode: Mode the module should be switched to (default: `-1`)
- `0` module is switched off
- `-1` module is switched to the opposite of its current state (active -> inactive / inactive -> active)
- else module is switched on
:returns: str -- Human-readable status of the module.
"""
if mode == -1:
self._active = not self._active
elif mode == 0:
self._active = False
else:
self._active = True
return 'Active status: {0}'.format(self._active)
def dprint(self, level, msg):
"""Print function for debugging purpose."""
str_msg = '{0}: {1}'.format(self.__class__.__name__, msg)
if level <= self.DEBUG:
print(str_msg)
def raw_write(self, string):
"""Call the command specified in `string` and return its result. Used for direct input.
:param str string: The full command with its paramaters (e.g. 's' to stop the module)
:returns: str -- Result of the command execution.
"""
ret = ''
self.thr_block.wait(timeout=self._timeout) # Timeout of 3 seconds
self.thr_block.clear()
full_cmd = string.lstrip()
if ' ' in full_cmd: # Any parameters specified?
in_cmd, parameters = full_cmd.split(' ', maxsplit=1)
else:
in_cmd = full_cmd
parameters = None
if in_cmd in self.commands:
cmd = self.commands[in_cmd]
if cmd.is_enabled:
try:
# TODO: Clean the logic once we get rid of call-by-index completely
if cmd.num_params == 0 or (cmd.num_params == 1 and parameters is None):
if cmd.index is None:
ret = cmd.callback()
else:
ret = cmd.callback(cmd.index)
elif cmd.num_params == 1:
if cmd.index is None:
ret = cmd.callback(parameters)
else:
ret = cmd.callback(cmd.index, parameters)
else: # For command called by index (see CANMusic example).
ret = cmd.callback(cmd.index)
except Exception as e:
ret = "ERROR: " + str(e)
traceback.print_exc()
else:
ret = 'Error: command is disabled!'
self.thr_block.set()
return ret
def do_effect(self, can_msg, args):
"""Function to override when implementing an effect (fuzzing, sniffer, filtering operations, etc.)
:param cantoolz.can.CANSploitMessage can_msg: The CAN message in the pipe.
:param dict args: The action arguments as configured in the configuration file.
:returns: str -- CAN message after effect has been applied.
"""
return can_msg
def get_name(self):
"""Get the name of the module.
:returns: str -- Name of the module.
"""
return self.name
def get_help(self):
"""Get the help of the module.
:returns: str -- Help of the module.
"""
return self.commands
def get_status_bar(self):
"""Get the status of the module.
        :returns: dict -- 'bar': progress bar value of the module. 'text': current text for errors and notifications.
"""
self.thr_block.wait(timeout=self._timeout)
self.thr_block.clear()
status = int(self._status)
error_text = ""
if self._error_text != "":
error_text = self._error_text
self._error_text = ""
self.thr_block.set()
return {'bar': status, 'text': error_text}
def set_error_text(self, text):
"""Function to set current notification or error
:returns: int -- Status of init.
"""
print("!!! " + text)
self._error_text = self.__class__.name + ": " + text
def do_init(self, params):
"""Function to initialize the module before doing any actual work.
:returns: int -- Status of init.
"""
return 0
def do_stop(self, params):
"""Function called when stopping activity of the module.
:returns: int -- Status of stop.
"""
return 0
def do_start(self, params):
"""Function called when starting activity of the module.
:returns: int -- Status of start.
"""
return 0
def do_exit(self, params):
"""Function called when exiting activity of the module.
:returns: int -- Status of exit.
"""
return 0
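# Illustrative usage sketch -- a minimal example of the command interface above,
# assuming a concrete subclass (the subclass name and the 'e' command are
# hypothetical; the Command() arguments follow the pattern used in __init__:
# description, number of parameters, parameter description, callback, enabled).
# Note that raw_write() also relies on self.thr_block and self._timeout, which
# are set up elsewhere in this class.
#
#     class EchoModule(BaseModule):          # "BaseModule" stands for the class above
#         def do_init(self, params):
#             self.commands['e'] = Command('Echo text back', 1, '<text>', lambda t: t, True)
#             return 0
#
#     mod = EchoModule({'debug': 1})
#     mod.raw_write('S')         # -> 'Current status: True'
#     mod.raw_write('s')         # -> 'Active status: False'  (do_activate toggles)
#     mod.raw_write('e hello')   # -> 'hello'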
|
py_pdf_parser/visualise/info_figure.py | d-hoke/py-pdf-parser | 186 | 11083136 | <gh_stars>100-1000
from typing import Dict, List, Optional, TYPE_CHECKING
from matplotlib.backend_bases import MouseButton
if TYPE_CHECKING:
from py_pdf_parser.components import PDFElement
def get_clicked_element_info(clicked_elements: Dict[MouseButton, "PDFElement"]) -> str:
left_element = clicked_elements.get(MouseButton.LEFT)
right_element = clicked_elements.get(MouseButton.RIGHT)
output = []
output.append("Left clicked element:")
output.append("---------------------")
output += _get_element_info(left_element)
output.append("")
output.append("Right clicked element:")
output.append("---------------------")
output += _get_element_info(right_element)
output.append("")
output.append("Element comparison:")
output.append("-------------------")
output += _get_element_comparison_info(left_element, right_element)
return "\n".join(output)
def _get_element_info(element: Optional["PDFElement"]) -> List[str]:
if not element:
return ["Click an element to see details"]
return [
f"Text: {element.text(stripped=False)}",
f"Font: {element.font}",
f"Tags: {element.tags}",
f"Bounding box: {element.bounding_box}",
f"Width: {element.bounding_box.width}",
f"Height: {element.bounding_box.height}",
]
def _get_element_comparison_info(
element1: Optional["PDFElement"], element2: Optional["PDFElement"]
) -> List[str]:
if element1 is None or element2 is None:
return ["Left click one element and right click another to see comparison"]
bbox1 = element1.bounding_box
bbox2 = element2.bounding_box
# Height
height_diff = abs(bbox1.height - bbox2.height)
relative_height_diff = height_diff / bbox1.height
# Line margin (i.e. vertical gap)
line_margin = max(bbox1.y0 - bbox2.y1, bbox2.y0 - bbox1.y1)
relative_line_margin = line_margin / bbox1.height
# Alignment
alignments = {
"left": abs(bbox1.x0 - bbox2.x0),
"right": abs(bbox1.x1 - bbox2.x1),
"center": abs((bbox1.x0 + bbox1.x1) / 2 - (bbox2.x0 + bbox2.x1) / 2),
}
sorted_alignments = sorted(alignments.items(), key=lambda x: x[1])
alignment_name, alignment_value = sorted_alignments[0]
relative_alignment_value = alignment_value / bbox1.height
return [
"Note 'relative' is relative to the left clicked element",
f"Height diff: {height_diff}",
f"Relative height diff {relative_height_diff}",
f"Line margin: {line_margin}",
f"Relative line margin: {relative_line_margin}",
f"Closest alignment: {alignment_value} ({alignment_name})",
f"Relative alignment: {relative_alignment_value}",
]
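# Worked example (illustrative, with hypothetical bounding boxes). Given
# bbox1 = (x0=10, x1=110, y0=500, y1=520) and bbox2 = (x0=10, x1=90, y0=470, y1=490),
# the metrics computed above come out as:
#
#     height_diff          = |20 - 20| = 0      -> relative_height_diff = 0.0
#     line_margin          = max(500 - 490, 470 - 520) = 10
#     relative_line_margin = 10 / 20 = 0.5
#     alignments           = {'left': 0, 'right': 20, 'center': 10}
#     closest alignment    = 'left' (value 0, relative alignment 0.0)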
|
tests/changes/api/test_build_tag.py | vault-the/changes | 443 | 11083140 | <gh_stars>100-1000
import json
from changes.testutils import APITestCase
NONE = None
NO_TAGS = []
ONE_TAG = ['one_tag']
MULTI_TAGS = ['one_tag', 'two_tags']
TAG_LISTS = [MULTI_TAGS, ONE_TAG, NO_TAGS, NONE]
PATH = '/api/0/builds/{0}/tags'
class BuildTagTest(APITestCase):
def test_get_tags(self):
project = self.create_project()
for tag_list in TAG_LISTS:
build = self.create_build(project, tags=tag_list)
path = PATH.format(build.id.hex)
resp = self.client.get(path)
data = self.unserialize(resp)
assert data['tags'] == build.tags
def test_set_tags(self):
project = self.create_project()
# try updating to none, one tag, or two tags
# from varying start states (none, one tag, or two tags)
for tag_list_update in TAG_LISTS:
for tag_list_build in TAG_LISTS:
build = self.create_build(project, tags=tag_list_build)
path = PATH.format(build.id.hex)
self.client.post(path, data={'tags': json.dumps(tag_list_update)})
resp = self.client.get(path)
data = self.unserialize(resp)
if tag_list_update is None:
assert data['tags'] == []
else:
assert data['tags'] == tag_list_update
def test_bad_tag_format(self):
project = self.create_project()
tags = {'tags': 'one_tag'}
build = self.create_build(project, tags=tags)
path = PATH.format(build.id.hex)
resp = self.client.post(path, data=tags)
assert resp.status_code == 400
def test_long_tags(self):
project = self.create_project()
build = self.create_build(project)
path = PATH.format(build.id.hex)
tags = ['ok_tag_length', 'one_really_stupidly_long_tag']
resp = self.client.post(path, data={'tags': json.dumps(tags)})
assert resp.status_code == 400
|
social_core/backends/eventbrite.py | shnaqawi/social-core | 745 | 11083141 | from .oauth import BaseOAuth2
class EventbriteOAuth2(BaseOAuth2):
"""Eventbrite OAuth2 authentication backend"""
name = 'eventbrite'
AUTHORIZATION_URL = 'https://www.eventbrite.com/oauth/authorize'
ACCESS_TOKEN_URL = 'https://www.eventbrite.com/oauth/token'
METADATA_URL = 'https://www.eventbriteapi.com/v3/users/me'
ACCESS_TOKEN_METHOD = 'POST'
STATE_PARAMETER = False
REDIRECT_STATE = False
def get_user_details(self, response):
"""Return user details from an Eventbrite metadata response"""
email = next(iter(filter(lambda x: x['primary'], response['emails'])))['email']
return {
'username': email,
'email': email,
'first_name': response['first_name'],
'last_name': response['last_name']
}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data and datacenter information from service"""
return self.get_json(self.METADATA_URL, headers={
'Authorization': 'Bearer ' + access_token
})
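# Illustrative sketch -- the payload below is a hypothetical, trimmed version of
# an Eventbrite "users/me" response, used only to show how get_user_details()
# selects the primary email:
#
#     response = {
#         'emails': [{'email': 'work@example.com', 'primary': False},
#                    {'email': 'me@example.com', 'primary': True}],
#         'first_name': 'Ada',
#         'last_name': 'Lovelace',
#     }
#     # get_user_details(response) would return:
#     # {'username': 'me@example.com', 'email': 'me@example.com',
#     #  'first_name': 'Ada', 'last_name': 'Lovelace'}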
|
conan/tools/__init__.py | Wonders11/conan | 6,205 | 11083206 | <reponame>Wonders11/conan
CONAN_TOOLCHAIN_ARGS_FILE = "conanbuild.conf"
CONAN_TOOLCHAIN_ARGS_SECTION = "toolchain"
|
tools/gsuite-grant-analyzer/gsuite_grant_analyzer/bigquery_helpers.py | ruchirjain86/professional-services | 2,116 | 11083216 | """Helper functions for BigQuery."""
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is a proof of concept and is not meant to be fully functional
# nor feature-complete. Error checking and log reporting should be adjusted
# based on the user's needs. Script invocation is still in its infancy.
#
# Please, refer to README.md file for instructions.
import datetime
import logging
from retrying import retry
from google.cloud import bigquery
logger = logging.getLogger(__name__)
# Name of the dataset in BigQuery. The dataset will be automatically created.
# Change it if you already have a dataset with the same name.
DATASET_NAME = "app_scopes"
# Determines where BigQuery data is saved (US, EU)
DATASET_LOCATION = "EU"
# Maximum number of rows that can be inserted into BQ in one operation.
# Currently, the hard limit for the python client is 10000.
MAX_BQ_INSERT_SIZE = 10000
def bq_create_client(project, credentials):
"""Creates BigQuery client.
Args:
project: GCP project where to create the BigQuery dataset
credentials: credentials of the service account used for access
Returns:
Instance of the BigQuery client.
"""
return bigquery.Client(project=project, credentials=credentials)
def bq_create_dataset(bq_client):
"""Creates the BigQuery dataset.
If the dataset already exists, the existing dataset will be returned.
Dataset will be create in the location specified by DATASET_LOCATION.
Args:
bq_client: BigQuery client
Returns:
BigQuery dataset that will be used to store data.
"""
dataset_id = "{}.{}".format(bq_client.project, DATASET_NAME)
dataset = bigquery.Dataset(dataset_id)
dataset.location = DATASET_LOCATION
dataset = bq_client.create_dataset(dataset, exists_ok=True)
return dataset
def bq_create_table(bq_client, dataset):
"""Creates a table in the supplied dataset, with a unique name based on time.
Args:
bq_client: BigQuery client
dataset: BigQuery dataset where table must be created
Returns:
Table that will be used to store data.
"""
schema = [
bigquery.SchemaField("user", "STRING", mode="REQUIRED"),
bigquery.SchemaField("clientId", "STRING", mode="REQUIRED"),
bigquery.SchemaField("scope", "STRING", mode="REQUIRED"),
bigquery.SchemaField("displayText", "STRING", mode="REQUIRED"),
]
table_id = \
"{}.{}.{}_{}".format(
bq_client.project, dataset.dataset_id,
"app_scopes",
datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f"))
table = bigquery.Table(table_id, schema=schema)
table = bq_client.create_table(table)
logger.info("Created table %s", table.full_table_id)
return table
def print_bq_insert_errors(rows, errors):
"""Prints errors that occurred during the insertions of rows.
Parses the results of the BigQuery insert and prints a human readable
representation of the errors, suppressing noise by removing the rows that
were not inserted due to errors in other rows, and adding information about
the data that generated actual errors.
Args:
rows: original data, used to print the data that caused an error
errors: error dictionary as returned by the BigQuery client
"""
logger.error("The following errors have been detected:")
stopped_rows = 0
for item in errors:
index = item["index"]
row_errors = item["errors"]
for error in row_errors:
if error["reason"] != "stopped":
logger.error("Row number: %d, Row data: %s, Error: %s", index,
rows[index], error)
else:
stopped_rows += 1
if stopped_rows:
logger.error(
"Also, %d rows were stopped (not inserted) due to the errors "
"above.", stopped_rows)
@retry(wait_exponential_multiplier=1000,
wait_exponential_max=60000,
stop_max_attempt_number=10)
def _insert_rows(bq_client, table, rows):
return bq_client.insert_rows(table, rows)
def _batch_insert(bq_client, table, rows):
"""Inserts rows into a BigQuery table in batches of MAX_BQ_INSERT_SIZE each.
Args:
bq_client: BigQuery client
table: table where rows must be inserted
rows: a list of rows to insert
"""
total_rows = len(rows)
inserted_rows = 0
batch = 1
logger.info("Inserting %d rows into table %s", total_rows,
table.full_table_id)
while inserted_rows < total_rows:
start = (batch - 1) * MAX_BQ_INSERT_SIZE
end = batch * MAX_BQ_INSERT_SIZE
batch_rows = rows[start:end]
inserted_rows += len(batch_rows)
errors = _insert_rows(bq_client, table, batch_rows)
if errors:
print_bq_insert_errors(batch_rows, errors)
logger.error(
"The program has been terminated due to BigQuery insertion "
"errors.")
exit(1)
else:
logger.info("Batch %d: inserted rows %d to %d", batch, start + 1,
min(end, len(rows)))
batch += 1
logger.info("All rows inserted.")
def bq_insert_rows(bq_client, table, rows):
"""Inserts rows into a BigQuery table.
Args:
bq_client: BigQuery client
table: table where rows must be inserted
rows: a list of rows to insert
"""
_batch_insert(bq_client, table, rows)
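# Typical call sequence (illustrative sketch; the project id and credentials
# object are placeholders, and the row tuple follows the
# (user, clientId, scope, displayText) schema defined in bq_create_table()):
#
#     client = bq_create_client("my-gcp-project", credentials)
#     dataset = bq_create_dataset(client)
#     table = bq_create_table(client, dataset)
#     rows = [("alice@example.com", "1234.apps.example.com",
#              "https://www.googleapis.com/auth/drive", "Drive API")]
#     bq_insert_rows(client, table, rows)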
|
tests/orm/test_computers.py | aiidateam/aiida_core | 153 | 11083240 | <filename>tests/orm/test_computers.py
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=no-self-use
"""Tests for the `Computer` ORM class."""
import pytest
from aiida.common import exceptions
from aiida.orm import AuthInfo, Computer, User
from aiida.plugins import TransportFactory
@pytest.mark.usefixtures('aiida_profile_clean')
class TestComputer:
"""Tests for the `Computer` ORM class."""
def test_get_transport(self):
"""
Test the get_transport method of Computer
"""
import tempfile
new_comp = Computer(
label='bbb',
hostname='localhost',
transport_type='core.local',
scheduler_type='core.direct',
workdir='/tmp/aiida'
).store()
# Configure the computer - no parameters for local transport
authinfo = AuthInfo(computer=new_comp, user=User.collection.get_default())
authinfo.store()
transport = new_comp.get_transport()
# It's on localhost, so I see files that I create
with transport:
with tempfile.NamedTemporaryFile() as handle:
assert transport.isfile(handle.name) is True
# Here the file should have been deleted
assert transport.isfile(handle.name) is False
def test_delete(self):
"""Test the deletion of a `Computer` instance."""
new_comp = Computer(
label='aaa',
hostname='aaa',
transport_type='core.local',
scheduler_type='core.pbspro',
workdir='/tmp/aiida'
).store()
comp_pk = new_comp.pk
check_computer = Computer.collection.get(id=comp_pk)
assert comp_pk == check_computer.pk
Computer.collection.delete(comp_pk)
with pytest.raises(exceptions.NotExistent):
Computer.collection.get(id=comp_pk)
def test_get_minimum_job_poll_interval(self):
"""Test the :meth:`aiida.orm.Computer.get_minimum_job_poll_interval` method."""
computer = Computer()
# No transport class defined: fall back on class default.
assert computer.get_minimum_job_poll_interval() == Computer.PROPERTY_MINIMUM_SCHEDULER_POLL_INTERVAL__DEFAULT
# Transport class defined: use default of the transport class.
transport = TransportFactory('core.local')
computer.transport_type = 'core.local'
assert computer.get_minimum_job_poll_interval() == transport.DEFAULT_MINIMUM_JOB_POLL_INTERVAL # pylint: disable=protected-access
# Explicit value defined: use value of the instance.
interval = -10
computer.set_minimum_job_poll_interval(interval)
assert computer.get_minimum_job_poll_interval() == interval
class TestComputerConfigure:
"""Tests for the configuring of instance of the `Computer` ORM class."""
@pytest.fixture(autouse=True)
def init_profile(self, aiida_profile_clean): # pylint: disable=unused-argument
"""Prepare current user and computer builder with common properties."""
# pylint: disable=attribute-defined-outside-init
from aiida.orm.utils.builders.computer import ComputerBuilder
self.comp_builder = ComputerBuilder(label='test', description='computer', hostname='localhost')
self.comp_builder.scheduler = 'core.direct'
self.comp_builder.work_dir = '/tmp/aiida'
self.comp_builder.prepend_text = ''
self.comp_builder.append_text = ''
self.comp_builder.use_double_quotes = False
self.comp_builder.mpiprocs_per_machine = 8
self.comp_builder.default_memory_per_machine = 1000000
self.comp_builder.mpirun_command = 'mpirun'
self.comp_builder.shebang = '#!xonsh'
self.user = User.collection.get_default()
def test_configure_local(self):
"""Configure a computer for local transport and check it is configured."""
self.comp_builder.label = 'test_configure_local'
self.comp_builder.transport = 'core.local'
comp = self.comp_builder.new()
comp.store()
comp.configure()
assert comp.is_user_configured(self.user)
def test_configure_ssh(self):
"""Configure a computer for ssh transport and check it is configured."""
self.comp_builder.label = 'test_configure_ssh'
self.comp_builder.transport = 'core.ssh'
comp = self.comp_builder.new()
comp.store()
comp.configure(username='radames', port='22')
assert comp.is_user_configured(self.user)
def test_configure_ssh_invalid(self):
"""Try to configure computer with invalid auth params and check it fails."""
self.comp_builder.label = 'test_configure_ssh_invalid'
self.comp_builder.transport = 'core.ssh'
comp = self.comp_builder.new()
comp.store()
with pytest.raises(ValueError):
comp.configure(username='radames', invalid_auth_param='TEST')
def test_non_configure_error(self):
"""Configure a computer for local transport and check it is configured."""
self.comp_builder.label = 'test_non_configure_error'
self.comp_builder.transport = 'core.local'
comp = self.comp_builder.new()
comp.store()
with pytest.raises(exceptions.NotExistent) as exc:
comp.get_authinfo(self.user)
assert str(comp.pk) in str(exc)
assert comp.label in str(exc)
assert self.user.get_short_name() in str(exc)
assert str(self.user.pk) in str(exc)
assert 'verdi computer configure' in str(exc)
|
scripts/utils.py | PavelBlend/fluid-engine-dev | 1,355 | 11083251 | """
Copyright (c) 2018 <NAME>
I am making my contributions/submissions to this project solely in my personal
capacity and am not conveying any rights to any intellectual property of any
third parties.
"""
"""
Jet uses portions of Chromium V8.
Copyright 2008 the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import platform
import re
import os
import fnmatch
def guess_os():
"""
Returns the name of the operating system.
This will return 'linux' for Linux compatible system, 'macos' for Macs,
'win32' for Windows, and 'freebsd' for FreeBSD.
"""
id = platform.system()
if id == 'Linux':
return 'linux'
elif id == 'Darwin':
return 'macosx'
elif id == 'Windows' or id == 'Microsoft':
return 'win32'
else:
return None
def guess_word_size():
"""
Returns the size of the pointer. For 64-bit systems, this will return '64',
and '32' for 32-bit systems.
"""
if '64' in platform.machine():
return '64'
else:
archs = platform.architecture()
for a in archs:
if '64' in a:
return '64'
return '32'
def guess_arch():
"""
Returns the architecture name of the system.
"""
if is_windows():
if guess_word_size() == '64':
return 'x64'
else:
return 'win32'
id = platform.machine()
if is_mac():
if guess_word_size() == '64' and id == 'i386':
return 'x86_64'
else:
return id
if id.startswith('arm'):
return 'arm'
elif (not id) or (not re.match('(x|i[3-6])86', id) is None):
return id
else:
return None
def detect_num_cpus():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
        if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
            # Linux & Unix:
            ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
            if isinstance(ncpus, int) and ncpus > 0:
                return ncpus
        else:  # OSX:
            return int(os.popen("sysctl -n hw.ncpu").read())
    # Windows:
    if "NUMBER_OF_PROCESSORS" in os.environ:
        ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
return 1 # Default
def is_windows():
"""
Returns True if you are using Windows.
"""
return guess_os() == 'win32'
def is_windows64():
"""
Returns True if you are using Visual Studio compiler in 64-bit mode.
"""
if is_windows():
return '64' in os.environ['LIB']
else:
return False
def is_unix():
"""
Returns True if you are using Unix compatible system (Linux, Mac, and
FreeBSD).
"""
return not is_windows()
def is_mac():
"""
Returns True if you are using Mac.
"""
return guess_os() == 'macosx'
def is_linux():
"""
Returns True if you are using Linux.
"""
return guess_os() == 'linux'
def is64():
"""
Returns True if running on 64-bit machine
"""
return guess_word_size() == '64'
def navigate_all_files(root_path, patterns):
"""
A generator function that iterates all files that matches the given patterns
from the root_path.
"""
for root, dirs, files in os.walk(root_path):
for pattern in patterns:
for filename in fnmatch.filter(files, pattern):
yield os.path.join(root, filename)
def get_all_files(root_path, patterns):
"""
Returns a list of all files that matches the given patterns from the
root_path.
"""
ret = []
for filepath in navigate_all_files(root_path, patterns):
ret.append(filepath)
return ret
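# Usage sketch (illustrative; the 'src' directory and file patterns are only
# examples). A build script would typically combine the platform probes with
# the file helpers like this:
#
#     print(guess_os(), guess_arch(), guess_word_size(), detect_num_cpus())
#     for path in get_all_files('src', ['*.cpp', '*.h']):
#         print(path)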
|
apps/incidents/views.py | seanlefevre/openduty | 145 | 11083253 | import uuid
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.utils import timezone
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.contrib import messages
from rest_framework import viewsets
from rest_framework import status
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework.decorators import detail_route
from apps.notification.models import ScheduledNotification
from apps.notification.helper import NotificationHelper
from apps.services.models import ServiceSilenced, ServiceTokens
from apps.events.models import EventLog
from apps.services.models import Token
from apps.openduty.tasks import unsilence_incident
from apps.incidents.serializers import IncidentSerializer
from apps.incidents.escalation_helper import services_where_user_is_on_call
from apps.incidents.models import Incident, IncidentSilenced
class IncidentViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows incidents to be viewed or edited.
"""
queryset = Incident.objects.all()
serializer_class = IncidentSerializer
permission_classes = (IsAuthenticatedOrReadOnly,)
def is_relevant(self, incident, new_event_type):
"""
Check incident conditions
:param incident: Actual incident
:param new_event_type: Reported event_type
:return: True if relevant else False
"""
valid_list = [Incident.ACKNOWLEDGE, Incident.RESOLVE, Incident.TRIGGER, Incident.UNACKNOWLEDGE, Incident.ESCALATE]
# There is already an incident
if incident.event_type and incident.event_type in valid_list:
# True if not acknowleged or type is resolve
return (incident.event_type != Incident.ACKNOWLEDGE or
(incident.event_type == Incident.ACKNOWLEDGE and
new_event_type == Incident.RESOLVE) or (incident.event_type == Incident.ACKNOWLEDGE and new_event_type == Incident.UNACKNOWLEDGE))
# New incident
else:
# True if this is a trigger action
return new_event_type == Incident.TRIGGER
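    # Illustrative classification examples for is_relevant() (sketch, using the
    # Incident event-type constants referenced above):
    #   current Incident.TRIGGER,     new Incident.ACKNOWLEDGE -> True  (not yet ACKed)
    #   current Incident.ACKNOWLEDGE, new Incident.RESOLVE     -> True  (ACKed incident may be resolved)
    #   current Incident.ACKNOWLEDGE, new Incident.TRIGGER     -> False (already ACKed)
    #   no prior event type,          new Incident.TRIGGER     -> True  (new incident)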
def create(self, request, *args, **kwargs):
try:
assert request.data.get("event_type")
assert request.data.get("incident_key")
assert request.data.get("service_key")
except AssertionError:
response = {"status": "failure", "message": "Mandatory parameter missing"}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
try:
token = Token.objects.get(key=request.data.get("service_key"))
service_token = ServiceTokens.objects.get(token_id=token)
service = service_token.service_id
except Token.DoesNotExist:
return Response({"No service key"}, status=status.HTTP_403_FORBIDDEN)
except ServiceTokens.DoesNotExist:
return Response({"Service key does not exist"}, status=status.HTTP_404_NOT_FOUND)
esc = False
incidents = Incident.objects.filter(
incident_key=request.data.get("incident_key"), service_key=service
)
if incidents:
incident = incidents[0]
print(f"Received {request.data.get('event_type')} for"
f" {request.data.get('incident_key')} on service {service_token.name}")
# check if type is ACK or resolve and if there's an
# escalation to a different escalation policy, remove it
if request.data.get('event_type') in [Incident.ACKNOWLEDGE, Incident.RESOLVE]:
print("ACK or Resolve, removing specific escalation")
esc = True
incident.service_to_escalate_to = None
incident.save()
# check if incident is resolved and refuse to ACK
event_log_message = f"{service_token.name} api key changed {incident.incident_key} " \
f"from {incident.event_type} to {request.data.get('event_type')}"
if incident.event_type not in [Incident.RESOLVE, Incident.ACKNOWLEDGE] and esc:
event_log_message += ", unescalated"
else:
event_type = request.data.get("event_type")
incident_key = str(uuid.uuid1()) if (event_type == Incident.TRIGGER) else request.data.get("incident_key")
incident_service_key = service
incident = Incident.objects.create(
event_type=event_type,
incident_key=incident_key,
service_key=incident_service_key,
description=request.data.get("description", " "),
details=request.data.get("details", " "),
occurred_at=timezone.now()
)
event_log_message = f"{service_token.name} api key created " \
f"{incident.incident_key} with status {request.data.get('event_type')}"
if self.is_relevant(incident, request.data.get('event_type')):
user = None if request.user.is_anonymous else request.user
event_log = EventLog(
user=user,
service_key=incident.service_key,
data=event_log_message,
occurred_at=timezone.now()
)
event_log.incident_key = incident
event_log.action = incident.event_type
event_log.save()
service_silenced = ServiceSilenced.objects.filter(service=service).count() > 0
if incident.event_type == Incident.TRIGGER and not service_silenced:
NotificationHelper.notify_incident(incident)
if incident.event_type == Incident.RESOLVE or incident.event_type == Incident.ACKNOWLEDGE:
ScheduledNotification.remove_all_for_incident(incident)
if incident.event_type == Incident.RESOLVE and service.send_resolve_enabled:
NotificationHelper.notify_incident(incident)
response = {"status": "success", "message": "Event processed", "incident_key": incident.incident_key}
return Response(response, status=status.HTTP_201_CREATED)
@detail_route(methods=['put'])
def escalate(self, request, *args, **kwargs):
"""
escalate an incident to another service's escalation rule; persists until ACK
"""
try:
token = Token.objects.get(key=request.data.get("service_key"))
service_token = ServiceTokens.objects.get(token_id=token)
service = service_token.service_id
except ServiceTokens.DoesNotExist:
return Response({"Service key does not exist"}, status=status.HTTP_404_NOT_FOUND)
except Token.DoesNotExist:
return Response({"No service key"}, status=status.HTTP_403_FORBIDDEN)
try:
token2 = Token.objects.get(key=request.data.get("service_key_to_escalate_to"))
service_token2 = ServiceTokens.objects.get(token_id=token2)
service2 = service_token2.service_id
except ServiceTokens.DoesNotExist:
return Response({"Service to escalate to key does not exist"}, status=status.HTTP_404_NOT_FOUND)
except Token.DoesNotExist:
return Response({"No service to escalate to key"}, status=status.HTTP_403_FORBIDDEN)
try:
# get service_to_escalate to and modify incident object
incident = Incident.objects.get(incident_key=request.data.get("incident_key"), service_key=service)
except (Incident.DoesNotExist, KeyError):
return Response({"Incident does not exist"}, status=status.HTTP_404_NOT_FOUND)
incident.service_to_escalate_to = service2
incident.event_type = "escalated"
if request.data.get("IncidentDetailView"):
incident.details = request.data.get("IncidentDetailView")
incident.save()
username = 'no user' if request.user.is_anonymous else request.user.username
event_log_message = f"{username} escalated to " \
f"service escalation policy : {incident.incident_key} to {service2.name}"
EventLog.objects.create(
user=request.user,
action="escalate",
incident_key=incident,
service_key=incident.service_key,
data=event_log_message,
occurred_at=timezone.now()
)
# remove all planned notifs
ScheduledNotification.remove_all_for_incident(incident)
# notify anew, this time notify_incident will detect the service_to_escalate to and notify its escalation rule
NotificationHelper.notify_incident(incident)
headers = self.get_success_headers(request.POST)
return Response(
{f"Incident successfully escalated to service {service2.name} escalation policy"},
status=status.HTTP_200_OK,
headers=headers
)
class OnCallIncidentsListView(LoginRequiredMixin, ListView):
model = Incident
context_object_name = 'all_incidents'
template_name = 'incidents/list.html'
def get_queryset(self, *args, **kwargs):
services = services_where_user_is_on_call(self.request.user)
queryset = Incident.objects.filter(service_key__in=services).order_by("-occurred_at")
return queryset
class IncidentsListView(LoginRequiredMixin, ListView):
model = Incident
queryset = Incident.objects.all().order_by('id')
template_name = 'incidents/list.html'
context_object_name = 'all_incidents'
class IncidentDetailView(LoginRequiredMixin, DetailView):
model = Incident
template_name = 'incidents/details.html'
context_object_name = 'item'
def get_context_data(self, **kwargs):
context = super(IncidentDetailView, self).get_context_data(**kwargs)
incident = self.object
try:
service_silenced = ServiceSilenced.objects.get(service=incident.service_key).silenced
except ServiceSilenced.DoesNotExist:
service_silenced = False
try:
is_obj = IncidentSilenced.objects.get(incident=incident)
incident_silenced = str(is_obj.silenced_until - timezone.now()).split(".")[0]
except IncidentSilenced.DoesNotExist:
incident_silenced = False
users = User.objects.all()
history = EventLog.objects.filter(
incident_key=incident).order_by('-occurred_at')
extra_context = {
'item': incident,
'users': users,
'url': self.request.get_full_path(),
'history_list': history,
'service_silenced': service_silenced,
'incident_silenced': incident_silenced
}
context.update(extra_context)
return context
def _update_type(user, ids, event_type):
for incident_id in ids:
incident = Incident.objects.get(id=int(incident_id))
log_message_data = f"{user.username} changed {incident.incident_key} from {incident.event_type} to {event_type}"
if incident.service_to_escalate_to is not None:
incident.service_to_escalate_to = None
log_message_data += ", unescalated"
EventLog.objects.create(
service_key=incident.service_key,
user=user,
action=event_type,
data=log_message_data,
occurred_at=timezone.now(),
incident_key=incident
)
incident.event_type = event_type
incident.occurred_at = timezone.now()
incident.save()
if incident.event_type == Incident.RESOLVE or incident.event_type == Incident.ACKNOWLEDGE:
ScheduledNotification.remove_all_for_incident(incident)
# TODO: Needs Refactoring work + changed update_type url
@login_required()
@require_http_methods(["POST"])
def update_type(request):
event_type = request.POST.get('event_type')
incident_ids = request.POST.getlist('selection', [])
incident_ids.append(request.POST.get('id'))
url = reverse('IncidentsListView')
request_url = request.POST.get('url', url)
if event_type is None:
messages.error(request, 'Invalid event modification!')
return HttpResponseRedirect(request_url)
try:
for pk in incident_ids:
incident = Incident.objects.get(id=pk)
if incident.event_type == 'resolve' and event_type == 'acknowledge':
                messages.error(request, 'Can\'t ACK a resolved incident!')
return HttpResponseRedirect(request_url)
else:
_update_type(request.user, incident_ids, event_type)
except Incident.DoesNotExist:
messages.error(request, 'Incident not found')
return HttpResponseRedirect(request_url)
# TODO: Needs Refactoring work + changed forward_incident url
@login_required()
@require_http_methods(["POST"])
def forward_incident(request):
url = reverse('IncidentsListView')
request_url = request.POST.get('url', url)
try:
incident = Incident.objects.get(id=request.POST.get('id'))
user = User.objects.get(id=request.POST.get('user_id'))
ScheduledNotification.remove_all_for_incident(incident)
NotificationHelper.notify_user_about_incident(incident, user)
event_log_message = f"{request.user.username} changed assignee of " \
f"incident : {incident.incident_key} to {user.username}"
EventLog.objects.create(
user=request.user,
action="forward",
incident_key=incident,
service_key=incident.service_key,
data=event_log_message,
occurred_at=timezone.now()
)
except Incident.DoesNotExist:
messages.error(request, 'Incident not found')
return HttpResponseRedirect(request_url)
except User.DoesNotExist:
        messages.error(request, 'User not found')
return HttpResponseRedirect(request_url)
return HttpResponseRedirect(request_url)
# TODO: Needs Refactoring work + changed silence url
@login_required()
@require_http_methods(["POST"])
def silence(request, incident_id):
silence_for = request.POST.get('silence_for', 0)
url = reverse('IncidentsListView')
request_url = request.POST.get('url', url)
try:
incident = Incident.objects.get(id=int(incident_id))
except Incident.DoesNotExist:
messages.error(request, 'Incident not found')
return HttpResponseRedirect(request_url)
if IncidentSilenced.objects.filter(incident=incident).count() < 1:
silenced_incident = IncidentSilenced.objects.create(
incident=incident,
silenced_until=timezone.now() + timezone.timedelta(hours=int(silence_for)),
silenced=True
)
event_log_message = f"{request.user.username} silenced " \
f"incident {incident.incident_key} for {silence_for} hours"
EventLog.objects.create(
incident_key=incident,
action='silence_incident',
user=request.user,
service_key=incident.service_key,
data=event_log_message,
occurred_at=timezone.now()
)
ScheduledNotification.remove_all_for_incident(incident)
incident.event_type = Incident.ACKNOWLEDGE
incident.save()
unsilence_incident.apply_async((incident_id,), eta=silenced_incident.silenced_until)
messages.success(request, event_log_message)
return HttpResponseRedirect(request_url)
# TODO: Needs Refactoring work + changed unsilence url
@login_required()
@require_http_methods(["POST"])
def unsilence(request, incident_id):
url = reverse('IncidentsListView')
request_url = request.POST.get('url', url)
try:
incident = Incident.objects.get(id=incident_id)
except Incident.DoesNotExist:
messages.error(request, 'Incident not found')
return HttpResponseRedirect(request_url)
IncidentSilenced.objects.filter(incident=incident).delete()
event_log_message = f"{request.user.username} removed silence from incident {incident.incident_key}"
EventLog.objects.create(
action='unsilence_incident',
user=request.user,
incident_key=incident,
service_key=incident.service_key,
data=event_log_message,
occurred_at=timezone.now()
)
messages.success(request, event_log_message)
return HttpResponseRedirect(request_url)
|
graphql_compiler/query_formatting/__init__.py | manesioz/graphql-compiler | 521 | 11083287 | <gh_stars>100-1000
# Copyright 2017-present <NAME>, LLC.
"""Safely insert runtime arguments into compiled GraphQL queries."""
from .common import insert_arguments_into_query, validate_argument_type # noqa
|
bilibili.py | squ33ker/Dlink_Parse | 142 | 11083327 | import requests
import re
import sys
class Bili:
def __init__(self, url):
self.url = url
self.headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36¬"
}
def start(self):
res = requests.get(self.url)
result = re.findall("window.__playinfo__=(.*?)</script>", res.text)
print(result[0])
return result[0]
if __name__ == '__main__':
    Bili(sys.argv[1]).start()  # Bili requires a video page URL; here it is taken from the command line
|
src/test/python/apache/aurora/executor/common/fixtures.py | jeremyvdw/aurora | 479 | 11083361 | <filename>src/test/python/apache/aurora/executor/common/fixtures.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
from apache.aurora.config.schema.base import (
MB,
DefaultLifecycleConfig,
MesosJob,
MesosTaskInstance,
Process,
Resources,
Task
)
BASE_MTI = MesosTaskInstance(
instance=0,
lifecycle=DefaultLifecycleConfig,
role=getpass.getuser(),
)
BASE_TASK = Task(resources=Resources(cpu=1.0, ram=16 * MB, disk=32 * MB))
HELLO_WORLD_TASK_ID = 'hello_world-001'
HELLO_WORLD = BASE_TASK(
name='hello_world',
processes=[Process(name='hello_world_{{thermos.task_id}}', cmdline='echo hello world')])
HELLO_WORLD_MTI = BASE_MTI(task=HELLO_WORLD)
SLEEP60 = BASE_TASK(processes=[Process(name='sleep60', cmdline='sleep 60')])
SLEEP2 = BASE_TASK(processes=[Process(name='sleep2', cmdline='sleep 2')])
SLEEP60_MTI = BASE_MTI(task=SLEEP60)
MESOS_JOB = MesosJob(
name='does_not_matter',
instances=1,
role=getpass.getuser(),
)
HELLO_WORLD_UNBOUND = BASE_TASK(
name='{{unbound_cmd}}',
processes=[Process(name='hello_world_{{thermos.task_id}}', cmdline='echo hello {{unbound}}')])
|
Alignment/OfflineValidation/test/DiMuonVertexValidation_cfg.py | Purva-Chaudhari/cmssw | 852 | 11083374 | from __future__ import print_function
from fnmatch import fnmatch
import FWCore.ParameterSet.Config as cms
import FWCore.Utilities.FileUtils as FileUtils
import FWCore.ParameterSet.VarParsing as VarParsing
import sys
from Configuration.StandardSequences.Eras import eras
###################################################################
def best_match(rcd):
###################################################################
'''
find out where to best match the input conditions
'''
print(rcd)
for pattern, string in connection_map:
print(pattern, fnmatch(rcd, pattern))
if fnmatch(rcd, pattern):
return string
options = VarParsing.VarParsing ()
options.register('maxEvents',
-1,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"number of events to process (\"-1\" for all)")
options.register ('era',
'2017', # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"CMS running era")
options.register ('GlobalTag',
'113X_mc2017_realistic_v4', # default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"seed number")
options.register ('records',
[],
VarParsing.VarParsing.multiplicity.list, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"record:tag names to be used/changed from GT")
options.register ('external',
[],
VarParsing.VarParsing.multiplicity.list, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"record:fle.db picks the following record from this external file")
options.register ('myseed',
'1', # default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"seed number")
options.register ('myfile',
'/store/relval/CMSSW_10_6_1/RelValZMM_13/GEN-SIM-RECO/PU25ns_106X_mc2017_realistic_v6_HS-v1/10000/44690279-DDF3-0D43-B92D-F5CB57EF7E6A.root', # default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"file name")
options.register ('FileList',
'', # default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"FileList in DAS format")
options.register ('outputName',
'default', # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"output file")
options.parseArguments()
if(options.FileList):
print("FileList: ", options.FileList)
else:
print("inputFile: ", options.myfile)
print("outputFile: ", "DiMuonVertexValidation_{fname}_{fseed}.root".format(fname = options.outputName,fseed=options.myseed))
print("era: ", options.era)
print("conditionGT: ", options.GlobalTag)
print("conditionOverwrite: ", options.records)
print("external conditions:", options.external)
print("max events: ", options.maxEvents)
if options.era=='2016':
print("===> running era 2016")
process = cms.Process('Analysis',eras.Run2_2016)
elif options.era=='2017':
print("===> running era 2017")
process = cms.Process('Analysis',eras.Run2_2017)
elif options.era=='2018':
print("===> running era 2018")
process = cms.Process('Analysis',eras.Run2_2018)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
###################################################################
# Tell the program where to find the conditons
connection_map = [
('Tracker*', 'frontier://PromptProd/CMS_CONDITIONS'),
('SiPixel*', 'frontier://PromptProd/CMS_CONDITIONS'),
('SiStrip*', 'frontier://PromptProd/CMS_CONDITIONS'),
('Beam*', 'frontier://PromptProd/CMS_CONDITIONS'),
]
if options.external:
connection_map.extend(
(i.split(':')[0], 'sqlite_file:%s' % i.split(':')[1]) for i in options.external
)
connection_map.sort(key=lambda x: -1*len(x[0]))
###################################################################
# creat the map for the GT toGet
records = []
if options.records:
for record in options.records:
rcd, tag = tuple(record.split(':'))
print("control point:",rcd,tag)
if len(rcd)==0:
print("no overriding will occur")
continue
records.append(
cms.PSet(
record = cms.string(rcd),
tag = cms.string(tag),
connect = cms.string(best_match(rcd))
)
)
###################################################################
# configure the Global Tag
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.GlobalTag, '')
process.GlobalTag.toGet = cms.VPSet(*records)
'''
process.GlobalTag.toGet = cms.VPSet(
cms.PSet(record = cms.string("TrackerAlignmentRcd"),
tag = cms.string("TrackerAlignment_Upgrade2017_design_v4"),
#tag = cms.string("TrackerAlignment_2017_ultralegacymc_v1"),
connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
),
cms.PSet(record = cms.string("TrackerAlignmentErrorExtendedRcd"),
tag = cms.string("TrackerAlignmentErrorsExtended_Upgrade2017_design_v0"),
#tag = cms.string("TrackerAlignmentExtendedErrors_2017_ultralegacymc_v1"),
connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
)
)
'''
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(options.maxEvents) )
#process.load('FWCore.MessageService.MessageLogger_cfi')
#process.MessageLogger.cerr.FwkReport.reportEvery = 1
###################################################################
# Messages
###################################################################
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.enable = False
process.MessageLogger.TrackRefitter=dict()
process.MessageLogger.PrimaryVertexProducer=dict()
process.MessageLogger.DiMuonVertexValidation=dict()
process.MessageLogger.DiLeptonHelpCounts=dict()
process.MessageLogger.PlotsVsKinematics=dict()
process.MessageLogger.cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
threshold = cms.untracked.string("INFO"),
default = cms.untracked.PSet(limit = cms.untracked.int32(0)),
FwkReport = cms.untracked.PSet(limit = cms.untracked.int32(-1),
reportEvery = cms.untracked.int32(100)
),
DiMuonVertexValidation = cms.untracked.PSet( limit = cms.untracked.int32(-1)),
DiLeptonHelpCounts = cms.untracked.PSet( limit = cms.untracked.int32(-1)),
enableStatistics = cms.untracked.bool(True)
)
###################################################################
# Source
###################################################################
if(options.FileList):
print('Loading file list from ASCII file')
filelist = FileUtils.loadListFromFile (options.FileList)
readFiles = cms.untracked.vstring( *filelist)
else:
readFiles = cms.untracked.vstring([options.myfile])
process.source = cms.Source("PoolSource",
fileNames = readFiles,
#skipEvents = cms.untracked.uint32(45000)
)
###################################################################
# TransientTrack from https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideTransientTracks
###################################################################
process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi")
process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorOpposite_cfi')
process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAlong_cfi')
process.load('TrackingTools.TrackAssociator.DetIdAssociatorESProducer_cff')
####################################################################
# Get the BeamSpot
####################################################################
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff")
####################################################################
# Track Refitter
####################################################################
process.load("RecoTracker.TrackProducer.TrackRefitters_cff")
import RecoTracker.TrackProducer.TrackRefitters_cff
process.TrackRefitter = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone()
process.TrackRefitter.src = "generalTracks"
#process.TrackRefitter.src = "ALCARECOTkAlDiMuonVertexTracks"
process.TrackRefitter.TrajectoryInEvent = True
process.TrackRefitter.NavigationSchool = ''
process.TrackRefitter.TTRHBuilder = "WithAngleAndTemplate"
####################################################################
# Sequence
####################################################################
process.seqTrackselRefit = cms.Sequence(process.offlineBeamSpot*
# in case NavigatioSchool is set !=''
#process.MeasurementTrackerEvent*
process.TrackRefitter)
####################################################################
# Re-do vertices
####################################################################
from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import offlinePrimaryVertices
process.offlinePrimaryVerticesFromRefittedTrks = offlinePrimaryVertices.clone()
process.offlinePrimaryVerticesFromRefittedTrks.TrackLabel = cms.InputTag("TrackRefitter")
####################################################################
# Output file
####################################################################
process.TFileService = cms.Service("TFileService",fileName=cms.string("DiMuonVertexValidation_"+options.outputName+"_"+options.myseed+".root"))
# Additional output definition
process.analysis = cms.EDAnalyzer("DiMuonVertexValidation",
useReco = cms.bool(True),
## the two parameters below are mutually exclusive,
## depending if RECO or ALCARECO is used
muons = cms.InputTag('muons'),
#muonTracks = cms.InputTag('ALCARECOTkAlDiMuon'),
tracks = cms.InputTag('TrackRefitter'),
vertices = cms.InputTag('offlinePrimaryVerticesFromRefittedTrks'))
####################################################################
# Path
####################################################################
process.p = cms.Path(process.seqTrackselRefit +
process.offlinePrimaryVerticesFromRefittedTrks +
process.analysis
)
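# Illustration of the condition-source resolution above (sketch): with the
# connection_map defined earlier and no --external overrides, best_match()
# returns the connection string of the first fnmatch pattern that fits the
# record name, for example:
#
#     best_match('TrackerAlignmentRcd')  ->  'frontier://PromptProd/CMS_CONDITIONS'
#     best_match('BeamSpotObjectsRcd')   ->  'frontier://PromptProd/CMS_CONDITIONS'
#
# An entry added via --external (e.g. 'TrackerAlignmentRcd:my.db') takes
# precedence over the generic 'Tracker*' pattern because connection_map is
# sorted by pattern length, longest first.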
|
contrib/performance/jobqueue/workrate.py | backwardn/ccs-calendarserver | 462 | 11083378 | #!/usr/bin/env python
##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from getopt import getopt, GetoptError
import errno
import json
import os
import sched
import socket
import sys
import time
def usage(e=None):
name = os.path.basename(sys.argv[0])
print("usage: %s [options]" % (name,))
print("")
print("options:")
print(" -h --help: print this help and exit")
print(" -s: server host (and optional port) [localhost:8100]")
print(" or unix socket path prefixed by 'unix:'")
print("")
print("This tool monitors the server's job assignment rate.")
if e:
sys.exit(64)
else:
sys.exit(0)
def main():
try:
(optargs, _ignore_args) = getopt(
sys.argv[1:], "hs:", [
"help",
],
)
    except GetoptError as e:
usage(e)
#
# Get configuration
#
server = ("localhost", 8100)
for opt, arg in optargs:
if opt in ("-h", "--help"):
usage()
elif opt in ("-s"):
if not arg.startswith("unix:"):
server = arg.split(":")
if len(server) == 1:
server.append(8100)
else:
server[1] = int(server[1])
server = tuple(server)
else:
server = arg
else:
raise NotImplementedError(opt)
d = Monitor(server)
d.run()
class Monitor(object):
"""
Main monitor controller. Use Python's L{sched} feature to schedule
updates.
"""
screen = None
registered_windows = {}
registered_order = []
def __init__(self, server):
self.paused = False
self.seconds = 1.0
self.sched = sched.scheduler(time.time, time.sleep)
self.client = MonitorClient(server)
self.client.addItem("test_work")
self.last_queued = None
self.last_completed = None
self.last_time = None
def run(self):
"""
Create the initial window and run the L{scheduler}.
"""
self.sched.enter(self.seconds, 0, self.updateResults, ())
self.sched.run()
def updateResults(self):
"""
Periodic update of the current window and check for a key press.
"""
t = time.time()
self.client.update()
if len(self.client.currentData) == 0:
print("Failed to read any valid data from the server - exiting")
sys.exit(1)
queued = self.client.currentData["test_work"]["queued"]
completed = self.client.currentData["test_work"]["completed"]
assigned = self.client.currentData["test_work"]["assigned"]
if self.last_queued is not None:
diff_queued = (self.last_queued - queued) / (t - self.last_time)
diff_completed = (completed - self.last_completed) / (t - self.last_time)
else:
diff_queued = 0
diff_completed = 0
self.last_queued = queued
self.last_completed = completed
self.last_time = t
print("{}\t{}\t{:.1f}\t{:.1f}".format(queued, assigned, diff_queued, diff_completed,))
self.sched.enter(max(self.seconds - (time.time() - t), 0), 0, self.updateResults, ())
class MonitorClient(object):
"""
Client that connects to a server and fetches information.
"""
def __init__(self, sockname):
self.socket = None
if isinstance(sockname, str):
self.sockname = sockname[5:]
self.useTCP = False
else:
self.sockname = sockname
self.useTCP = True
self.currentData = {}
self.items = []
def readSock(self, items):
"""
Open a socket, send the specified request, and retrieve the response. Keep the socket open.
"""
try:
if self.socket is None:
self.socket = socket.socket(socket.AF_INET if self.useTCP else socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(self.sockname)
self.socket.setblocking(0)
self.socket.sendall(json.dumps(items) + "\r\n")
data = ""
t = time.time()
while not data.endswith("\n"):
try:
d = self.socket.recv(1024)
except socket.error as se:
if se.args[0] != errno.EWOULDBLOCK:
raise
if time.time() - t > 5:
raise socket.error
continue
if d:
data += d
else:
break
data = json.loads(data)
except socket.error:
data = {}
self.socket = None
except ValueError:
data = {}
return data
def update(self):
"""
Update the current data from the server.
"""
# Only read each item once
self.currentData = self.readSock(list(set(self.items)))
def getOneItem(self, item):
"""
Update the current data from the server.
"""
data = self.readSock([item])
return data[item] if data else None
def addItem(self, item):
"""
Add a server data item to monitor.
"""
self.items.append(item)
def removeItem(self, item):
"""
No need to monitor this item.
"""
self.items.remove(item)
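# Sketch of the stats socket exchange used by readSock() (illustrative): the
# protocol is line oriented -- the client sends a JSON list of item names
# terminated by CRLF and the server replies with one JSON object per line,
# keyed by item name. A hypothetical exchange:
#
#     client -> server:  ["test_work"]\r\n
#     server -> client:  {"test_work": {"queued": 12, "assigned": 3, "completed": 40}}\n
#
# 'queued', 'assigned' and 'completed' are the fields updateResults() reads.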
if __name__ == "__main__":
main()
|
fastfold/model/hub/__init__.py | hpcaitech/FastFold | 303 | 11083434 | from .alphafold import AlphaFold
__all__ = ["AlphaFold"] |
tock/employees/signals.py | mikiec84/tock | 134 | 11083442 | <reponame>mikiec84/tock
import logging
from django.db.models.signals import pre_save
logger = logging.getLogger('tock-employees')
def employee_grade_creation(sender, instance=None, **kwargs):
if instance is not None and instance.pk is None:
logger.info(
f'Creating EmployeeGrade for {instance.employee.username}.'
)
def user_data_creation(sender, instance=None, **kwargs):
if instance is not None and instance.pk is None:
logger.info(
f'Creating UserData for {instance.user.username}.'
)
def setup_signals():
from .models import EmployeeGrade, UserData
pre_save.connect(
employee_grade_creation,
sender=EmployeeGrade,
dispatch_uid="employees_employee_grade_creation"
)
pre_save.connect(
user_data_creation,
sender=UserData,
dispatch_uid="employees_user_data_creation"
)
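# Minimal wiring sketch (assumption: the usual Django pattern of calling
# setup_signals() from an AppConfig.ready() hook; the config class name below
# is hypothetical and only illustrates the call):
#
#     from django.apps import AppConfig
#
#     class EmployeesConfig(AppConfig):
#         name = 'employees'
#         def ready(self):
#             from .signals import setup_signals
#             setup_signals()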
|
Tools/uorb_graph/create.py | imchangchang/PX4-Autopilot | 1,537 | 11083444 | <reponame>imchangchang/PX4-Autopilot
#! /usr/bin/env python3
from __future__ import print_function
import argparse
import codecs
import colorsys
import json
import logging
import os
import re
import sys
from typing import Optional, Set, Tuple
parser = argparse.ArgumentParser(
description='Generate uORB pub/sub dependency graph from source code')
parser.add_argument('-s', '--src-path', action='append',
help='Source path(s) (default=src, can be specified multiple times)',
default=[])
parser.add_argument('-e', '--exclude-path', action='append',
help='Excluded path(s), can be specified multiple times',
default=[])
parser.add_argument('--merge-depends', action='store_true',
help='Merge library topics in the modules that depend on them.')
parser.add_argument('-v','--verbosity', action='count',
help='increase output verbosity; primarily for debugging; repeat for more detail',
default=0)
parser.add_argument('-f', '--file', metavar='file', action='store',
help='output file name prefix',
default='graph')
parser.add_argument('-o', '--output', metavar='output', action='store',
help='output format (json or graphviz)',
default='json')
parser.add_argument('-u','--use-topic-union', action='store_true',
help='''
Use the union of all publication and subscription topics (useful for complete
graphs or only few/single module(s)). The default is to use the intersection
(remove topics that have no subscriber or no publisher)''')
parser.add_argument('-m', '--modules', action='store',
help='Comma-separated whitelist of modules (the module\'s '+
'MAIN, e.g. from a startup script)',
default='')
logging.basicConfig(level=logging.WARNING,format='%(message)s')
log = logging.getLogger()
def get_N_colors(N, s=0.8, v=0.9):
""" get N distinct colors as a list of hex strings """
HSV_tuples = [(x*1.0/N, s, v) for x in range(N)]
hex_out = []
for rgb in HSV_tuples:
rgb = map(lambda x: int(x*255), colorsys.hsv_to_rgb(*rgb))
hex_out.append("#"+"".join(map(lambda x: format(x, '02x'), rgb)))
return hex_out
class PubSub(object):
""" Collects either publication or subscription information for nodes
(modules and topics) & edges """
# special value to signal an ambiguous was found -- don't record this topic, and stop processing.
AMBIGUOUS_SITE_TOPIC = "AMBIGUOUS"
def __init__(self, name, topic_blacklist, regexes):
"""
        :param name: short label used in debug output (e.g. 'PUB', 'SUB' or 'AMB')
        :param topic_blacklist: list of topics to ignore
        :param regexes: list of regexes to extract orb calls
        (e.g. orb_subscribe). They need to have 2 captures, the second
        one is the one capturing ORB_ID(<topic>)
"""
self._name = name
self._topic_blacklist = topic_blacklist
self._regexes = set([ re.compile(regex) for regex in regexes])
def match(self, source_line: str) -> str:
""" Extract subscribed/published topics from a source string
        :param source_line: string of C/C++ code with comments and whitespace removed
        :return: if any topic was found, return it as a str. On error, raise an exception. On an ambiguous line, return `AMBIGUOUS_SITE_TOPIC`. Otherwise, return `None`
"""
for regex in self._regexes:
# just the matches for this particular pattern:
match = regex.search(source_line)
if match is None:
continue
# # all regexes should contain 2 capture groups (or else this code block crashes)
route_group, topic_group = match.groups()
log.debug(" ####:{}: {}, {}".format( self._name, route_group, topic_group))
# # TODO: handle this case... but not sure where, yet
# if match == 'ORB_ID_VEHICLE_ATTITUDE_CONTROLS': # special case
# match = orb_id+orb_id_vehicle_attitude_controls_topic
# match has the form: '[ORB_ID(]<topic_name>'
if route_group:
if route_group == 'ORB_ID':
log.debug(" >>> Found ORB_ID topic: " + topic_group + " w/regex: " + str(regex.pattern))
return self._filter_topic(topic_group)
elif route_group == '[':
if not topic_group:
log.debug(" !! found an ambiguous site => return an empty set")
return PubSub.AMBIGUOUS_SITE_TOPIC
else:
raise SyntaxError('!!! Encountered regex case: `route_group` contains unrecognized value!: '+ route_group+' (::'+str(regex.pattern)+')\n'
+ " ("+ route_group+', '+topic_group +")\n"
+ " " + source_line)
            elif not route_group and not topic_group:
log.debug('!!! Found ambiguous site, without `ORB_ID` or topic (::'+str(regex.pattern))
return PubSub.AMBIGUOUS_SITE_TOPIC
else:
raise SyntaxError(" !!! unhandled case: unknown-variant: "+route_group+", " + topic_group + " ....from regex: " + str(regex.pattern))
return None
def _filter_topic(self, topic_name: str) -> str:
""" add topic to set, unless the topic is ignored """
if topic_name in self._topic_blacklist:
log.debug(" XX Ignoring blacklisted topic " + topic_name)
return None
else:
return topic_name
class Publications(PubSub):
""" Collects topic publication information for scopes """
def __init__(self, topic_blacklist, regexes):
super().__init__('PUB', topic_blacklist, regexes)
class Subscriptions(PubSub):
""" Collects topic subscription information for scopes """
def __init__(self, topic_blacklist, regexes):
super().__init__('SUB', topic_blacklist, regexes)
class Ambiguities(PubSub):
""" Collects topic information that cannot be classified """
def __init__(self, topic_blacklist, regexes):
super().__init__('AMB', topic_blacklist, regexes)
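# Hedged illustration (added here, not part of the upstream file): how the
# Subscriptions matcher extracts a topic from a whitespace-stripped source
# line, using one of the same regexes that Graph.__init__ builds below.
def _example_subscription_match():
    regexes = [r"orb_subscribe\w*\((ORB_ID)(?:\(|::)(\w+)"]
    sub = Subscriptions(topic_blacklist=[], regexes=regexes)
    # returns 'vehicle_status' for this (pre-stripped) source line
    return sub.match('orb_subscribe(ORB_ID(vehicle_status));')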
class Scope(object):
""" Defines a scope to add dependencies or topics to """
def __init__(self, typename, name):
self.publications = set()
self.subscriptions = set()
self.dependencies = set()
self.ambiguities = set()
self._name = name
self._typename = typename
def add_dependency(self, dependency_name: str):
if isinstance( dependency_name, str):
self.dependencies.add(dependency_name)
def is_empty(self):
return (0 == len(self.publications)) and (0==len(self.subscriptions))
@property
def name(self):
return self._name
def reduce_ambiguities(self) -> Set[str]:
self.ambiguities = self.ambiguities - self.subscriptions - self.publications
return self.dependencies
@property
def typename(self):
return self._typename
# define these so we can hash these classes in dicts and sets
def __hash__(self):
return self._name.__hash__()
def __eq__(self, other):
if isinstance(other, str):
return self._name == other
else:
return self._name == other._name
class LibraryScope(Scope):
def __init__(self, name):
super().__init__('Library',name)
class ModuleScope(Scope):
def __init__(self, name):
super().__init__('Module',name)
class Graph(object):
""" Collects Node and Edge information by parsing the source tree """
def __init__(self, **kwargs):
"""
:kwargs:
- scope_whitelist
- scope_blacklist
- topic_blacklist
"""
self._whitespace_pattern = re.compile(r'\s+')
self._scope_blacklist = set(kwargs.get('scope_blacklist',set()))
self._scope_whitelist = set(kwargs.get('scope_whitelist',set()))
self._path_blacklist = []
self._topic_blacklist = set(kwargs.get('topic_blacklist',set()))
self._orb_id_vehicle_attitude_controls_topic = 'actuator_controls_0'
self._orb_id_vehicle_attitude_controls_re = re.compile(r'\#define\s+ORB_ID_VEHICLE_ATTITUDE_CONTROLS\s+([^,)]+)')
self._warnings = [] # list of all ambiguous scan sites
self._current_scope = [] # stack with current module (they can be nested)
self._found_modules = {} # dict of all found modules
self._found_libraries = {} # dict of all found modules
self._print_nodes = set() # combination of libraries + modules
self._print_topics = set() # all topics
self._topic_colors = {} # key = topic, value = color (html string)
# note: the source-file-string is pre-processed to remove whitespace -- regexes should ignore whitespace
        # note: the regexes should have 2 capture groups '()' to correctly register with downstream code
capture_cases_sub = [r"orb_subscribe\w*\((ORB_ID)(?:\(|::)(\w+)",
r"orb_copy\((ORB_ID)(?:\(|::)(\w+)",
r"Subscription\w*(?:<[^>]+>|)\w*(?:\[[^]]+\]|)[\{\(](ORB_ID)(?:\(|::)(\w+)",
r"SubscriptionCallbackWorkItem\w+\{this,(ORB_ID)(?:\(|::)(\w+)",
]
self._subscriptions = Subscriptions( self._topic_blacklist, capture_cases_sub)
# note: the source-file-string is pre-processed to remove whitespace -- regexes should ignore whitespace
        # note: the regexes should have 2 capture groups '()' to correctly register with downstream code
capture_cases_pub = [r"orb_advertise(?:_multi|_queue|_multi_queue|)\((ORB_ID)(?:\(|::)(\w+)",
r"orb_publish(?:_auto|)\((ORB_ID)(?:\(|::)(\w+)",
r"Publication\w*<\w+>\w+(?:\[[^]]+\]|)[\(\{]*(ORB_ID)(?:\(|::)(\w+)",
]
self._publications = Publications( self._topic_blacklist, capture_cases_pub)
# note: the source-file-string is pre-processed to remove whitespace -- regexes should ignore whitespace
        # note: the regexes should have 2 capture groups '()' to correctly register with downstream code
capture_cases_ambiguous = [ r"Publication\w*(?:\<\w+\>|)\w+(\[)()",
r"Subscription\w*(?:\<\w+\>|)\w+(\[)()",
r"(ORB_ID)(?:\(|::)(\w+)",
]
self._ambiguities = Ambiguities( self._topic_blacklist, capture_cases_ambiguous)
def _get_current_scope(self):
if len(self._current_scope) == 0:
return None
return self._current_scope[-1]
def build(self, src_path_list, **kwargs):
""" parse the source tree & extract pub/sub information.
:param use_topic_pubsub_union: if true, use all topics that have a
publisher or subscriber. If false, use only topics with at least one
publisher and subscriber.
        fill in self._module_subscriptions & self._module_publications
"""
self._path_blacklist = set([ os.path.normpath(p) for p in kwargs.get('path_blacklist',[]) ])
for path in src_path_list:
log.info("## Add src path: " + path )
self._build_recursive(path, **kwargs)
# Summarize the found counts: (all topics are defined in 'dependency' library)
log.info('### Summary: Total Scanned:')
log.info(' Library Count: '+str(len(self._found_libraries)))
log.info(' Module Count: '+str(len(self._found_modules)))
log.info(' Warning Count: '+str(len(self._warnings)))
if kwargs['merge_depends']:
            self.merge_depends()
# filter all scopes, topics into only the scopes + topics to output
self._generate_print_lists(use_topic_pubsub_union=kwargs['use_topic_pubsub_union'], merge_depends=kwargs['merge_depends'])
# Summarize the found counts:
log.info('### Summary (in-scope):')
log.info(' Scope Count: '+str(len(self._print_scopes)))
log.info(' Ambiguous Topics: '+str(len(self._print_ambiguities)))
log.info(' Linked Topics: '+str(len(self._print_topics)))
log.info(' Warnings: '+str(len(self._warnings)))
if 0 < len(self._warnings):
# print out the list of warning-sites:
log.info('## Warning Sites:')
for w in self._warnings:
scope_name = 'no-scope'
if None is not w[0]:
scope_name = w[0].name
# warnings tuple contains: (current_scope, file_name, line_number, line)
log.info(" -['{}']:{:<64s}:{} = {}".format(scope_name, w[1].lstrip('/.'), w[2], w[3] ))
# initialize colors
color_list = get_N_colors(len(self._print_topics), 0.7, 0.85)
self._topic_colors = {}
for i, topic in enumerate(self._print_topics):
self._topic_colors[topic] = color_list[i]
def _generate_print_lists(self, use_topic_pubsub_union, merge_depends):
""" generate the set of scopes (modules + libraries) and topics to print to output """
subscribed_topics = set()
published_topics = set()
ambiguous_topics = set()
# gather all found scopes:
all_scopes = { **self._found_libraries, **self._found_modules }
if 0 == len(self._scope_whitelist):
select_scopes = self._found_modules
else:
select_scopes = {}
for scope_name in self._scope_whitelist:
if scope_name in all_scopes:
select_scopes[scope_name] = all_scopes[scope_name]
if not isinstance(select_scopes, dict) or 0 == len(select_scopes):
log.error("!! No requested modules not found -- exiting.")
sys.exit(0)
log.debug('### Condensing found topics: scope -> total')
for name,scope in select_scopes.items():
log.debug(' # Scope: '+ name )
log.debug(' ## Subs: ' + str(len(scope.subscriptions)))
for topic in sorted(scope.subscriptions):
log.debug(' - ' + topic)
subscribed_topics.add(topic)
log.debug(' ## Pubs: ' + str(len(scope.publications)))
for topic in sorted(scope.publications):
log.debug(' - ' + topic )
published_topics.add(topic)
scope.reduce_ambiguities()
log.debug(' ## Ambiguities: ' + str(len(scope.ambiguities)))
for topic in sorted(scope.ambiguities):
log.debug(' - ' + topic )
ambiguous_topics.add(topic)
# filter modules iff they have at least a subscription or a publication
scopes_with_topic = {}
for name,scope in select_scopes.items():
if not scope.is_empty():
scopes_with_topic[name] = scope
self._print_ambiguities = ambiguous_topics
if use_topic_pubsub_union:
self._print_topics = subscribed_topics | published_topics
self._print_scopes = scopes_with_topic
else:
self._print_topics = subscribed_topics & published_topics
# cull scopes to only those that pub or sub to a topic that has both
intersect_scopes = {}
for name,scope in scopes_with_topic.items():
all_scope_topics = scope.publications | scope.subscriptions
for topic in all_scope_topics:
if topic in self._print_topics:
intersect_scopes[scope.name] = scope
break
self._print_scopes = intersect_scopes
def _build_recursive(self, path, **kwargs):
if os.path.normpath(path) in self._path_blacklist:
log.debug('ignoring excluded path '+path)
return
entries = os.listdir(path)
# check if entering a new scope
cmake_file = 'CMakeLists.txt'
new_scope = False
if cmake_file in entries:
new_scope = self._extract_build_information(os.path.join(path, cmake_file), **kwargs)
# iterate directories recursively
for entry in entries:
file_name = os.path.join(path, entry)
if os.path.isdir(file_name):
self._build_recursive(file_name, **kwargs)
# iterate source files
# Note: Skip all entries if we're not in a scope -- both finding known pubs/subs and emitting warnings
for entry in entries:
file_name = os.path.join(path, entry)
if os.path.isfile(file_name):
_, ext = os.path.splitext(file_name)
if ext in ['.cpp', '.c', '.h', '.hpp']:
self._process_source_file(file_name)
if new_scope:
self._current_scope.pop()
def _extract_build_information(self, file_name, **kwargs):
""" extract the module or library name from a CMakeLists.txt file and store
in self._current_scope if there is any
Also records dependencies, if any are specified.
"""
datafile = open(file_name)
found_module_def = False
found_module_depends = False
found_library_def = False
scope_added = False
for line in datafile:
if 'px4_add_module' in line: # must contain 'px4_add_module'
found_module_def = True
            elif 'px4_add_library' in line: # must contain 'px4_add_library'
tokens = line.split('(')
if 1 < len(tokens):
found_library_def = True
library_name = tokens[1].split()[0].strip().rstrip(')')
library_scope = LibraryScope(library_name)
self._current_scope.append(library_scope)
scope_added = True
self._found_libraries[library_name] = library_scope
if self._in_scope():
log.debug(' >> found library: ' + library_name)
# we can return early because we have no further information to collect from libraries
return True
elif found_module_def and 'DEPENDS' in line.upper():
found_module_depends = True
elif found_module_depends:
# two tabs is a *sketchy* heuristic -- spacing isn't guaranteed by cmake;
# ... but the hard-tabs *is* specified by PX4 coding standards, so it's likely to be consistent
if line.startswith('\t\t') and not line.strip().startswith('#'):
depends = [dep.strip() for dep in line.split()]
for name in depends:
log.debug(' >> {:}: found module dep: {:}'
.format(self._current_scope[-1].name, name))
self._current_scope[-1].add_dependency(name)
if kwargs['merge_depends']:
if (0 < len(self._scope_whitelist)) and self._current_scope[-1].name in self._scope_whitelist:
# if we whitelist a module with dependencies, whitelist the dependencies, too
self._scope_whitelist.add(name)
elif line.strip() != "":
found_module_depends = False ## done with the 'DEPENDS' section.
words = line.split()
# get the definition of MAIN
if found_module_def and 'MAIN' in words and len(words) >= 2:
module_name = words[1]
module_scope = ModuleScope(module_name)
self._current_scope.append(module_scope)
scope_added = True
self._found_modules[module_name] = module_scope
if self._in_scope():
log.debug(' >> Found module name: ' + module_scope.name)
return scope_added
def _process_source_file(self, file_name):
""" extract information from a single source file """
current_scope = self._get_current_scope()
log.debug( " >> {:}extracting topics from file: {:}"
.format(current_scope.name+": " if current_scope is not None else "",
file_name))
with codecs.open(file_name, 'r', 'utf-8') as f:
try:
content = f.read()
except:
print('Failed reading file: %s, skipping content.' % file_name)
return
if current_scope:
if current_scope.name in self._scope_blacklist:
return
elif current_scope.name == 'uorb_tests': # skip this
return
elif current_scope.name == 'uorb':
# search and validate the ORB_ID_VEHICLE_ATTITUDE_CONTROLS define
matches = self._orb_id_vehicle_attitude_controls_re.findall(content)
for match in matches:
if match != 'ORB_ID('+self._orb_id_vehicle_attitude_controls_topic:
# if we land here, you need to change _orb_id_vehicle_attitude_controls_topic
raise Exception(
'The extracted define for ORB_ID_VEHICLE_ATTITUDE_CONTROLS '
'is '+match+' but expected ORB_ID('+
self._orb_id_vehicle_attitude_controls_topic)
return # skip uorb module for the rest
line_number = 0
for full_line in content.splitlines():
line_number += 1
short_line = re.sub(self._whitespace_pattern, '', full_line)
topic = self._publications.match(short_line)
if topic:
if current_scope:
current_scope.publications.add(topic)
continue
else:
raise AssertionError("Encountered Publication topic outside of any scope! " + file_name + " Aborting!")
topic = self._subscriptions.match(short_line)
if topic:
if current_scope:
current_scope.subscriptions.add(topic)
continue
else:
raise AssertionError("Encountered Subscription topic outside of any scope! " + file_name + " Aborting!")
topic = self._ambiguities.match(short_line)
if topic:
if current_scope:
if topic != PubSub.AMBIGUOUS_SITE_TOPIC:
current_scope.ambiguities.add(topic)
self._warnings.append((current_scope, file_name, line_number, full_line))
continue
else:
raise AssertionError("Encountered Ambiguous topic outside of any scope! " + file_name + " Aborting!")
def _in_scope(self, scope_name = None):
if 0 < len(self._current_scope):
if None is scope_name:
scope_name = self._current_scope[-1].name
if scope_name in self._scope_whitelist:
return True
return False
def merge_depends(self):
log.info('### Merge Depends:')
for modname,module in self._found_modules.items():
if modname in self._scope_whitelist or 0==len(self._scope_whitelist):
for depname in module.dependencies:
if depname in self._found_libraries:
dep = self._found_libraries[depname]
# copy topics from library to depending library
for topic in dep.publications:
module.publications.add(topic)
for topic in dep.subscriptions:
module.subscriptions.add(topic)
for topic in dep.ambiguities:
                            module.ambiguities.add(topic)
# omit all libraries -- they've already been merged into their respective dependees
self._scope_whitelist = set([ str(s) for s in self._scope_whitelist if s not in self._found_libraries])
@property
def output_scopes(self):
""" get the set of all modules """
return self._print_scopes
@property
def output_topics(self):
""" get set set of all topics """
return self._print_topics
@property
def topic_colors(self):
""" get a dict of all topic colors with key=topic, value=color """
return self._topic_colors
class OutputGraphviz(object):
""" write graph using Graphviz """
def __init__(self, graph):
self._graph = graph
def write(self, file_name, engine='fdp',
show_publications=True, show_subscriptions=True):
""" write the graph to a file
:param engine: graphviz engine
- fdp works for large graphs
- neato works better for smaller graphs
- circo works for single modules
CLI: fdp graph.fv -Tpdf -o test.pdf
"""
print('Writing to '+file_name)
ratio = 1 # aspect ratio
output_topics = self._graph.output_topics
output_scopes = self._graph.output_scopes
topic_colors = self._graph.topic_colors
graph_attr={'splines': 'true', 'ratio': str(ratio), 'overlap': 'false'}
graph_attr['sep'] = '"+15,15"' # increase spacing between nodes
graph = Digraph(comment='autogenerated graph with graphviz using uorb_graph.py',
engine=engine, graph_attr=graph_attr)
# scopes: modules
log.info(' > Writing scopes')
for name,_ in output_scopes.items():
graph.node('m_'+name, name, shape='box', fontcolor='#ffffff',
style='filled', color='#666666', fontsize='16')
log.info(' > Writing topics')
for topic in output_topics:
graph.node('t_'+topic, topic, shape='ellipse', fontcolor='#ffffff',
style='filled', color=topic_colors[topic])
# edges
log.info(' > Writing publish edges')
if show_publications:
for scope_name,scope in output_scopes.items():
for topic in scope.publications:
if topic in output_topics:
graph.edge('m_'+scope_name, 't_'+topic, color=topic_colors[topic], style='dashed')
log.info(' > Writing subscribe edges')
if show_subscriptions:
for scope_name,scope in output_scopes.items():
for topic in scope.subscriptions:
if topic in output_topics:
graph.edge('t_'+topic, 'm_'+scope_name, color=topic_colors[topic])
graph.render(file_name, view=False)
class OutputJSON(object):
""" write graph to a JSON file (that can be used with D3.js) """
def __init__(self, graph):
self._graph = graph
def write(self, file_name):
print('Writing to '+file_name)
output_topics = self._graph.output_topics
output_scopes = self._graph.output_scopes
topic_colors = self._graph.topic_colors
data = {}
nodes = []
# nodes
# (sort by length, such that short names are last. The rendering order
# will be the same, so that in case of an overlap, the shorter label
# will be on top)
for scope_tuple in sorted(output_scopes.items(), key=(lambda st: len(st[0])), reverse=True):
node = {}
node['id'] = 'm_'+scope_tuple[0]
node['name'] = scope_tuple[0]
node['type'] = scope_tuple[1].typename
node['color'] = '#666666'
# TODO: add url to open module documentation?
nodes.append(node)
for topic in sorted(output_topics, key=len, reverse=True):
node = {}
node['id'] = 't_'+topic
node['name'] = topic
node['type'] = 'topic'
node['color'] = topic_colors[topic]
# url is opened when double-clicking on the node
# TODO: does not work for multi-topics
node['url'] = 'https://github.com/PX4/PX4-Autopilot/blob/master/msg/'+topic+'.msg'
nodes.append(node)
data['nodes'] = nodes
edges = []
# edges
for name,scope in output_scopes.items():
for topic in scope.publications:
if topic in output_topics:
edge = {}
edge['source'] = 'm_'+name
edge['target'] = 't_'+topic
edge['color'] = topic_colors[topic]
edge['style'] = 'dashed'
edges.append(edge)
for name,scope in output_scopes.items():
for topic in scope.subscriptions:
if topic in output_topics:
edge = {}
edge['source'] = 't_'+topic
edge['target'] = 'm_'+name
edge['color'] = topic_colors[topic]
edge['style'] = 'normal'
edges.append(edge)
data['links'] = edges
with open(file_name, 'w') as outfile:
json.dump(data, outfile) # add indent=2 for readable formatting
if "__main__" == __name__:
args = parser.parse_args()
if 0 < args.verbosity:
if 1 == args.verbosity:
log.setLevel(logging.INFO)
print("set log level to INFO")
else: # implicity 1<
log.setLevel(logging.DEBUG)
print("set log level to DEBUG")
# ignore topics that are subscribed/published by many topics, but are not really
# useful to show in the graph
topic_blacklist = [ 'parameter_update', 'mavlink_log', 'log_message' ]
print('Excluded topics: '+str(topic_blacklist))
if len(args.modules) == 0:
scope_whitelist = []
else:
scope_whitelist = [ m.strip() for m in args.modules.split(',')]
scope_whitelist = set(scope_whitelist)
graph = Graph(scope_whitelist=scope_whitelist, topic_blacklist=topic_blacklist)
# if no source paths are supplied, guess that we're in the project root, and apply it to the entire 'src/' tree
if len(args.src_path) == 0:
args.src_path = ['src']
# transcribe only the source paths that actually exist:
source_paths = []
for path in args.src_path:
if os.path.exists(path):
source_paths.append(path)
else:
            log.warning("Could not find path: " + path)
if 0 == len(source_paths):
print("!! None of the source directories were valid -- Exiting.")
sys.exit(-1)
# ignore certain paths
path_blacklist = ['src/lib/parameters/']
if 0 < len(args.exclude_path):
path_blacklist = args.exclude_path
if path_blacklist:
print('Excluded Path: '+str(path_blacklist))
graph.build(source_paths, path_blacklist=path_blacklist, use_topic_pubsub_union=args.use_topic_union, merge_depends=args.merge_depends)
if args.output == 'json':
output_json = OutputJSON(graph)
output_json.write(args.file+'.json')
elif args.output in ('graphviz','gv'):
try:
from graphviz import Digraph
except ImportError as e:
print("Failed to import graphviz: " + str(e))
print("")
print("You may need to install it with:")
print(" pip3 install --user graphviz")
print("")
sys.exit(1)
output_graphviz = OutputGraphviz(graph)
engine='fdp' # use neato or fdp
output_graphviz.write(args.file+'.fv', engine=engine)
output_graphviz.write(args.file+'_subs.fv', show_publications=False, engine=engine)
output_graphviz.write(args.file+'_pubs.fv', show_subscriptions=False, engine=engine)
elif args.output == 'none':
pass
else:
print('Error: unknown output format '+args.output)
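# Hedged usage note (added here, not part of the upstream file): typical
# invocations, assuming the script is run from the PX4-Autopilot source root
# (the module name below is only an example).
#   python3 Tools/uorb_graph/create.py -o json -f graph
#   python3 Tools/uorb_graph/create.py -m mc_att_control -o graphviz --merge-depends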
|
qa/L0_stability_metrics/test_config_generator.py | triton-inference-server/model_analyzer | 115 | 11083455 | # Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import yaml
class TestConfigGenerator:
"""
This class contains functions that
create configs for various test scenarios.
The `setup` function does the work common to all tests
TO ADD A TEST: Simply add a member function whose name starts
with 'generate'.
"""
def __init__(self):
test_functions = [
self.__getattribute__(name)
for name in dir(self)
if name.startswith('generate')
]
for test_function in test_functions:
self.setup()
test_function()
def setup(self):
parser = argparse.ArgumentParser()
parser.add_argument('-m',
'--models',
type=str,
required=True,
help='The models used for this test')
self.args = parser.parse_args()
self.models = sorted(self.args.models.split(','))
self.config = {}
# Profile config
self.config['run_config_search_disable'] = True
self.config['concurrency'] = 16
self.config['batch-size'] = 8
self.config['profile_models'] = self.models
# Analyze config
self.config['summarize'] = False
self.config['collect_cpu_metrics'] = True
self.config['gpu_output_fields'] = [
'model_name', 'batch_size', 'concurrency', 'gpu_used_memory',
'gpu_utilization'
]
self.config['analysis_models'] = {}
for model in self.models:
self.config['analysis_models'][model] = {
'objectives': {
'perf_throughput': 10
}
}
def generate_configs(self):
with open('config.yaml', 'w+') as f:
yaml.dump(self.config, f)
if __name__ == '__main__':
TestConfigGenerator()
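# Hedged usage note (added here, not part of the upstream file): the generator
# is driven entirely by the required --models flag and writes config.yaml into
# the working directory; the model names below are hypothetical placeholders.
#   python3 test_config_generator.py -m resnet50_libtorch,vgg19_libtorch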
|
devil/devil/android/cpu_temperature.py | tingshao/catapult | 138 | 11083469 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides device interactions for CPU temperature monitoring."""
# pylint: disable=unused-argument
import logging
from devil.android import device_utils
from devil.android.perf import perf_control
from devil.utils import timeout_retry
logger = logging.getLogger(__name__)
# NB: when adding devices to this structure, be aware of the impact it may
# have on the chromium.perf waterfall, as it may increase testing time.
# Please contact a person responsible for the waterfall to see if the
# device you're adding is currently being tested.
_DEVICE_THERMAL_INFORMATION = {
# Pixel 3
'blueline': {
'cpu_temps': {
# See /sys/class/thermal/thermal_zone<number>/type for description
# Types:
# cpu0: cpu0-silver-step
# cpu1: cpu1-silver-step
# cpu2: cpu2-silver-step
# cpu3: cpu3-silver-step
# cpu4: cpu0-gold-step
# cpu5: cpu1-gold-step
# cpu6: cpu2-gold-step
# cpu7: cpu3-gold-step
'cpu0': '/sys/class/thermal/thermal_zone11/temp',
'cpu1': '/sys/class/thermal/thermal_zone12/temp',
'cpu2': '/sys/class/thermal/thermal_zone13/temp',
'cpu3': '/sys/class/thermal/thermal_zone14/temp',
'cpu4': '/sys/class/thermal/thermal_zone15/temp',
'cpu5': '/sys/class/thermal/thermal_zone16/temp',
'cpu6': '/sys/class/thermal/thermal_zone17/temp',
'cpu7': '/sys/class/thermal/thermal_zone18/temp'
},
# Different device sensors use different multipliers
# e.g. Pixel 3 35 degrees c is 35000
'temp_multiplier': 1000
},
# Pixel
'sailfish': {
'cpu_temps': {
# The following thermal zones tend to produce the most accurate
# readings
# Types:
# cpu0: tsens_tz_sensor0
# cpu1: tsens_tz_sensor1
# cpu2: tsens_tz_sensor2
# cpu3: tsens_tz_sensor3
'cpu0': '/sys/class/thermal/thermal_zone1/temp',
'cpu1': '/sys/class/thermal/thermal_zone2/temp',
'cpu2': '/sys/class/thermal/thermal_zone3/temp',
'cpu3': '/sys/class/thermal/thermal_zone4/temp'
},
'temp_multiplier': 10
}
}
class CpuTemperature(object):
def __init__(self, device):
"""CpuTemperature constructor.
Args:
device: A DeviceUtils instance.
Raises:
TypeError: If it is not passed a DeviceUtils instance.
"""
if not isinstance(device, device_utils.DeviceUtils):
raise TypeError('Must be initialized with DeviceUtils object.')
self._device = device
self._perf_control = perf_control.PerfControl(self._device)
self._device_info = None
def InitThermalDeviceInformation(self):
"""Init the current devices thermal information.
"""
self._device_info = _DEVICE_THERMAL_INFORMATION.get(
self._device.build_product)
def IsSupported(self):
"""Check if the current device is supported.
Returns:
True if the device is in _DEVICE_THERMAL_INFORMATION and the temp
files exist. False otherwise.
"""
    # Init device info if it hasn't been manually initialised already
if self._device_info is None:
self.InitThermalDeviceInformation()
if self._device_info is not None:
return all(
self._device.FileExists(f)
for f in self._device_info['cpu_temps'].values())
return False
def LetCpuCoolToTemperature(self, target_temp, wait_period=30):
"""Lets device sit to give CPU time to cool down.
Implements a similar mechanism to
battery_utils.LetBatteryCoolToTemperature
Args:
      target_temp: A float containing the maximum temperature to allow
        in degrees c.
wait_period: An integer indicating time in seconds to wait
between checking.
"""
target_temp = int(target_temp * self._device_info['temp_multiplier'])
def cool_cpu():
# Get the temperatures
cpu_temp_paths = self._device_info['cpu_temps']
temps = []
for temp_path in cpu_temp_paths.values():
temp_return = self._device.ReadFile(temp_path)
# Output is an array of strings, only need the first line.
temps.append(int(temp_return))
if not temps:
logger.warning('Unable to read temperature files provided.')
return True
logger.info('Current CPU temperatures: %s', str(temps)[1:-1])
return all(t <= target_temp for t in temps)
logger.info('Waiting for the CPU to cool down to %s',
target_temp / self._device_info['temp_multiplier'])
# Set the governor to powersave to aid the cooling down of the CPU
self._perf_control.SetScalingGovernor('powersave')
# Retry 3 times, each time waiting 30 seconds.
# This negates most (if not all) of the noise in recorded results without
# taking too long
timeout_retry.WaitFor(cool_cpu, wait_period=wait_period, max_tries=3)
# Set the performance mode
self._perf_control.SetHighPerfMode()
def GetDeviceForTesting(self):
return self._device
def GetDeviceInfoForTesting(self):
return self._device_info
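# Hedged usage sketch (added here, not part of the upstream file): cooling a
# supported device before a benchmark run. The serial number is a hypothetical
# placeholder.
def _example_cool_device(serial='0123456789abcdef'):
  device = device_utils.DeviceUtils(serial)
  cpu_temperature = CpuTemperature(device)
  if cpu_temperature.IsSupported():
    # Wait (in 30 second steps) until all monitored cores are at or below 38 C.
    cpu_temperature.LetCpuCoolToTemperature(38)
  return cpu_temperature.GetDeviceInfoForTesting()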
|
neutron/api/converters.py | congnt95/neutron | 1,080 | 11083515 | # Copyright (c) 2021 Ericsson Software Technology
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from neutron_lib.placement import utils as pl_utils
# TODO(przszc): Delete when https://review.opendev.org/813650 is released
def convert_to_sanitized_binding_profile_allocation(allocation, port_id,
min_bw_rules):
"""Return binding-profile.allocation in the new format
    :param allocation: binding-profile.allocation attribute containing a
string with RP UUID
:param port_id: ID of the port that is being sanitized
:param min_bw_rules: A list of minimum bandwidth rules associated with the
port.
:return: A dict with allocation in {'<group_uuid>': '<rp_uuid>'} format.
"""
if isinstance(allocation, dict):
return allocation
group_id = str(
pl_utils.resource_request_group_uuid(uuid.UUID(port_id), min_bw_rules))
return {group_id: allocation}
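# Hedged illustration (added here, not part of the upstream module): allocations
# that already use the new dict format are returned unchanged; the UUID-like
# strings below are hypothetical placeholders.
def _example_already_sanitized():
    allocation = {'group-uuid-placeholder': 'rp-uuid-placeholder'}
    return convert_to_sanitized_binding_profile_allocation(
        allocation, port_id='port-uuid-placeholder', min_bw_rules=[])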
|
python/153_Find_Minimum_in_Rotated_Sorted_Array.py | dvlpsh/leetcode-1 | 4,416 | 11083571 | class Solution(object):
# def findMin(self, nums):
# """
# :type nums: List[int]
# :rtype: int
# """
# return self.get_min(nums, 0, len(nums) - 1)
#
# def get_min(self, nums, start, end):
# mid = (start + end) / 2
# if start == end:
# # one element
# return nums[start]
# if mid == start or mid == end:
# # two element
# return min(nums[start], nums[end])
# if nums[mid] < nums[end]:
# # right side sorted
# if nums[mid] > nums[start]:
# # not rotated
# return nums[start]
# return self.get_min(nums, start, mid)
# elif nums[mid] > nums[end]:
# # left side sorted
# return self.get_min(nums, mid, end)
def findMin(self, nums):
# A[l] > A[r]
l, r = 0, len(nums) - 1
while l < r and nums[l] >= nums[r]:
            mid = (l + r) // 2
if nums[mid] > nums[r]:
l = mid + 1
else:
r = mid
return nums[l]
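# Hedged usage sketch (added here, not part of the upstream file): the minimum
# of the rotated sorted array [4, 5, 6, 7, 0, 1, 2] is 0.
def _example_find_min():
    return Solution().findMin([4, 5, 6, 7, 0, 1, 2])  # -> 0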
|
corehq/ex-submodules/pillowtop/management/commands/merge_pillow_checkpoints.py | dimagilg/commcare-hq | 471 | 11083608 | import logging
import sys
from django.core.management.base import BaseCommand
from django.db.models import Count, Max, Min
from pillowtop import get_pillow_by_name
from pillowtop.models import KafkaCheckpoint
logger = logging.getLogger(__name__)
def confirm(msg):
return input(msg) == 'y'
class Command(BaseCommand):
"""Merge multiple pillows into one"""
def add_arguments(self, parser):
parser.add_argument(
'pillow_names', nargs='+',
help="Merge all pillow checkpoints into the last one named"
)
def handle(self, pillow_names, **options):
from_pillows = pillow_names[:-1]
to_pillow = pillow_names[-1]
logging.info(f"Attempting to merge {from_pillows} into {to_pillow}")
from_checkpoints = [
get_pillow_by_name(pillow).checkpoint.checkpoint_id
for pillow in from_pillows
]
to_checkpoint = get_pillow_by_name(to_pillow).checkpoint.checkpoint_id
existing_checkpoints = list(
KafkaCheckpoint.objects.filter(checkpoint_id=to_checkpoint)
.values('checkpoint_id').annotate(Max('last_modified'))
)
if existing_checkpoints:
print(f'{to_checkpoint} already exists:')
for checkpoint in existing_checkpoints:
print(f"{checkpoint['checkpoint_id']} last updated {checkpoint['last_modified__max']}")
if not confirm("Do you want to continue and overwrite existing checkpoints? [y/n]"):
sys.exit(1)
if not confirm("Are you sure you want to DELETE existing checkpoints? [y/n]"):
sys.exit(1)
KafkaCheckpoint.objects.filter(checkpoint_id=to_checkpoint).delete()
checkpoint_info = (
KafkaCheckpoint.objects
.filter(checkpoint_id__in=from_checkpoints)
.values('topic', 'partition')
.annotate(Min('offset'), Max('offset'), Count('checkpoint_id'))
)
number_checkpoints = checkpoint_info[0]['checkpoint_id__count']
number_nonstandard_checkpoints = sum(
1 for info in checkpoint_info
if info['checkpoint_id__count'] != number_checkpoints
)
if number_nonstandard_checkpoints > 0:
logger.error(
f'Not all checkpoints have the same topics and partitions specified. '
'Aborting pillow merging'
)
sys.exit(2)
minimum_difference = min(
info['offset__max'] - info['offset__min']
for info in checkpoint_info
)
if minimum_difference < 0:
logger.error("The minimum difference between checkpoints between pillows is less than zero")
sys.exit(4)
maximum_difference = max(
info['offset__max'] - info['offset__min']
for info in checkpoint_info
)
if maximum_difference > 0:
logger.warning(f"At least one checkpoint will need to reprocess {maximum_difference} changes")
if confirm("Is this amount of reprocessing acceptable y/N?"):
sys.exit(3)
else:
logger.info("All pillows have the same offsets")
for info in checkpoint_info:
KafkaCheckpoint.objects.update_or_create(
checkpoint_id=to_checkpoint,
topic=info['topic'],
partition=info['partition'],
defaults={
'offset': info['offset__min']
}
)
logger.info(f"{to_checkpoint} checkpoints created")
|
desktop/core/ext-py/docutils-0.14/test/test_parsers/test_rst/test_TableParser.py | kokosing/hue | 5,079 | 11083633 | #! /usr/bin/env python
# coding: utf-8
# $Id: test_TableParser.py 7668 2013-06-04 12:46:30Z milde $
# Author: <NAME> <<EMAIL>>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.GridTableParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['grid_tables'] = [
["""\
+-------------------------------------+
| A table with one cell and one line. |
+-------------------------------------+
""",
[(0, 0, 2, 38, ['A table with one cell and one line.'])],
([37],
[],
[[(0, 0, 1, ['A table with one cell and one line.'])]])],
["""\
+--------------+--------------+
| A table with | two columns. |
+--------------+--------------+
""",
[(0, 0, 2, 15, ['A table with']),
(0, 15, 2, 30, ['two columns.'])],
([14, 14],
[],
[[(0, 0, 1, ['A table with']),
(0, 0, 1, ['two columns.'])]])],
# Combining chars in grid tables still fail
# [u"""\
# +--------------+------------------+
# | A tāble w̅ith | comb̲ining chars. |
# +--------------+------------------+
# """,
# [(0, 0, 2, 15, [u'A table with']),
# (0, 15, 2, 30, [u'combining chars.'])],
# ([14, 14],
# [],
# [[(0, 0, 1, [u'A table with']),
# (0, 0, 1, [u'combining chars.'])]])],
["""\
+--------------+-------------+
| A table with | two columns |
+--------------+-------------+
| and | two rows. |
+--------------+-------------+
""",
[(0, 0, 2, 15, ['A table with']),
(0, 15, 2, 29, ['two columns']),
(2, 0, 4, 15, ['and']),
(2, 15, 4, 29, ['two rows.'])],
([14, 13],
[],
[[(0, 0, 1, ['A table with']),
(0, 0, 1, ['two columns'])],
[(0, 0, 3, ['and']),
(0, 0, 3, ['two rows.'])]])],
["""\
+--------------------------+
| A table with three rows, |
+------------+-------------+
| and two | columns. |
+------------+-------------+
| First and last rows |
| contain column spans. |
+--------------------------+
""",
[(0, 0, 2, 27, ['A table with three rows,']),
(2, 0, 4, 13, ['and two']),
(2, 13, 4, 27, ['columns.']),
(4, 0, 7, 27, ['First and last rows', 'contain column spans.'])],
([12, 13],
[],
[[(0, 1, 1, ['A table with three rows,']),
None],
[(0, 0, 3, ['and two']),
(0, 0, 3, ['columns.'])],
[(0, 1, 5, ['First and last rows', 'contain column spans.']),
None]])],
["""\
+------------+-------------+---------------+
| A table | two rows in | and row spans |
| with three +-------------+ to left and |
| columns, | the middle, | right. |
+------------+-------------+---------------+
""",
[(0, 0, 4, 13, ['A table', 'with three', 'columns,']),
(0, 13, 2, 27, ['two rows in']),
(0, 27, 4, 43, ['and row spans', 'to left and', 'right.']),
(2, 13, 4, 27, ['the middle,'])],
([12, 13, 15],
[],
[[(1, 0, 1, ['A table', 'with three', 'columns,']),
(0, 0, 1, ['two rows in']),
(1, 0, 1, ['and row spans', 'to left and', 'right.'])],
[None,
(0, 0, 3, ['the middle,']),
None]])],
["""\
+------------+-------------+---------------+
| A table | | two rows in | and funny |
| with 3 +--+-------------+-+ stuff. |
| columns, | the middle, | | |
+------------+-------------+---------------+
""",
[(0, 0, 4, 13, ['A table |', 'with 3 +--', 'columns,']),
(0, 13, 2, 27, ['two rows in']),
(0, 27, 4, 43, [' and funny', '-+ stuff.', ' |']),
(2, 13, 4, 27, ['the middle,'])],
([12, 13, 15],
[],
[[(1, 0, 1, ['A table |', 'with 3 +--', 'columns,']),
(0, 0, 1, ['two rows in']),
(1, 0, 1, [' and funny', '-+ stuff.', ' |'])],
[None,
(0, 0, 3, ['the middle,']),
None]])],
["""\
+-----------+-------------------------+
| W/NW cell | N/NE cell |
| +-------------+-----------+
| | Middle cell | E/SE cell |
+-----------+-------------+ |
| S/SE cell | |
+-------------------------+-----------+
""",
[(0, 0, 4, 12, ['W/NW cell', '', '']),
(0, 12, 2, 38, ['N/NE cell']),
(2, 12, 4, 26, ['Middle cell']),
(2, 26, 6, 38, ['E/SE cell', '', '']),
(4, 0, 6, 26, ['S/SE cell'])],
([11, 13, 11],
[],
[[(1, 0, 1, ['W/NW cell', '', '']),
(0, 1, 1, ['N/NE cell']),
None],
[None,
(0, 0, 3, ['Middle cell']),
(1, 0, 3, ['E/SE cell', '', ''])],
[(0, 1, 5, ['S/SE cell']),
None,
None]])],
["""\
+--------------+-------------+
| A bad table. | |
+--------------+ |
| Cells must be rectangles. |
+----------------------------+
""",
'TableMarkupError: Malformed table; parse incomplete.',
'TableMarkupError: Malformed table; parse incomplete.'],
["""\
+-------------------------------+
| A table with two header rows, |
+------------+------------------+
| the first | with a span. |
+============+==================+
| Two body | rows, |
+------------+------------------+
| the second with a span. |
+-------------------------------+
""",
[(0, 0, 2, 32, ['A table with two header rows,']),
(2, 0, 4, 13, ['the first']),
(2, 13, 4, 32, ['with a span.']),
(4, 0, 6, 13, ['Two body']),
(4, 13, 6, 32, ['rows,']),
(6, 0, 8, 32, ['the second with a span.'])],
([12, 18],
[[(0, 1, 1, ['A table with two header rows,']),
None],
[(0, 0, 3, ['the first']),
(0, 0, 3, ['with a span.'])]],
[[(0, 0, 5, ['Two body']),
(0, 0, 5, ['rows,'])],
[(0, 1, 7, ['the second with a span.']),
None]])],
["""\
+-------------------------------+
| A table with two head/body |
+=============+=================+
| row | separators. |
+=============+=================+
| That's bad. | |
+-------------+-----------------+
""",
'TableMarkupError: Multiple head/body row separators '
'(table lines 3 and 5); only one allowed.',
'TableMarkupError: Multiple head/body row separators '
'(table lines 3 and 5); only one allowed.'],
["""\
+-------------------------------------+
| |
+-------------------------------------+
""",
[(0, 0, 2, 38, [''])],
([37],
[],
[[(0, 0, 1, [''])]])],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
music21/test/testRunner.py | cuthbertLab/music21 | 1,449 | 11083643 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: testRunner.py
# Purpose: Music21 testing suite
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2006-2016 <NAME> and the music21
# Project
# License: BSD, see license.txt
# -----------------------------------------------------------------------------
'''
The testRunner module contains the all important "mainTest" function that runs tests
in a given module. Except for the one instance of "defaultImports", everything here
can run on any system, not just music21.
'''
import doctest
import inspect
import platform
import re
import sys
import unittest
defaultImports = ['music21']
# ALL_OUTPUT = []
# test related functions
def addDocAttrTestsToSuite(suite,
moduleVariableLists,
outerFilename=None,
globs=False,
optionflags=(
doctest.ELLIPSIS
| doctest.NORMALIZE_WHITESPACE
)):
'''
    Takes a suite, such as a doctest.DocTestSuite, and the list of variables
    in a module, and adds to the suite any doctests from classes that have a
    _DOC_ATTR dictionary (which documents the properties in the class).
>>> import doctest
>>> s1 = doctest.DocTestSuite(chord)
>>> s1TestsBefore = len(s1._tests)
>>> allLocals = [getattr(chord, x) for x in dir(chord)]
>>> test.testRunner.addDocAttrTestsToSuite(s1, allLocals)
>>> s1TestsAfter = len(s1._tests)
>>> s1TestsAfter - s1TestsBefore
2
>>> t = s1._tests[-1]
>>> t
isRest ()
'''
dtp = doctest.DocTestParser()
if globs is False:
globs = __import__(defaultImports[0]).__dict__.copy()
elif globs is None:
globs = {}
for lvk in moduleVariableLists:
if not (inspect.isclass(lvk)):
continue
docattr = getattr(lvk, '_DOC_ATTR', None)
if docattr is None:
continue
for dockey in docattr:
documentation = docattr[dockey]
# print(documentation)
dt = dtp.get_doctest(documentation, globs, dockey, outerFilename, 0)
if not dt.examples:
continue
dtc = doctest.DocTestCase(dt,
optionflags=optionflags,
)
# print(dtc)
suite.addTest(dtc)
def fixDoctests(doctestSuite):
r'''
Fix doctests so that addresses are sanitized.
In the past this fixed other differences among Python versions.
In the future, it might again!
'''
windows: bool = platform.system() == 'Windows'
for dtc in doctestSuite: # Suite to DocTestCase -- undocumented.
if not hasattr(dtc, '_dt_test'):
continue
dt = dtc._dt_test # DocTest
for example in dt.examples:
example.want = stripAddresses(example.want, '0x...')
if windows:
example.want = example.want.replace('PosixPath', 'WindowsPath')
ADDRESS = re.compile('0x[0-9A-Fa-f]+')
def stripAddresses(textString, replacement='ADDRESS') -> str:
'''
Function that changes all memory addresses (pointers) in the given
textString with (replacement). This is useful for testing
that a function gives an expected result even if the result
contains references to memory locations. So for instance:
>>> stripA = test.testRunner.stripAddresses
>>> stripA('{0.0} <music21.clef.TrebleClef object at 0x02A87AD0>')
'{0.0} <music21.clef.TrebleClef object at ADDRESS>'
while this is left alone:
>>> stripA('{0.0} <music21.humdrum.spineParser.MiscTandem *>I>')
'{0.0} <music21.humdrum.spineParser.MiscTandem *>I>'
For doctests, can strip to '...' to make it work fine with doctest.ELLIPSIS
>>> stripA('{0.0} <music21.base.Music21Object object at 0x102a0ff10>', '0x...')
'{0.0} <music21.base.Music21Object object at 0x...>'
'''
return ADDRESS.sub(replacement, textString)
# ------------------------------------------------------------------------------
def mainTest(*testClasses, **kwargs):
'''
Takes as its arguments modules (or a string 'noDocTest' or 'verbose')
and runs all of these modules through a unittest suite
Unless 'noDocTest' is passed as a module, a docTest
is also performed on `__main__`, hence the name "mainTest".
If 'moduleRelative' (a string) is passed as a module, then
global variables are preserved.
Run example (put at end of your modules):
::
import unittest
class Test(unittest.TestCase):
def testHello(self):
hello = 'Hello'
self.assertEqual('Hello', hello)
import music21
if __name__ == '__main__':
music21.mainTest(Test)
This module tries to fix up some differences between python2 and python3 so
that the same doctests can work. These differences can now be removed, but
I cannot remember what they are!
'''
runAllTests = True
# default -- is fail fast.
failFast = bool(kwargs.get('failFast', True))
if failFast:
optionflags = (
doctest.ELLIPSIS
| doctest.NORMALIZE_WHITESPACE
| doctest.REPORT_ONLY_FIRST_FAILURE
)
else:
optionflags = (
doctest.ELLIPSIS
| doctest.NORMALIZE_WHITESPACE
)
globs = None
if ('noDocTest' in testClasses
or 'noDocTest' in sys.argv
or 'nodoctest' in sys.argv
or bool(kwargs.get('noDocTest', False))):
skipDoctest = True
else:
skipDoctest = False
# start with doc tests, then add unit tests
if skipDoctest:
# create a test suite for storage
s1 = unittest.TestSuite()
else:
# create test suite derived from doc tests
# here we use '__main__' instead of a module
if ('moduleRelative' in testClasses
or 'moduleRelative' in sys.argv
or bool(kwargs.get('moduleRelative', False))):
pass
else:
for di in defaultImports:
globs = __import__(di).__dict__.copy()
if ('importPlusRelative' in testClasses
or 'importPlusRelative' in sys.argv
or bool(kwargs.get('importPlusRelative', False))):
globs.update(inspect.stack()[1][0].f_globals)
try:
s1 = doctest.DocTestSuite(
'__main__',
globs=globs,
optionflags=optionflags,
)
except ValueError as ve: # no docstrings
print('Problem in docstrings [usually a missing r value before '
+ f'the quotes:] {ve}')
s1 = unittest.TestSuite()
verbosity = 1
if ('verbose' in testClasses
or 'verbose' in sys.argv
or bool(kwargs.get('verbose', False))):
verbosity = 2 # this seems to hide most display
displayNames = False
if ('list' in sys.argv
or 'display' in sys.argv
or bool(kwargs.get('display', False))
or bool(kwargs.get('list', False))):
displayNames = True
runAllTests = False
runThisTest = None
if len(sys.argv) == 2:
arg = sys.argv[1].lower()
if arg not in ('list', 'display', 'verbose', 'nodoctest'):
# run a test directly named in this module
runThisTest = sys.argv[1]
if bool(kwargs.get('runTest', False)):
runThisTest = kwargs.get('runTest', False)
# -f, --failfast
if ('onlyDocTest' in sys.argv
or 'onlyDocTest' in testClasses
or bool(kwargs.get('onlyDocTest', False))):
testClasses = [] # remove cases
for t in testClasses:
if not isinstance(t, str):
if displayNames is True:
for tName in unittest.defaultTestLoader.getTestCaseNames(t):
print(f'Unit Test Method: {tName}')
if runThisTest is not None:
tObj = t() # call class
# search all names for case-insensitive match
for name in dir(tObj):
if (name.lower() == runThisTest.lower()
or name.lower() == ('test' + runThisTest.lower())
or name.lower() == ('xtest' + runThisTest.lower())):
runThisTest = name
break
if hasattr(tObj, runThisTest):
print(f'Running Named Test Method: {runThisTest}')
tObj.setUp()
getattr(tObj, runThisTest)()
runAllTests = False
break
else:
print(f'Could not find named test method: {runThisTest}, running all tests')
# normally operation collects all tests
s2 = unittest.defaultTestLoader.loadTestsFromTestCase(t)
s1.addTests(s2)
# Add _DOC_ATTR tests...
if not skipDoctest:
stacks = inspect.stack()
if len(stacks) > 1:
outerFrameTuple = stacks[1]
else:
outerFrameTuple = stacks[0]
outerFrame = outerFrameTuple[0]
outerFilename = outerFrameTuple[1]
localVariables = list(outerFrame.f_locals.values())
addDocAttrTestsToSuite(s1, localVariables, outerFilename, globs, optionflags)
if runAllTests is True:
fixDoctests(s1)
runner = unittest.TextTestRunner()
runner.verbosity = verbosity
unused_testResult = runner.run(s1)
if __name__ == '__main__':
mainTest()
# from pprint import pprint
# pprint(ALL_OUTPUT)
|
homeassistant/components/launch_library/sensor.py | learn-home-automation/core | 22,481 | 11083646 | """A sensor platform that gives you information about the next space launch."""
from __future__ import annotations
from datetime import timedelta
import logging
from pylaunches import PyLaunches, PyLaunchesException
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import (
ATTR_AGENCY,
ATTR_AGENCY_COUNTRY_CODE,
ATTR_LAUNCH_TIME,
ATTR_STREAM,
ATTRIBUTION,
DEFAULT_NAME,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(hours=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Create the launch sensor."""
name = config[CONF_NAME]
session = async_get_clientsession(hass)
launches = PyLaunches(session)
async_add_entities([LaunchLibrarySensor(launches, name)], True)
class LaunchLibrarySensor(SensorEntity):
"""Representation of a launch_library Sensor."""
_attr_icon = "mdi:rocket"
def __init__(self, api: PyLaunches, name: str) -> None:
"""Initialize the sensor."""
self.api = api
self._attr_name = name
async def async_update(self) -> None:
"""Get the latest data."""
try:
launches = await self.api.upcoming_launches()
except PyLaunchesException as exception:
_LOGGER.error("Error getting data, %s", exception)
self._attr_available = False
else:
if next_launch := next((launch for launch in launches), None):
self._attr_available = True
self._attr_native_value = next_launch.name
self._attr_extra_state_attributes = {
ATTR_LAUNCH_TIME: next_launch.net,
ATTR_AGENCY: next_launch.launch_service_provider.name,
ATTR_AGENCY_COUNTRY_CODE: next_launch.pad.location.country_code,
ATTR_STREAM: next_launch.webcast_live,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
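# Hedged configuration sketch (added here, not part of the upstream file): a
# minimal configuration.yaml entry for this platform, assuming the platform key
# matches the "launch_library" package name.
#
#   sensor:
#     - platform: launch_library
#       name: Next launch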
|
demo/io_util.py | hoverinc/OANet | 209 | 11083688 | import numpy as np
def write_keypoints(path, keypoints):
# path: path to save
# keypoints: single-precision real matrix, N*4, x,y,scale, orientation
assert keypoints.shape[1]==4
write_matrix(path, keypoints, np.float32)
def write_descriptors(path, descriptors):
write_matrix(path, descriptors, np.float32)
def write_matches(path, matches):
if len(matches) > 0:
write_matrix(path, matches, np.uint32)
def read_keypoints(path):
return read_matrix(path, np.float32)
def read_descriptors(path):
return read_matrix(path, np.float32)
def read_matches(path):
return read_matrix(path, np.uint32)
def read_matrix(path, dtype):
with open(path, "rb") as fid:
shape = np.fromfile(fid, count=2, dtype=np.int32)
matrix = np.fromfile(fid, count=shape[0] * shape[1], dtype=dtype)
return matrix.reshape(shape)
def write_matrix(path, data, dtype):
with open(path, 'wb') as f:
np.asarray(data.shape, dtype='int32').tofile(f)
data.astype(dtype).tofile(f)
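# Hedged usage sketch (added here, not part of the upstream file): round-tripping
# keypoints through the binary format (an int32 shape header followed by the raw
# float32 values). The file name is a hypothetical placeholder.
def _example_keypoints_roundtrip(path='example_keypoints.bin'):
    kps = np.zeros((5, 4), dtype=np.float32)  # columns: x, y, scale, orientation
    write_keypoints(path, kps)
    return read_keypoints(path)  # -> (5, 4) float32 array equal to kps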
|
tools/module_generator/c_source_templates.py | acrop/iotjs | 2,081 | 11083706 | #!/usr/bin/env python
# Copyright 2019-present Samsung Electronics Co., Ltd. and other contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Templates for create/set a C variable
# one length String to char
JS_TO_CHAR = '''
// create a character value from a jerry_value_t
{TYPE} {NAME};
jerry_string_to_char_buffer ({JVAL}, (jerry_char_t*)(&{NAME}), 1);
'''
# Set a char variable
JS_SET_CHAR = '''
// set the value of {NAME}
jerry_string_to_char_buffer ({JVAL}, (jerry_char_t*)(&{NAME}), 1);
'''
# Number to int/float/enum
JS_TO_NUMBER = '''
// create an integer / floating point number from a jerry_value_t
{TYPE} {NAME} = ({TYPE})jerry_get_number_value ({JVAL});
'''
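# Hedged illustration (added here, not part of the upstream file): a generator
# would typically instantiate one of the JS_TO_* templates above with
# str.format; the variable and argument names below are hypothetical
# placeholders.
# JS_TO_NUMBER.format(TYPE='int', NAME='count', JVAL='jargv[0]')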
# Set an int/float/enum variable
JS_SET_NUMBER = '''
// set the value of {NAME}
{NAME} = jerry_get_number_value ({JVAL});
'''
# Boolean to _Bool
JS_TO_BOOL = '''
// create a _Bool value from a jerry_value_t
{TYPE} {NAME} = jerry_value_to_boolean ({JVAL});
'''
# Set a _Bool variable
JS_SET_BOOL = '''
// set the value of {NAME}
{NAME} = jerry_value_to_boolean ({JVAL});
'''
# String to char[]
JS_TO_STRING = '''
// create an array of characters from a jerry_value_t
{TYPE} * {NAME} = NULL;
if (jerry_value_is_string ({JVAL}))
{{
jerry_size_t {NAME}_size = jerry_get_string_size ({JVAL});
{NAME} = malloc ({NAME}_size + 1);
if({NAME} == NULL)
{{
return jerry_create_error (JERRY_ERROR_COMMON, (const jerry_char_t*)"Fail to allocate memory.");
}}
jerry_string_to_char_buffer ({JVAL}, (jerry_char_t*){NAME}, {NAME}_size);
{NAME}[{NAME}_size] = '\\0';
}}
'''
JS_FREE_STRING = '''
// TODO: if you won't use {NAME} pointer, uncomment the lines below
//if (jerry_value_is_string ({JVAL}))
// free ({NAME});
'''
# Set a char* variable
JS_SET_CHAR_PTR = '''
// set the value of {NAME}
jerry_size_t size = jerry_get_string_size ({JVAL});
if ({NAME} == NULL)
{{
{NAME} = ({TYPE}*) malloc (size + 1);
}}
jerry_string_to_char_buffer ({JVAL}, (jerry_char_t*){NAME}, size);
{NAME}[size] = '\\0';
'''
# Set a char[] variable
JS_SET_CHAR_ARR = '''
// set the value of {NAME}
jerry_string_to_char_buffer ({JVAL}, (jerry_char_t*){NAME}, {SIZE});
{NAME}[{SIZE}] = '\\0';
'''
# TypedArray to number pointer
JS_TO_TYPEDARRAY = '''
// create a pointer to number from a jerry_value_t
{TYPE} * {NAME} = NULL;
jerry_length_t {NAME}_byteLength = 0;
jerry_length_t {NAME}_byteOffset = 0;
jerry_value_t {NAME}_buffer;
if (jerry_value_is_typedarray ({JVAL}))
{{
{NAME}_buffer = jerry_get_typedarray_buffer ({JVAL}, &{NAME}_byteOffset, &{NAME}_byteLength);
{NAME} = ({TYPE}*) malloc ({NAME}_byteLength);
if({NAME} == NULL)
{{
jerry_release_value ({NAME}_buffer);
return jerry_create_error (JERRY_ERROR_COMMON, (const jerry_char_t*)"Fail to allocate memory.");
}}
jerry_arraybuffer_read ({NAME}_buffer, {NAME}_byteOffset, (uint8_t*){NAME}, {NAME}_byteLength);
}}
'''
JS_FREE_BUFFER = '''
jerry_release_value ({NAME}_buffer);
// TODO: if you won't use {NAME} pointer, uncomment the line below
//free ({NAME});
'''
JS_FREE_WRITE_BUFFER = '''
// write the values back into an arraybuffer from a pointer
if (jerry_value_is_typedarray ({JVAL}))
{{
jerry_arraybuffer_write ({NAME}_buffer, {NAME}_byteOffset, (uint8_t*){NAME}, {NAME}_byteLength);
jerry_release_value ({NAME}_buffer);
// TODO: if you won't use {NAME} pointer, uncomment the line below
//free ({NAME});
}}
'''
# Set a number pointer
JS_SET_TYPEDARRAY = '''
// set the value of {NAME}
jerry_length_t byteLength = 0;
jerry_length_t byteOffset = 0;
jerry_value_t buffer;
if (jerry_value_is_typedarray ({JVAL}))
{{
buffer = jerry_get_typedarray_buffer ({JVAL}, &byteOffset, &byteLength);
if ({NAME} == NULL)
{{
{NAME} = ({TYPE}*) malloc (byteLength);
}}
jerry_arraybuffer_read (buffer, byteOffset, (uint8_t*){NAME}, byteLength);
jerry_release_value (buffer);
}}
else
{{
{NAME} = NULL;
}}
'''
# Object to struct/union
JS_TO_RECORD = '''
// create a record from a jerry_value_t
void* {NAME}_void_ptr;
const jerry_object_native_info_t* {NAME}_type_ptr;
bool {NAME}_has_ptr = jerry_get_object_native_pointer({JVAL}, &{NAME}_void_ptr, &{NAME}_type_ptr);
if (!{NAME}_has_ptr ||
({NAME}_type_ptr != &{RECORD}_type_info && {NAME}_type_ptr != &{RECORD}_type_info_static)) {{
char const *msg = "Failed to get native {TYPE} pointer";
return jerry_create_error(JERRY_ERROR_TYPE, (const jerry_char_t *)msg);
}}
{TYPE} {NAME} = *(({TYPE}*){NAME}_void_ptr);
'''
# Set a struct/union
JS_SET_RECORD = '''
// set the value of {NAME}
void* {RECORD}_void_ptr;
const jerry_object_native_info_t* {RECORD}_type_ptr;
bool {RECORD}_has_ptr = jerry_get_object_native_pointer({JVAL}, &{RECORD}_void_ptr, &{RECORD}_type_ptr);
if (!{RECORD}_has_ptr ||
({RECORD}_type_ptr != &{RECORD}_type_info && {RECORD}_type_ptr != &{RECORD}_type_info_static)) {{
char const *msg = "Failed to get native {RECORD} pointer";
return jerry_create_error(JERRY_ERROR_TYPE, (const jerry_char_t *)msg);
}}
{NAME} = *(({TYPE}*){RECORD}_void_ptr);
'''
# Set a const struct/union
JS_SET_CONST_RECORD = '''
// set the value of {NAME}
void* {RECORD}_void_ptr;
const jerry_object_native_info_t* {RECORD}_type_ptr;
bool {RECORD}_has_ptr = jerry_get_object_native_pointer({JVAL}, &{RECORD}_void_ptr, &{RECORD}_type_ptr);
if (!{RECORD}_has_ptr ||
({RECORD}_type_ptr != &{RECORD}_type_info && {RECORD}_type_ptr != &{RECORD}_type_info_static)) {{
char const *msg = "Failed to get native {RECORD} pointer";
return jerry_create_error(JERRY_ERROR_TYPE, (const jerry_char_t *)msg);
}}
memcpy(&{NAME}, {RECORD}_void_ptr, sizeof({TYPE}));
'''
# Object to struct/union pointer
JS_TO_RECORD_PTR = '''
// create a record pointer from a jerry_value_t
void* {NAME}_void_ptr;
const jerry_object_native_info_t* {NAME}_type_ptr;
bool {NAME}_has_ptr = jerry_get_object_native_pointer({JVAL}, &{NAME}_void_ptr, &{NAME}_type_ptr);
if (!{NAME}_has_ptr ||
({NAME}_type_ptr != &{RECORD}_type_info && {NAME}_type_ptr != &{RECORD}_type_info_static)) {{
char const *msg = "Failed to get native {TYPE} pointer";
return jerry_create_error(JERRY_ERROR_TYPE, (const jerry_char_t *)msg);
}}
{TYPE} * {NAME} = ({TYPE}*){NAME}_void_ptr;
'''
# Function to C function pointer
JS_TO_FUNCTION = '''
// create a function pointer from a jerry_value_t
{TYPE} (*{NAME})({PARAMS}) = NULL;
if (jerry_value_is_function({JVAL}))
{{
{FUNC}_{NAME}_js = {JVAL};
{NAME} = {FUNC}_{NAME};
}}
'''
JS_CB_FUNCTION = '''
// native callback function
jerry_value_t {FUNC}_{NAME}_js;
{RET_TYPE} {FUNC}_{NAME} ({PARAMS})
{{
jerry_value_t args[{LENGTH}];
{CREATE_VAL}
jerry_value_t this_val = jerry_create_undefined();
jerry_value_t result = jerry_call_function({FUNC}_{NAME}_js, this_val, args, {LENGTH});
{RESULT}
jerry_release_value(result);
jerry_release_value(this_val);
for (int i = 0; i < {LENGTH}; i++)
{{
jerry_release_value(args[i]);
}}
return {RET};
}}
'''
# Unsupported C type
JS_TO_UNSUPPORTED = '''
// TODO: Define the right value of the variable.
{TYPE} {NAME};
'''
# Templates for creating a jerry_value_t variable
# Create Undefined/Bool/Number/Object
JS_CREATE_VAL = '''
jerry_value_t {NAME} = jerry_create_{TYPE} ({FROM});
'''
# Create a single-character String
JS_CREATE_CHAR = '''
jerry_value_t {NAME} = jerry_create_string_sz ((jerry_char_t*)(&{FROM}), 1);
'''
# Create String
JS_CREATE_STRING = '''
jerry_value_t {NAME};
if ({FROM} != NULL)
{{
{NAME} = jerry_create_string ((jerry_char_t*){FROM});
}}
else
{{
{NAME} = jerry_create_null ();
}}
'''
# Create TypedArray or Null
JS_CREATE_TYPEDARRAY = '''
// create a typedarray or null from a pointer
jerry_value_t {NAME};
if ({FROM} != NULL)
{{
jerry_length_t {NAME}_byteLength = sizeof({TYPE});
jerry_value_t {NAME}_buffer = jerry_create_arraybuffer ({NAME}_byteLength);
jerry_arraybuffer_write ({NAME}_buffer, 0, (uint8_t*){FROM}, {NAME}_byteLength);
{NAME} = jerry_create_typedarray_for_arraybuffer_sz (JERRY_TYPEDARRAY_{ARRAY_TYPE}, {NAME}_buffer, 0, 1);
jerry_release_value ({NAME}_buffer);
}}
else
{{
{NAME} = jerry_create_null ();
}}
'''
TYPEDARRAYS = {
'signed char': 'INT8',
'unsigned char': 'UINT8',
'short': 'INT16',
'unsigned short': 'UINT16',
'int': 'INT32',
'unsigned int': 'UINT32',
'long': 'INT32',
'unsigned long': 'UINT32',
'long long': 'INT32',
'unsigned long long': 'UINT32',
'float': 'FLOAT32',
'double': 'FLOAT64',
'long double': 'FLOAT64'
}
# Create Object
JS_CREATE_OBJECT = '''
// create object from record
{TYPE}* {RECORD}_native_ptr = ({TYPE}*)calloc(1, sizeof({TYPE}));
*{RECORD}_native_ptr = {FROM};
jerry_value_t {NAME} = {RECORD}_js_creator({RECORD}_native_ptr);
jerry_set_object_native_pointer({NAME}, {RECORD}_native_ptr, &{RECORD}_type_info);
'''
# Create Object from a const record
JS_CREATE_CONST_OBJECT = '''
// create object from record
{TYPE}* {RECORD}_native_ptr = ({TYPE}*)calloc(1, sizeof({TYPE}));
memcpy({RECORD}_native_ptr, &{FROM}, sizeof({TYPE}));
jerry_value_t {NAME} = {RECORD}_js_creator({RECORD}_native_ptr);
jerry_set_object_native_pointer({NAME}, {RECORD}_native_ptr, &{RECORD}_type_info);
'''
# Unsupported C type
JS_CREATE_UNSUPPORTED = '''
// TODO: Create a valid jerry_value_t from '{FROM}'.
jerry_value_t {NAME} = jerry_create_undefined ();
'''
# Templates for record types
# Record destructor
JS_RECORD_DESTRUCTOR = '''
void {RECORD}_js_destructor(void* ptr) {{
free(({TYPE}*)ptr);
}}
static const jerry_object_native_info_t {RECORD}_type_info = {{
.free_cb = {RECORD}_js_destructor
}};
static const jerry_object_native_info_t {RECORD}_type_info_static = {{
.free_cb = NULL
}};
'''
# Member getter/setter template
JS_RECORD_MEMBER = '''
// external function for getter/setter of record member
jerry_value_t {RECORD}_{NAME} (const jerry_value_t function_obj,
const jerry_value_t this_val,
const jerry_value_t args_p[],
const jerry_length_t args_cnt)
{{
void* void_ptr;
const jerry_object_native_info_t* type_ptr;
bool has_ptr = jerry_get_object_native_pointer(this_val, &void_ptr, &type_ptr);
if (!has_ptr ||
(type_ptr != &{RECORD}_type_info && type_ptr != &{RECORD}_type_info_static)) {{
char const *msg = "Failed to get native {RECORD} pointer";
return jerry_create_error(JERRY_ERROR_TYPE, (const jerry_char_t *)msg);
}}
{TYPE}* native_ptr = ({TYPE}*)(void_ptr);
{BODY}
return ret_val;
}}
'''
JS_RECORD_GETTER = '''
// external function for record getter
jerry_value_t {RECORD}{NAME}_getter (const jerry_value_t function_obj,
const jerry_value_t this_val,
const jerry_value_t args_p[],
const jerry_length_t args_cnt)
{{
jerry_value_t {NAME}_name = jerry_create_string((const jerry_char_t *) "_{NAME}");
jerry_value_t {NAME}_value = jerry_get_property(this_val, {NAME}_name);
jerry_release_value({NAME}_name);
return {NAME}_value;
}}
'''
# Record constructor
JS_RECORD_CONSTRUCTOR = '''
// external function for record constructor
jerry_value_t {RECORD}_js_constructor (const jerry_value_t function_obj,
const jerry_value_t this_val,
const jerry_value_t args_p[],
const jerry_length_t args_cnt)
{{
(void) {RECORD}_type_info_static;
{TYPE}* native_ptr = ({TYPE}*)calloc(1, sizeof({TYPE}));
if (args_cnt == 0)
{{
jerry_value_t ret_val = {RECORD}_js_creator(native_ptr);
jerry_set_object_native_pointer(ret_val, native_ptr, &{RECORD}_type_info);
return ret_val;
}}
if (args_cnt != 1 || !jerry_value_is_object (args_p[0]))
{{
char const *msg = "Wrong argument for {RECORD}(), expected an object.";
return jerry_create_error (JERRY_ERROR_TYPE, (const jerry_char_t*)msg);
}}
{BODY}
jerry_value_t ret_val = {RECORD}_js_creator(native_ptr);
jerry_set_object_native_pointer(ret_val, native_ptr, &{RECORD}_type_info);
return ret_val;
}}
'''
JS_RECORD_RETURN = '''
jerry_value_t ret_val = {RECORD}_js_creator(native_ptr);
jerry_set_object_native_pointer(ret_val, native_ptr, &{RECORD}_type_info);
return ret_val;
'''
JS_GET_PROP_STRUCT = '''
{TYPE} {NAME}{INIT};
jerry_value_t {NAME}_name = jerry_create_string((const jerry_char_t *) "{NAME}");
jerry_value_t {NAME}_value = jerry_get_property(args_p[0], {NAME}_name);
jerry_release_value({NAME}_name);
if (!jerry_value_is_undefined({NAME}_value))
{{
{GET_VAL}
}}
jerry_release_value({NAME}_value);
'''
JS_GET_PROP_UNION = '''
jerry_value_t {NAME}_name = jerry_create_string((const jerry_char_t *) "{NAME}");
jerry_value_t {NAME}_value = jerry_get_property(args_p[0], {NAME}_name);
jerry_release_value({NAME}_name);
if (!jerry_value_is_undefined({NAME}_value))
{{
{TYPE} {NAME}{INIT};
{GET_VAL}
jerry_release_value({NAME}_value);
{RET}
}}
jerry_release_value({NAME}_value);
'''
JS_INIT_MEMBERS = '''
*native_ptr = ({TYPE}){{{MEMBERS}}};
'''
JS_INIT_MEMBERS_CONST = '''
{TYPE} native = {{{MEMBERS}}};
memcpy(native_ptr, &native, sizeof({TYPE}));
'''
JS_RECORD_CREATOR = '''
jerry_value_t {RECORD}_js_creator ({TYPE}* native_ptr)
{{
jerry_value_t js_obj = jerry_create_object();
{REGIST}
return js_obj;
}}
'''
JS_REGIST_MEMBER = '''
// set record's member as a property to the object
jerry_property_descriptor_t {RECORD}_{NAME}_prop_desc;
jerry_init_property_descriptor_fields (&{RECORD}_{NAME}_prop_desc);
{RECORD}_{NAME}_prop_desc.is_get_defined = true;
{RECORD}_{NAME}_prop_desc.is_set_defined = true;
{RECORD}_{NAME}_prop_desc.getter = jerry_create_external_function ({RECORD}_{NAME}_getter);
{RECORD}_{NAME}_prop_desc.setter = jerry_create_external_function ({RECORD}_{NAME}_setter);
jerry_value_t {RECORD}_{NAME}_prop_name = jerry_create_string ((const jerry_char_t *)"{NAME}");
jerry_value_t {RECORD}_{NAME}_return_value = jerry_define_own_property (js_obj, {RECORD}_{NAME}_prop_name, &{RECORD}_{NAME}_prop_desc);
jerry_release_value ({RECORD}_{NAME}_return_value);
jerry_release_value ({RECORD}_{NAME}_prop_name);
jerry_free_property_descriptor_fields (&{RECORD}_{NAME}_prop_desc);
'''
JS_REGIST_RECORD = '''
// set record as a property to the object
jerry_value_t {NAME}_js = {RECORD}_js_creator (&{REF});
jerry_set_object_native_pointer({NAME}_js, &{REF}, &{RECORD}_type_info_static);
jerry_property_descriptor_t {NAME}_js_prop_desc;
jerry_init_property_descriptor_fields (&{NAME}_js_prop_desc);
{NAME}_js_prop_desc.is_value_defined = true;
{NAME}_js_prop_desc.value = {NAME}_js;
jerry_value_t {NAME}_js_prop_name = jerry_create_string ((const jerry_char_t *)"_{NAME}");
jerry_value_t {NAME}_js_return_value = jerry_define_own_property ({OBJECT}, {NAME}_js_prop_name, &{NAME}_js_prop_desc);
jerry_release_value ({NAME}_js_return_value);
jerry_release_value ({NAME}_js_prop_name);
jerry_free_property_descriptor_fields (&{NAME}_js_prop_desc);
'''
JS_REGIST_CONST_MEMBER = '''
// set a constant member as a property to the object
jerry_property_descriptor_t {NAME}_prop_desc;
jerry_init_property_descriptor_fields (&{NAME}_prop_desc);
{NAME}_prop_desc.is_value_defined = true;
{NAME}_prop_desc.value = {NAME}_js;
jerry_value_t {NAME}_prop_name = jerry_create_string ((const jerry_char_t *)"{NAME}");
jerry_value_t {NAME}_return_value = jerry_define_own_property (js_obj, {NAME}_prop_name, &{NAME}_prop_desc);
jerry_release_value ({NAME}_return_value);
jerry_release_value ({NAME}_prop_name);
jerry_free_property_descriptor_fields (&{NAME}_prop_desc);
'''
JS_REGIST_CONST_RECORD = '''
// set a constant record as a property to the object
jerry_property_descriptor_t {NAME}_prop_desc;
jerry_init_property_descriptor_fields (&{NAME}_prop_desc);
{NAME}_prop_desc.is_get_defined = true;
{NAME}_prop_desc.getter = jerry_create_external_function ({NAME}_getter);
jerry_value_t {NAME}_prop_name = jerry_create_string ((const jerry_char_t *)"{VALUE}");
jerry_value_t {NAME}_return_value = jerry_define_own_property ({OBJECT}, {NAME}_prop_name, &{NAME}_prop_desc);
jerry_release_value ({NAME}_return_value);
jerry_release_value ({NAME}_prop_name);
jerry_free_property_descriptor_fields (&{NAME}_prop_desc);
'''
JS_REGIST_ARR_MEMBER = '''
// set a numeric array member as a property to the object
jerry_value_t {NAME}_buffer = jerry_create_arraybuffer_external (sizeof({TYPE}) * {SIZE}, (uint8_t*)native_ptr->{NAME}, NULL);
jerry_value_t {NAME}_typedarray = jerry_create_typedarray_for_arraybuffer_sz (JERRY_TYPEDARRAY_{ARRAY_TYPE}, {NAME}_buffer, 0, {SIZE});
jerry_property_descriptor_t {NAME}_prop_desc;
jerry_init_property_descriptor_fields (&{NAME}_prop_desc);
{NAME}_prop_desc.is_value_defined = true;
{NAME}_prop_desc.value = {NAME}_typedarray;
jerry_value_t {NAME}_prop_name = jerry_create_string ((const jerry_char_t *)"{NAME}");
jerry_value_t {NAME}_return_value = jerry_define_own_property (js_obj, {NAME}_prop_name, &{NAME}_prop_desc);
jerry_release_value ({NAME}_return_value);
jerry_release_value ({NAME}_prop_name);
jerry_release_value ({NAME}_buffer);
jerry_free_property_descriptor_fields (&{NAME}_prop_desc);
'''
# Template for a jerry_external_handler_t type function
JS_EXT_FUNC = '''
// external function for API functions or for getters / setters
jerry_value_t {NAME} (const jerry_value_t function_obj,
const jerry_value_t this_val,
const jerry_value_t args_p[],
const jerry_length_t args_cnt)
{{
{BODY}
return ret_val;
}}
'''
# Template for checking the count of the external function's arguments
JS_CHECK_ARG_COUNT = '''
// check the count of the external function's arguments
if (args_cnt != {COUNT})
{{
char const *msg = "Wrong argument count for {FUNC}(), expected {COUNT}.";
return jerry_create_error (JERRY_ERROR_TYPE, (const jerry_char_t*)msg);
}}
'''
# Templates for checking the type of a jerry_value_t variable
JS_CHECK_TYPE = '''
// check the type of a jerry_value_t variable
if (!jerry_value_is_{TYPE} ({JVAL}))
{{
char const *msg = "Wrong argument type for {FUNC}(), expected {TYPE}.";
return jerry_create_error (JERRY_ERROR_TYPE, (const jerry_char_t*)msg);
}}
'''
JS_CHECK_POINTER = '''
// check the type of a jerry_value_t variable
if (!jerry_value_is_{TYPE} ({JVAL}) && !jerry_value_is_null ({JVAL}))
{{
char const *msg = "Wrong argument type for {FUNC}(), expected {TYPE} or null.";
return jerry_create_error (JERRY_ERROR_TYPE, (const jerry_char_t*)msg);
}}
'''
# Templates for the module initialization function
INIT_FUNC = '''
// init function for the module
jerry_value_t Init_{NAME}()
{{
{BODY}
return object;
}}
'''
INIT_REGIST_FUNC = '''
// set an external function as a property to the module object
jerry_value_t {NAME}_name = jerry_create_string ((const jerry_char_t*)"{FUNC}");
jerry_value_t {NAME}_func = jerry_create_external_function ({NAME}_handler);
jerry_value_t {NAME}_ret = jerry_set_property ({OBJECT}, {NAME}_name, {NAME}_func);
jerry_release_value ({NAME}_name);
jerry_release_value ({NAME}_func);
jerry_release_value ({NAME}_ret);
'''
INIT_REGIST_RECORD = '''
// set a constructor as a property to the module object
jerry_value_t {NAME}_name = jerry_create_string ((const jerry_char_t*)"{RECORD}");
jerry_value_t {NAME}_func = jerry_create_external_function ({NAME}_js_constructor);
jerry_value_t {NAME}_ret = jerry_set_property ({OBJECT}, {NAME}_name, {NAME}_func);
jerry_release_value ({NAME}_name);
jerry_release_value ({NAME}_func);
jerry_release_value ({NAME}_ret);
'''
INIT_REGIST_ENUM = '''
// set an enum constant as a property to the module object
jerry_property_descriptor_t {NAME}_prop_desc;
jerry_init_property_descriptor_fields (&{NAME}_prop_desc);
{NAME}_prop_desc.is_value_defined = true;
{NAME}_prop_desc.value = jerry_create_number ({REF});
jerry_value_t {NAME}_name = jerry_create_string ((const jerry_char_t *)"{ENUM}");
jerry_value_t {NAME}_ret = jerry_define_own_property ({OBJECT}, {NAME}_name, &{NAME}_prop_desc);
jerry_release_value ({NAME}_ret);
jerry_release_value ({NAME}_name);
jerry_free_property_descriptor_fields (&{NAME}_prop_desc);
'''
INIT_REGIST_VALUE = '''
// set a global variable as a property to the module object
jerry_property_descriptor_t {NAME}_prop_desc;
jerry_init_property_descriptor_fields (&{NAME}_prop_desc);
{NAME}_prop_desc.is_get_defined = true;
{NAME}_prop_desc.is_set_defined = true;
{NAME}_prop_desc.getter = jerry_create_external_function ({NAME}_getter);
{NAME}_prop_desc.setter = jerry_create_external_function ({NAME}_setter);
jerry_value_t {NAME}_prop_name = jerry_create_string ((const jerry_char_t *)"{VALUE}");
jerry_value_t {NAME}_return_value = jerry_define_own_property ({OBJECT}, {NAME}_prop_name, &{NAME}_prop_desc);
jerry_release_value ({NAME}_return_value);
jerry_release_value ({NAME}_prop_name);
jerry_free_property_descriptor_fields (&{NAME}_prop_desc);
'''
INIT_REGIST_CONST = '''
// set a global constant or a macro as a property to the module object
jerry_property_descriptor_t {NAME}_prop_desc;
jerry_init_property_descriptor_fields (&{NAME}_prop_desc);
{NAME}_prop_desc.is_value_defined = true;
{NAME}_prop_desc.value = {NAME}_js;
jerry_value_t {NAME}_prop_name = jerry_create_string ((const jerry_char_t *)"{VALUE}");
jerry_value_t {NAME}_return_value = jerry_define_own_property ({OBJECT}, {NAME}_prop_name, &{NAME}_prop_desc);
jerry_release_value ({NAME}_return_value);
jerry_release_value ({NAME}_prop_name);
jerry_free_property_descriptor_fields (&{NAME}_prop_desc);
'''
INIT_REGIST_NUM_ARR = '''
// set a global numeric array as a property to the module object
jerry_value_t {NAME}_buffer = jerry_create_arraybuffer_external (sizeof({TYPE}) * {SIZE}, (uint8_t*){REF}, NULL);
jerry_value_t {NAME}_typedarray = jerry_create_typedarray_for_arraybuffer_sz (JERRY_TYPEDARRAY_{ARRAY_TYPE}, {NAME}_buffer, 0, {SIZE});
jerry_property_descriptor_t {NAME}_prop_desc;
jerry_init_property_descriptor_fields (&{NAME}_prop_desc);
{NAME}_prop_desc.is_value_defined = true;
{NAME}_prop_desc.value = {NAME}_typedarray;
jerry_value_t {NAME}_prop_name = jerry_create_string ((const jerry_char_t *)"{ARR}");
jerry_value_t {NAME}_return_value = jerry_define_own_property ({OBJECT}, {NAME}_prop_name, &{NAME}_prop_desc);
jerry_release_value ({NAME}_return_value);
jerry_release_value ({NAME}_prop_name);
jerry_release_value ({NAME}_buffer);
jerry_free_property_descriptor_fields (&{NAME}_prop_desc);
'''
INIT_CREATE_OBJECT = '''
jerry_value_t {NAME}object = jerry_create_object();
'''
INIT_REGIST_OBJECT = '''
// set a namespace as a property to another namespace object
jerry_property_descriptor_t {NAME}_prop_desc;
jerry_init_property_descriptor_fields (&{NAME}_prop_desc);
{NAME}_prop_desc.is_value_defined = true;
{NAME}_prop_desc.value = {NAME}object;
jerry_value_t {NAME}_prop_name = jerry_create_string ((const jerry_char_t *)"{REF}");
jerry_value_t {NAME}_return_value = jerry_define_own_property ({OBJECT}, {NAME}_prop_name, &{NAME}_prop_desc);
jerry_release_value ({NAME}_return_value);
jerry_release_value ({NAME}_prop_name);
jerry_free_property_descriptor_fields (&{NAME}_prop_desc);
'''
# Template for including the required headers
INCLUDE = '''
#include <stdlib.h>
#include <string.h>
#include "jerryscript.h"
#include "{HEADER}"
'''
# Templates for modules.json and module.cmake
MODULES_JSON = '''
{{
"modules": {{
"{NAME}_module": {{
"native_files": ["src/{NAME}_js_binding.c"],
"init": "Init_{NAME}",
"cmakefile": "{CMAKE}"
}}
}}
}}
'''
MODULE_CMAKE = '''
set(MODULE_NAME "{NAME}_module")
link_directories(${{MODULE_DIR}})
list(APPEND MODULE_LIBS {LIBRARY})
'''
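# Illustrative usage sketch (not part of the original generator): each template
# above is a plain Python format string, with literal C braces escaped as '{{'
# and '}}', so rendering is a single str.format() call. The placeholder values
# below ("number", "args_p[0]", "foo") are made-up examples.
if __name__ == "__main__":
    example_check = JS_CHECK_TYPE.format(TYPE="number", JVAL="args_p[0]", FUNC="foo")
    print(example_check)  # prints the C snippet that type-checks foo()'s first argument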
|
dbaas/tsuru/migrations/0002_auto__del_bind.py | didindinn/database-as-a-service | 303 | 11083709 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Bind'
db.delete_table(u'tsuru_bind')
def backwards(self, orm):
# Adding model 'Bind'
db.create_table(u'tsuru_bind', (
('service_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('service_hostname', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('databaseinfra', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'binds', null=True, to=orm['physical.DatabaseInfra'], on_delete=models.PROTECT, blank=True)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'tsuru', ['Bind'])
models = {
}
complete_apps = ['tsuru'] |
img2dataset/__init__.py | kanttouchthis/img2dataset | 482 | 11083714 |
"""Img2dataset"""
from img2dataset.main import main
from img2dataset.main import download
|
torchtext/experimental/__init__.py | parmeet/text | 3,172 | 11083722 |
from . import datasets
from . import transforms
from . import models
__all__ = ['datasets', 'transforms', 'models']
|
rlschool/quadrupedal/envs/__init__.py | HaojieSHI98/RLSchool | 169 | 11083727 |
from rlschool.quadrupedal.envs.gym_envs import * |
leet/dynamic/maxProfit4.py | monishshah18/python-cp-cheatsheet | 140 | 11083742 |
"""
time: k * p
space: p
"""
class Solution:
def maxProfit(self, k: int, prices: List[int]) -> int:
if len(prices) <= 1:
return 0
dp = [0] * len(prices)
        # k exceeds the number of prices, so transactions are effectively
        # unlimited: greedily sum every positive price difference
        if k > len(prices):
            profit = 0
            for i in range(1, len(prices)):
                profit = max(profit, profit + prices[i] - prices[i-1])
            return profit
        # otherwise run k passes of the single-transaction DP over dp[]
        for _ in range(k):
val = 0
for i in range(1, len(prices)):
val = max(dp[i], val + prices[i]-prices[i-1])
dp[i] = max(val, dp[i-1])
return dp[-1]
class Solution:
def maxProfit(self, k: int, prices: List[int]) -> int:
        # k covers every day, so transactions are effectively unlimited:
        # t0 = best profit holding no stock, t1 = best profit holding one stock
        if len(prices) <= k:
            t0, t1 = 0, float('-inf')
            for p in prices:
                t0old = t0
                t0 = max(t0, t1 + p)
                t1 = max(t1, t0old - p)
            return t0
        # t0[i] / t1[i]: best profit using at most i transactions while
        # holding no stock / holding one stock, respectively
        t0 = [0] * (k+1)
        t1 = [float('-inf')] * (k+1)
        for p in prices:
            for i in range(k, 0, -1):
                t0[i] = max(t0[i], t1[i] + p)
                t1[i] = max(t1[i], t0[i-1] - p)
return t0[k] |
wagtail/core/management/commands/create_log_entries_from_revisions.py | brownaa/wagtail | 8,851 | 11083745 |
from django.core.management.base import BaseCommand
from wagtail.core.models import PageLogEntry, PageRevision
def get_comparison(page, revision_a, revision_b):
comparison = page.get_edit_handler().get_comparison()
comparison = [comp(revision_a, revision_b) for comp in comparison]
comparison = [comp for comp in comparison if comp.has_changed()]
return comparison
class Command(BaseCommand):
def handle(self, *args, **options):
current_page_id = None
missing_models_content_type_ids = set()
for revision in PageRevision.objects.order_by('page_id', 'created_at').select_related('page').iterator():
# This revision is for a page type that is no longer in the database. Bail out early.
if revision.page.content_type_id in missing_models_content_type_ids:
continue
if not revision.page.specific_class:
missing_models_content_type_ids.add(revision.page.content_type_id)
continue
is_new_page = revision.page_id != current_page_id
if is_new_page:
# reset previous revision when encountering a new page.
previous_revision = None
has_content_changes = False
current_page_id = revision.page_id
if not PageLogEntry.objects.filter(revision=revision).exists():
try:
current_revision_as_page = revision.as_page_object()
except Exception:
# restoring old revisions may fail if e.g. they have an on_delete=PROTECT foreign key
# to a no-longer-existing model instance. We cannot compare changes between two
# non-restorable revisions, although we can at least infer that there was a content
# change at the point that it went from restorable to non-restorable or vice versa.
current_revision_as_page = None
published = revision.id == revision.page.live_revision_id
if previous_revision is not None:
try:
previous_revision_as_page = previous_revision.as_page_object()
except Exception:
previous_revision_as_page = None
if previous_revision_as_page is None and current_revision_as_page is None:
# both revisions failed to restore - unable to determine presence of content changes
has_content_changes = False
elif previous_revision_as_page is None or current_revision_as_page is None:
# one or the other revision failed to restore, which indicates a content change
has_content_changes = True
else:
# Must use .specific so the comparison picks up all fields, not just base Page ones.
comparison = get_comparison(revision.page.specific, previous_revision_as_page, current_revision_as_page)
has_content_changes = len(comparison) > 0
if (
current_revision_as_page is not None
and current_revision_as_page.live_revision_id == previous_revision.id
):
# Log the previous revision publishing.
self.log_page_action('wagtail.publish', previous_revision, True)
if is_new_page or has_content_changes or published:
if is_new_page:
action = 'wagtail.create'
elif published:
action = 'wagtail.publish'
else:
action = 'wagtail.edit'
if published and has_content_changes:
# When publishing, also log the 'draft save', but only if there have been content changes
self.log_page_action('wagtail.edit', revision, has_content_changes)
self.log_page_action(action, revision, has_content_changes)
previous_revision = revision
def log_page_action(self, action, revision, has_content_changes):
PageLogEntry.objects.log_action(
instance=revision.page.specific,
action=action,
data='',
revision=None if action == 'wagtail.create' else revision,
user=revision.user,
timestamp=revision.created_at,
content_changed=has_content_changes,
)
|
ding/worker/collector/tests/speed_test/utils.py | sailxjx/DI-engine | 464 | 11083792 |
import numpy as np
def random_change(number):
return number * (1 + (np.random.random() - 0.5) * 0.6)
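# Illustrative check (not part of DI-engine): the factor (1 + (rand - 0.5) * 0.6)
# lies in [0.7, 1.3), so random_change(100) returns a value in [70, 130).
if __name__ == "__main__":
    assert 70 <= random_change(100) < 130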
|
algorithms/maths/next_bigger.py | zhengli0817/algorithms | 22,426 | 11083812 | """
I just bombed an interview and made pretty much zero
progress on my interview question.
Given a number, find the next higher number which has the
exact same set of digits as the original number.
For example: given 38276 return 38627.
given 99999 return -1. (no such number exists)
Condensed mathematical description:
Find largest index i such that array[i − 1] < array[i].
(If no such i exists, then this is already the last permutation.)
Find largest index j such that j ≥ i and array[j] > array[i − 1].
Swap array[j] and array[i − 1].
Reverse the suffix starting at array[i].
"""
import unittest
def next_bigger(num):
digits = [int(i) for i in str(num)]
idx = len(digits) - 1
while idx >= 1 and digits[idx-1] >= digits[idx]:
idx -= 1
if idx == 0:
return -1 # no such number exists
pivot = digits[idx-1]
swap_idx = len(digits) - 1
while pivot >= digits[swap_idx]:
swap_idx -= 1
digits[swap_idx], digits[idx-1] = digits[idx-1], digits[swap_idx]
digits[idx:] = digits[:idx-1:-1] # prefer slicing instead of reversed(digits[idx:])
return int(''.join(str(x) for x in digits))
class TestSuite(unittest.TestCase):
def test_next_bigger(self):
self.assertEqual(next_bigger(38276), 38627)
self.assertEqual(next_bigger(12345), 12354)
self.assertEqual(next_bigger(1528452), 1528524)
self.assertEqual(next_bigger(138654), 143568)
self.assertEqual(next_bigger(54321), -1)
self.assertEqual(next_bigger(999), -1)
self.assertEqual(next_bigger(5), -1)
if __name__ == '__main__':
unittest.main()
|
third_party/blink/tools/blinkpy/common/config/PRESUBMIT.py | chromium/chromium | 14,668 | 11083816 |
# Copyright (c) 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""config/ presubmit script for Blink.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
USE_PYTHON3 = True
PRESUBMIT_VERSION = '2.0.0'
def CheckEnsureSpecifier(input_api, output_api):
"""Ensure the first specifiers for the builders are valid.
"""
this_dir = input_api.PresubmitLocalPath()
builders_json_file = input_api.os_path.join(this_dir,
"builders.json")
generic_test_expectation = input_api.os_path.join(this_dir,
'..', '..', '..', '..', 'web_tests', 'TestExpectations')
if builders_json_file in input_api.AbsoluteLocalPaths():
with open(generic_test_expectation) as f:
tags = f.readline().rstrip()
with open(builders_json_file) as f:
builders = input_api.json.load(f)
for key, value in builders.items():
tag = value["specifiers"][0]
if tag == "Android" or tag == "Trusty":
continue
if tag not in tags:
error_message = (
'This CL updates builders.json, but the specifier %s '
'is not a valid tag in TestExpectations' % tag)
if input_api.is_committing:
return [output_api.PresubmitError(error_message)]
else:
return [output_api.PresubmitPromptWarning(error_message)]
return []
|
flask_appbuilder/validators.py | tianhe1986/Flask-AppBuilder | 3,862 | 11083849 | import re
from typing import Optional
from flask import current_app
from flask_appbuilder.exceptions import PasswordComplexityValidationError
from flask_appbuilder.models.base import BaseInterface
from flask_babel import gettext
from wtforms import Field, Form, ValidationError
password_complexity_regex = re.compile(
r"""(
^(?=.*[A-Z].*[A-Z]) # at least two capital letters
(?=.*[^0-9a-zA-Z]) # at least one of these special characters
(?=.*[0-9].*[0-9]) # at least two numeric digits
(?=.*[a-z].*[a-z].*[a-z]) # at least three lower case letters
.{10,} # at least 10 total characters
$
)""",
re.VERBOSE,
)
class Unique:
"""
WTForm field validator. Checks if field value is unique against
a specified table field.
"""
field_flags = ("unique",)
def __init__(
self, datamodel: BaseInterface, col_name: str, message: Optional[str] = None
) -> None:
"""
:param datamodel: The datamodel class, abstract layer for backend
:param col_name: The unique column name.
:param message: The error message.
"""
self.datamodel = datamodel
self.col_name = col_name
self.message = message
def __call__(self, form: Form, field: Field) -> None:
filters = self.datamodel.get_filters().add_filter(
self.col_name, self.datamodel.FilterEqual, field.data
)
count, obj = self.datamodel.query(filters)
if count > 0:
# only test if Unique, if pk value is different on update.
if not hasattr(form, "_id") or form._id != self.datamodel.get_keys(obj)[0]:
if self.message is None:
self.message = field.gettext(u"Already exists.")
raise ValidationError(self.message)
class PasswordComplexityValidator:
"""
WTForm field validator. Calls a custom password validator, useful for imposing
password complexity for database Auth users.
"""
def __call__(self, form: Form, field: Field) -> None:
if current_app.config.get("FAB_PASSWORD_COMPLEXITY_ENABLED", False):
password_complexity_validator = current_app.config.get(
"FAB_PASSWORD_COMPLEXITY_VALIDATOR", None
)
if password_complexity_validator is not None:
try:
password_complexity_validator(field.data)
except PasswordComplexityValidationError as exc:
raise ValidationError(str(exc))
else:
try:
default_password_complexity(field.data)
except PasswordComplexityValidationError as exc:
raise ValidationError(str(exc))
def default_password_complexity(password: str) -> None:
"""
FAB's default password complexity validator, set FAB_PASSWORD_COMPLEXITY_ENABLED
to True to enable it
"""
match = re.search(password_complexity_regex, password)
if not match:
raise PasswordComplexityValidationError(
gettext(
"Must have at least two capital letters,"
" one special character, two digits, three lower case letters and"
" a minimal length of 10."
)
)
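# Illustrative check (not part of Flask-AppBuilder; the sample passwords are
# arbitrary): "PAssword12!" has two capitals, one special character, two digits,
# three lowercase letters and 11 characters, so it satisfies every lookahead of
# password_complexity_regex, while "password" does not.
if __name__ == "__main__":
    assert re.search(password_complexity_regex, "PAssword12!") is not None
    assert re.search(password_complexity_regex, "password") is None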
|
corehq/util/itertools.py | dimagilg/commcare-hq | 471 | 11083850 |
def zip_with_gaps(all_items, some_items, all_items_key=None, some_items_key=None):
"""
Yields pairs of items from `all_items` and `some_items` where item
keys match.
Keys do not need to be unique. Keys in `all_items` must be a
superset of keys in `some_items`. If key functions are not given,
items are compared directly. Sequence is important: Items are
assumed to be sorted.
>>> long_list = ['Alice', 'Apple', 'Bengal', 'Carrot', 'Daring', 'Danger', 'Dakar', 'Electric']
>>> short_list = ['Cabernet', 'Daedalus', 'Daimler', 'Dog']
>>> list(zip_with_gaps(long_list, short_list, lambda x: x[0], lambda x: x[0]))
[('Carrot', 'Cabernet'), ('Daring', 'Daedalus'), ('Danger', 'Daimler'), ('Dakar', 'Dog')]
"""
if all_items_key is None:
all_items_key = lambda x: x # noqa: E731
if some_items_key is None:
some_items_key = lambda x: x # noqa: E731
all_iterable = iter(all_items)
for s_item in some_items:
for a_item in all_iterable:
if some_items_key(s_item) == all_items_key(a_item):
yield (a_item, s_item)
break
|
dev/Gems/CloudGemAWSScriptBehaviors/AWS/test/test_settings.py | brianherrera/lumberyard | 1,738 | 11083856 |
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #4 $
import unittest
import mock
import pkgutil
from .. import get_setting
class TestSettings(unittest.TestCase):
def test_returns_None_when_no_settings_module(self):
actual = get_setting('TestLogicalName')
self.assertIsNone(actual)
def test_loads_settings_module(self):
settings = {
'TestLogicalName': 'TestPhysicalName'
}
with mock.patch.object(pkgutil, 'find_loader') as mock_find_loader:
mock_find_loader.return_value = mock.MagicMock()
mock_find_loader.return_value.load_module = mock.MagicMock()
mock_find_loader.return_value.load_module.return_value = mock.MagicMock()
mock_find_loader.return_value.load_module.return_value.settings = settings
self.assertEquals(get_setting('TestLogicalName'), 'TestPhysicalName')
self.assertEquals(get_setting('NotDefined'), None)
mock_find_loader.assert_called_once_with('CloudCanvas.settings')
mock_find_loader.return_value.load_module.called_once_with('CloudCanvas.settings')
|
iPERCore/services/run_imitator.py | JSssssss/iPERCore | 2,223 | 11083873 |
# Copyright (c) 2020-2021 impersonator.org authors (<NAME> and <NAME>). All rights reserved.
from iPERCore.models import ModelsFactory
from iPERCore.tools.utils.signals.smooth import temporal_smooth_smpls
from iPERCore.tools.utils.filesio.persistence import clear_dir
from iPERCore.tools.utils.multimedia.video import fuse_src_ref_multi_outputs
from iPERCore.services.preprocess import preprocess
from iPERCore.services.personalization import personalize
from iPERCore.services.options.process_info import ProcessInfo
from iPERCore.services.options.meta_info import MetaImitateOutput
from iPERCore.services.base_runner import (
get_src_info_for_inference,
add_hands_params_to_smpl,
add_special_effect,
add_bullet_time_effect
)
def call_imitator_inference(opt, imitator, meta_output, ref_paths,
ref_smpls, visualizer, use_selected_f2pts=False):
"""
Args:
opt:
imitator:
meta_output:
ref_paths:
ref_smpls:
visualizer:
use_selected_f2pts:
Returns:
outputs (List[Tuple[str]]):
"""
# if there are more than 10 frames, then we will use temporal smooth of smpl.
if len(ref_smpls) > 10:
ref_smpls = temporal_smooth_smpls(ref_smpls, pose_fc=meta_output.pose_fc, cam_fc=meta_output.cam_fc)
out_imgs_dir = clear_dir(meta_output.out_img_dir)
effect_info = meta_output.effect_info
view_directions = effect_info["View"]
bullet_time_list = effect_info["BT"]
# check use multi-view outputs
if len(view_directions) == 0:
# if do not use multi-view outputs, only add bullet-time effects
ref_smpls, ref_imgs_paths = add_bullet_time_effect(ref_smpls, ref_paths, bt_list=bullet_time_list)
# add hands parameters to smpl
ref_smpls = add_hands_params_to_smpl(ref_smpls, imitator.body_rec.np_hands_mean)
# run imitator's inference function
outputs = imitator.inference(tgt_smpls=ref_smpls, cam_strategy=opt.cam_strategy,
output_dir=out_imgs_dir, prefix="pred_", visualizer=visualizer,
verbose=True, use_selected_f2pts=use_selected_f2pts)
outputs = list(zip(outputs))
else:
outputs = []
ref_imgs_paths = ref_paths
for i, view in enumerate(view_directions):
# otherwise, we will add both multi-view and bullet-time effects
ref_view_smpls, ref_imgs_paths = add_special_effect(ref_smpls, ref_paths,
view_dir=view, bt_list=bullet_time_list)
# add hands parameters to smpl
ref_view_smpls = add_hands_params_to_smpl(ref_view_smpls, imitator.body_rec.np_hands_mean)
# run imitator's inference function
view_outputs = imitator.inference(tgt_smpls=ref_view_smpls, cam_strategy=opt.cam_strategy,
output_dir=out_imgs_dir, prefix=f"pred_{i}_{int(view)}_",
visualizer=visualizer, verbose=True,
use_selected_f2pts=use_selected_f2pts)
outputs.append(view_outputs)
outputs = list(zip(*outputs))
results_dict = {
"outputs": outputs,
"ref_imgs_paths": ref_imgs_paths
}
return results_dict
def imitate(opt):
"""
Args:
opt:
Returns:
all_meta_outputs (list of MetaOutput):
"""
print("Step 3: running imitator.")
if opt.ip:
from iPERCore.tools.utils.visualizers.visdom_visualizer import VisdomVisualizer
visualizer = VisdomVisualizer(env=opt.model_id, ip=opt.ip, port=opt.port)
else:
visualizer = None
# set imitator
imitator = ModelsFactory.get_by_name("imitator", opt)
meta_src_proc = opt.meta_data["meta_src"]
meta_ref_proc = opt.meta_data["meta_ref"]
all_meta_outputs = []
for i, meta_src in enumerate(meta_src_proc):
"""
meta_input:
path: /p300/tpami/neuralAvatar/sources/fange_1/fange_1_ns=2
bg_path: /p300/tpami/neuralAvatar/sources/fange_1/IMG_7225.JPG
name: fange_1
primitives_dir: ../tests/debug/primitives/fange_1
processed_dir: ../tests/debug/primitives/fange_1/processed
vid_info_path: ../tests/debug/primitives/fange_1/processed/vid_info.pkl
"""
src_proc_info = ProcessInfo(meta_src)
src_proc_info.deserialize()
src_info = src_proc_info.convert_to_src_info(num_source=opt.num_source)
src_info_for_inference = get_src_info_for_inference(opt, src_info)
# source setup
imitator.source_setup(
src_path=src_info_for_inference["paths"],
src_smpl=src_info_for_inference["smpls"],
masks=src_info_for_inference["masks"],
bg_img=src_info_for_inference["bg"],
offsets=src_info_for_inference["offsets"],
links_ids=src_info_for_inference["links"],
visualizer=visualizer
)
for j, meta_ref in enumerate(meta_ref_proc):
"""
meta_input:
path: /p300/tpami/neuralAvatar/references/videos/bantangzhuyi_1.mp4
bg_path:
name: bantangzhuyi_1
audio: /p300/tpami/neuralAvatar/references/videos/bantangzhuyi_1.mp3
fps: 30.02
pose_fc: 400.0
cam_fc: 150.0
primitives_dir: ../tests/debug/primitives/bantangzhuyi_1
processed_dir: ../tests/debug/primitives/bantangzhuyi_1/processed
vid_info_path: ../tests/debug/primitives/bantangzhuyi_1/processed/vid_info.pkl
"""
meta_output = MetaImitateOutput(meta_src, meta_ref)
ref_proc_info = ProcessInfo(meta_ref)
ref_proc_info.deserialize()
ref_info = ref_proc_info.convert_to_ref_info()
results_dict = call_imitator_inference(
opt, imitator, meta_output,
ref_paths=ref_info["images"],
ref_smpls=ref_info["smpls"],
visualizer=visualizer
)
# save to video
fuse_src_ref_multi_outputs(
meta_output.out_mp4, src_info_for_inference["paths"],
results_dict["ref_imgs_paths"], results_dict["outputs"],
audio_path=meta_output.audio, fps=meta_output.fps,
image_size=opt.image_size, pool_size=opt.num_workers
)
all_meta_outputs.append(meta_output)
for meta_output in all_meta_outputs:
print(meta_output)
print("Step 3: running imitator done.")
return all_meta_outputs
def run_imitator(opt):
# 1. prepreocess
successful = preprocess(opt)
if successful:
# 2. personalization
personalize(opt)
# 3. imitate
all_meta_outputs = imitate(opt)
else:
all_meta_outputs = []
return all_meta_outputs
if __name__ == "__main__":
from iPERCore.services.options.options_inference import InferenceOptions
OPT = InferenceOptions().parse()
run_imitator(opt=OPT)
|
locations/spiders/tgifridays.py | jleedev/alltheplaces | 297 | 11083878 | # -*- coding: utf-8 -*-
import datetime
import re
import json
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
DAY_MAPPING = {
'Monday': 'Mo',
'Tuesday': 'Tu',
'Wednesday': 'We',
'Thursday': 'Th',
'Friday': 'Fr',
'Saturday': 'Sa',
'Sunday': 'Su'
}
class TGIFridaySpider(scrapy.Spider):
download_delay = 0.2
name = "tgifridays"
item_attributes = { 'brand': "TGI Friday's" }
allowed_domains = ["tgifridays.com"]
start_urls = (
'https://locations.tgifridays.com/sitemap.xml',
)
def parse_hours(self, hours):
opening_hours = OpeningHours()
for hour in hours:
if hour["opens"] in ("Closed", ""):
continue
elif hour["closes"] in ("Closed", ""):
continue
else:
opening_hours.add_range(
day=hour["dayOfWeek"].replace('http://schema.org/', '')[:2],
open_time=hour["opens"],
close_time=hour["closes"],
time_format='%I:%M%p',
)
return opening_hours.as_opening_hours()
def parse_store(self, response):
# The JSON blob has an extra "}\r\n" at the end
data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first()[:-3])
properties = {
'addr_full': data['address']['streetAddress'],
'phone': data['telephone'],
'city': data['address']['addressLocality'],
'state': data['address']['addressRegion'],
'postcode': data['address']['postalCode'],
'country': data['address']['addressCountry'],
'ref': data['@id'],
'website': data['url'],
'lat': data['geo']['latitude'],
'lon': data['geo']['longitude'],
'name': data['name'],
}
hours = self.parse_hours(data.get("openingHoursSpecification", []))
if hours:
properties["opening_hours"] = hours
yield GeojsonPointItem(**properties)
def parse(self, response):
response.selector.remove_namespaces()
city_urls = response.xpath('//url/loc/text()').extract()
for path in city_urls:
if path.count('/') == 5:
yield scrapy.Request(
path.strip(),
callback=self.parse_store,
)
|
hdfs3/tests/test_conf.py | HirniMeshram1/hdfs3 | 143 | 11083908 |
import os
import pytest
import shutil
import tempfile
from hdfs3.conf import conf, guess_config, conf_defaults, hdfs_conf
from hdfs3 import HDFileSystem
@pytest.yield_fixture()
def no_conf():
# clear environment
hcd = os.environ.pop('HADOOP_CONF_DIR', None)
hi = os.environ.pop('HADOOP_INSTALL', None)
lh3c = os.environ.pop('LIBHDFS3_CONF', None)
yield
# carefully reset
if hcd:
os.environ['HADOOP_CONF_DIR'] = hcd
else:
os.environ.pop('HADOOP_CONF_DIR', None)
if hi:
os.environ['HADOOP_INSTALL'] = hi
else:
os.environ.pop('HADOOP_INSTALL', None)
if lh3c:
os.environ['LIBHDFS3_CONF'] = lh3c
else:
os.environ.pop('LIBHDFS3_CONF', None)
@pytest.yield_fixture()
def simple_conf_file(no_conf):
d = str(tempfile.mkdtemp())
fn = os.path.join(d, 'hdfs-site.xml')
with open(fn, 'w') as fout:
fout.write(example_conf)
yield fn
shutil.rmtree(d, True)
def test_no_conf(no_conf):
if 'host' in conf:
assert conf['host'] is not None
if 'port' in conf:
assert conf['port'] is not None
def test_simple_pars(no_conf):
hdfs = HDFileSystem('blah', 1, autoconf=False, connect=False)
assert hdfs.conf['host'] == 'blah'
assert hdfs.conf['port'] == 1
hdfs = HDFileSystem('blah', 1, autoconf=True, connect=False)
assert hdfs.conf['host'] == 'blah'
assert hdfs.conf['port'] == 1
hdfs = HDFileSystem('blah', 1, autoconf=True, connect=False,
pars={'port': 2})
assert hdfs.conf['port'] == 1
hdfs = HDFileSystem(autoconf=True, connect=False)
assert hdfs.conf['host'] == conf_defaults['host']
assert hdfs.conf['port'] == conf_defaults['port']
with pytest.raises(Exception):
HDFileSystem(autoconf=False, connect=True)
hdfs = HDFileSystem(host='blah', autoconf=True, connect=False)
assert hdfs.conf['host'] == 'blah'
hdfs = HDFileSystem(connect=False, pars={'port': 1})
assert hdfs.conf['port'] == 1
hdfs = HDFileSystem(connect=False, pars={'port': 1}, port=2)
assert hdfs.conf['port'] == 2
def test_with_libhdfs3_conf(simple_conf_file):
os.environ['LIBHDFS3_CONF'] = simple_conf_file
guess_config()
assert conf['host'] == 'this.place'
assert conf['port'] == 9999
assert conf['dfs.replication'] == '1'
def test_with_hadoop_conf(simple_conf_file):
dname = os.path.dirname(simple_conf_file)
os.environ['HADOOP_CONF_DIR'] = dname
guess_config()
assert os.environ['LIBHDFS3_CONF'] == simple_conf_file
assert conf['host'] == 'this.place'
assert conf['port'] == 9999
assert conf['dfs.replication'] == '1'
def test_with_file(simple_conf_file):
hdfs_conf(os.path.dirname(simple_conf_file))
assert conf['host'] == 'this.place'
assert conf['port'] == 9999
assert conf['dfs.replication'] == '1'
def test_default_port_and_host(no_conf):
guess_config()
hdfs = HDFileSystem(connect=False)
assert hdfs.host == conf_defaults['host']
assert hdfs.port == conf_defaults['port']
def test_token_and_ticket_cache_in_same_time():
ticket_cache = "/tmp/krb5cc_0"
token = "abc"
with pytest.raises(RuntimeError) as ctx:
HDFileSystem(connect=False, ticket_cache=ticket_cache, token=token)
msg = "It is not possible to use ticket_cache and token at same time"
assert msg in str(ctx.value)
example_conf = """
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>dfs.namenode.rpc-address</name>
<value>this.place:9999</value>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
</property>
<property>
<name>dfs.domain.socket.path</name>
<value>/var/lib/hadoop-hdfs/dn_socket</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.skip.checksum</name>
<value>true</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>false</value>
</property>
</configuration>
"""
|
routes/admin.py | Junhua9981/WebProjectFinal | 120 | 11083912 |
from fastapi import Body, APIRouter
from fastapi.encoders import jsonable_encoder
from fastapi.security import HTTPBasicCredentials
from passlib.context import CryptContext
from database.database import admin_collection
from auth.jwt_handler import signJWT
from database.database import add_admin
from models.admin import AdminModel
router = APIRouter()
hash_helper = CryptContext(schemes=["bcrypt"])
@router.post("/login")
async def admin_login(admin_credentials: HTTPBasicCredentials = Body(...)):
# NEW CODE
admin_user = await admin_collection.find_one({"email": admin_credentials.username}, {"_id": 0})
if (admin_user):
        password = hash_helper.verify(
admin_credentials.password, admin_user["password"])
if (password):
return signJWT(admin_credentials.username)
return "Incorrect email or password"
return "Incorrect email or password"
@router.post("/")
async def admin_signup(admin: AdminModel = Body(...)):
admin_exists = await admin_collection.find_one({"email": admin.email}, {"_id": 0})
if(admin_exists):
return "Email already exists"
    admin.password = hash_helper.hash(admin.password)
new_admin = await add_admin(jsonable_encoder(admin))
return new_admin |
python/alibiexplainer/alibiexplainer/explainer.py | ittus/kserve | 1,146 | 11083922 |
# Copyright 2021 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import asyncio
from enum import Enum
from typing import List, Any, Mapping, Union, Dict
import kserve
import numpy as np
from alibiexplainer.anchor_images import AnchorImages
from alibiexplainer.anchor_tabular import AnchorTabular
from alibiexplainer.anchor_text import AnchorText
from alibiexplainer.explainer_wrapper import ExplainerWrapper
logging.basicConfig(level=kserve.constants.KSERVE_LOGLEVEL)
class ExplainerMethod(Enum):
anchor_tabular = "AnchorTabular"
anchor_images = "AnchorImages"
anchor_text = "AnchorText"
def __str__(self):
return self.value
class AlibiExplainer(kserve.Model):
def __init__( # pylint:disable=too-many-arguments
self,
name: str,
predictor_host: str,
method: ExplainerMethod,
config: Mapping,
explainer: object = None,
):
super().__init__(name)
self.predictor_host = predictor_host
logging.info("Predict URL set to %s", self.predictor_host)
self.method = method
if self.method is ExplainerMethod.anchor_tabular:
self.wrapper: ExplainerWrapper = AnchorTabular(
self._predict_fn, explainer, **config
)
elif self.method is ExplainerMethod.anchor_images:
self.wrapper = AnchorImages(self._predict_fn, explainer, **config)
elif self.method is ExplainerMethod.anchor_text:
self.wrapper = AnchorText(self._predict_fn, explainer, **config)
else:
raise NotImplementedError
def load(self) -> bool:
pass
def _predict_fn(self, arr: Union[np.ndarray, List]) -> np.ndarray:
instances = []
for req_data in arr:
if isinstance(req_data, np.ndarray):
instances.append(req_data.tolist())
else:
instances.append(req_data)
loop = asyncio.get_running_loop() # type: ignore
resp = loop.run_until_complete(self.predict({"instances": instances}))
return np.array(resp["predictions"])
def explain(self, request: Dict) -> Any:
if (
self.method is ExplainerMethod.anchor_tabular
or self.method is ExplainerMethod.anchor_images
or self.method is ExplainerMethod.anchor_text
):
explanation = self.wrapper.explain(request["instances"])
explanationAsJsonStr = explanation.to_json()
logging.info("Explanation: %s", explanationAsJsonStr)
return json.loads(explanationAsJsonStr)
raise NotImplementedError
|
understat/__init__.py | arkjinli/understat | 114 | 11083940 | from .understat import Understat
|
deepsvg/svglib/util_fns.py | naoto0804/deepsvg | 573 | 11083946 | import math
def get_roots(a, b, c):
    """Return the real roots of a*x**2 + b*x + c = 0 (empty result when there are none)."""
if a == 0:
if b == 0:
return []
return [-c / b]
r = b * b - 4 * a * c
if r < 0:
return []
elif r == 0:
x0 = -b / (2 * a)
return [x0]
x1, x2 = (-b - math.sqrt(r)) / (2 * a), (-b + math.sqrt(r)) / (2 * a)
return x1, x2
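# Illustrative check (not part of deepsvg): x**2 - 3*x + 2 = 0 factors as
# (x - 1)(x - 2), so the two real roots are 1 and 2; a negative discriminant
# yields no real roots.
if __name__ == "__main__":
    assert get_roots(1, -3, 2) == (1.0, 2.0)
    assert get_roots(1, 0, 1) == []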
|
tests/integration_tests/flows/test_mysql_api.py | mindsdb/main | 261 | 11083956 |
import time
import tempfile
import unittest
import json
from pathlib import Path
import docker
import netifaces
import pandas as pd
import requests
from common import (
run_environment,
EXTERNAL_DB_CREDENTIALS,
CONFIG_PATH)
def get_docker0_inet_ip():
if "docker0" not in netifaces.interfaces():
raise Exception("Unable to find 'docker' interface. Please install docker first.")
return netifaces.ifaddresses('docker0')[netifaces.AF_INET][0]['addr']
HTTP_API_ROOT = f'http://{get_docker0_inet_ip()}:47334/api'
class Dlist(list):
"""Service class for convenient work with list of dicts(db response)"""
def __contains__(self, item):
if item in self.__getitem__(0):
return True
return False
def get_record(self, key, value):
if key in self:
for x in self:
if x[key] == value:
return x
return None
class TestScenario:
predictor_name = 'home_rentals'
file_datasource_name = "from_files"
def create_datasource(self, db_type):
_query = "CREATE DATASOURCE %s WITH ENGINE = '%s', PARAMETERS = %s;" % (
db_type.upper(),
db_type,
json.dumps(self.db_creds[db_type]))
return self.query(_query)
@staticmethod
def upload_ds(df, name):
"""Upload pandas df as csv file."""
with tempfile.NamedTemporaryFile(mode='w+', newline='', delete=False) as f:
df.to_csv(f, index=False)
f.flush()
url = f'{HTTP_API_ROOT}/datasources/{name}'
data = {
"source_type": (None, 'file'),
"file": (f.name, f, 'text/csv'),
"source": (None, f.name.split('/')[-1]),
"name": (None, name)
}
res = requests.put(url, files=data)
res.raise_for_status()
def verify_file_ds(self, ds_name):
timeout = 5
threshold = time.time() + timeout
res = ''
while time.time() < threshold:
_query = "USE files; SHOW tables;"
res = self.query(_query)
if 'Tables_in_files' in res and res.get_record('Tables_in_files', ds_name):
break
time.sleep(0.5)
self.assertTrue('Tables_in_files' in res and res.get_record('Tables_in_files', ds_name),
f"file datasource {ds_name} is not ready to use after {timeout} seconds")
def check_predictor_readiness(self, predictor_name):
timeout = 600
threshold = time.time() + timeout
res = ''
while time.time() < threshold:
_query = "SELECT status FROM predictors WHERE name='{}';".format(predictor_name)
            res = self.query(_query)
if 'status' in res and res.get_record('status', 'complete'):
break
time.sleep(2)
self.assertTrue('status' in res and res.get_record('status', 'complete'),
f"predictor {predictor_name} is not complete after {timeout} seconds")
def validate_datasource_creation(self, ds_type):
self.create_datasource(ds_type.lower())
res = self.query("SELECT * FROM mindsdb.datasources WHERE name='{}';".format(ds_type.upper()))
self.assertTrue("name" in res and res.get_record("name", ds_type.upper()),
f"Expected datasource is not found after creation - {ds_type.upper()}: {res}")
def test_1_create_datasources(self):
for ds_type in self.db_creds:
# TODO del clickhouse from list
if ds_type not in ['kafka', 'redis', 'mongodb_atlas', 'clickhouse']:
with self.subTest(msg=ds_type):
print(f"\nExecuting {self._testMethodName} ({__name__}.{self.__class__.__name__}) [{ds_type}]")
self.validate_datasource_creation(ds_type)
def test_2_create_predictor(self):
_query = f"CREATE PREDICTOR {self.predictor_name} from MYSQL (select * from test_data.home_rentals) as hr_ds predict rental_price;"
self.query(_query)
self.check_predictor_readiness(self.predictor_name)
def test_3_making_prediction(self):
_query = ('SELECT rental_price, rental_price_explain FROM ' +
self.predictor_name +
' WHERE when_data=\'{"number_of_rooms":"2","sqft":"400","location":"downtown","days_on_market":"2","initial_price":"2500"}\';')
res = self.query(_query)
self.assertTrue('rental_price' in res and 'rental_price_explain' in res,
f"error getting prediction from {self.predictor_name} - {res}")
def test_4_service_requests(self):
service_requests = [
"show databases;",
"show schemas;",
"show tables;",
"show tables from mindsdb;",
"show full tables from mindsdb;",
"show variables;",
"show session status;",
"show global variables;",
"show engines;",
"show warnings;",
"show charset;",
"show collation;",
"show datasources;",
"show predictors;",
"show function status where db = 'mindsdb';",
"show procedure status where db = 'mindsdb';",
# "show table status like commands;",
]
for req in service_requests:
with self.subTest(msg=req):
print(f"\nExecuting {self._testMethodName} ({__name__}.{self.__class__.__name__}) [{req}]")
self.query(req)
def test_5_drop_datasource(self):
self.query('drop datasource MYSQL;')
def test_6_describe_predictor_attrs(self):
attrs = ["model", "features", "ensemble"]
for attr in attrs:
with self.subTest(msg=attr):
print(f"\nExecuting {self._testMethodName} ({__name__}.{self.__class__.__name__}) [{attr}]")
self.query(f"describe mindsdb.{self.predictor_name}.{attr};")
def test_7_train_predictor_from_files(self):
df = pd.DataFrame({
'x1': [x for x in range(100, 210)] + [x for x in range(100, 210)],
'x2': [x * 2 for x in range(100, 210)] + [x * 3 for x in range(100, 210)],
'y': [x * 3 for x in range(100, 210)] + [x * 2 for x in range(100, 210)]
})
file_predictor_name = "predictor_from_file"
self.upload_ds(df, self.file_datasource_name)
self.verify_file_ds(self.file_datasource_name)
_query = f"CREATE PREDICTOR {file_predictor_name} from files (select * from {self.file_datasource_name}) predict y;"
self.query(_query)
self.check_predictor_readiness(file_predictor_name)
def test_8_0_select_from_files(self):
_query = f"select * from files.{self.file_datasource_name};"
self.query(_query)
def test_9_ts_train_and_predict(self):
train_df = pd.DataFrame({
'group': ["A" for _ in range(100, 210)] + ["B" for _ in range(100, 210)],
'order': [x for x in range(100, 210)] + [x for x in range(200, 310)],
'x1': [x for x in range(100, 210)] + [x for x in range(100, 210)],
'x2': [x * 2 for x in range(100, 210)] + [x * 3 for x in range(100, 210)],
'y': [x * 3 for x in range(100, 210)] + [x * 2 for x in range(100, 210)]
})
test_df = pd.DataFrame({
'group': ["A" for _ in range(210, 220)] + ["B" for _ in range(210, 220)],
'order': [x for x in range(210, 220)] + [x for x in range(310, 320)],
'x1': [x for x in range(210, 220)] + [x for x in range(210, 220)],
'x2': [x * 2 for x in range(210, 220)] + [x * 3 for x in range(210, 220)],
'y': [x * 3 for x in range(210, 220)] + [x * 2 for x in range(210, 220)]
})
train_ds_name = "train_ts_file_ds"
test_ds_name = "test_ts_file_ds"
for df, ds_name in [(train_df, train_ds_name), (test_df, test_ds_name)]:
self.upload_ds(df, ds_name)
self.verify_file_ds(ds_name)
params = [
("with_group_by_hor1",
f"CREATE PREDICTOR %s from files (select * from {train_ds_name}) PREDICT y ORDER BY order GROUP BY group WINDOW 10 HORIZON 1;",
f"SELECT res.group, res.y as PREDICTED_RESULT FROM files.{test_ds_name} as source JOIN mindsdb.%s as res WHERE source.group= 'A' LIMIT 1;",
1),
("no_group_by_hor1",
f"CREATE PREDICTOR %s from files (select * from {train_ds_name}) PREDICT y ORDER BY order WINDOW 10 HORIZON 1;",
f"SELECT res.group, res.y as PREDICTED_RESULT FROM files.{test_ds_name} as source JOIN mindsdb.%s as res LIMIT 1;",
1),
("with_group_by_hor2",
f"CREATE PREDICTOR %s from files (select * from {train_ds_name}) PREDICT y ORDER BY order GROUP BY group WINDOW 10 HORIZON 2;",
f"SELECT res.group, res.y as PREDICTED_RESULT FROM files.{test_ds_name} as source JOIN mindsdb.%s as res WHERE source.group= 'A' LIMIT 2;",
2),
("no_group_by_hor2",
f"CREATE PREDICTOR %s from files (select * from {train_ds_name}) PREDICT y ORDER BY order WINDOW 10 HORIZON 2;",
f"SELECT res.group, res.y as PREDICTED_RESULT FROM files.{test_ds_name} as source JOIN mindsdb.%s as res LIMIT 2;",
2),
]
for predictor_name, create_query, select_query, res_len in params:
with self.subTest(msg=predictor_name):
print(f"\nExecuting {self._testMethodName} ({__name__}.{self.__class__.__name__}) [{predictor_name}]")
self.query(create_query % predictor_name)
self.check_predictor_readiness(predictor_name)
res = self.query(select_query % predictor_name)
self.assertTrue(len(res) == res_len, f"prediction result {res} contains more that {res_len} records")
class MySqlApiTest(unittest.TestCase, TestScenario):
@classmethod
def setUpClass(cls):
override_config = {
'integrations': {},
'api': {
"http": {"host": get_docker0_inet_ip()},
"mysql": {"host": get_docker0_inet_ip()}
}
}
run_environment(apis=['http', 'mysql'], override_config=override_config)
cls.docker_client = docker.from_env()
cls.mysql_image = 'mysql'
cls.config = json.loads(Path(CONFIG_PATH).read_text())
with open(EXTERNAL_DB_CREDENTIALS, 'rt') as f:
cls.db_creds = json.load(f)
cls.launch_query_tmpl = "mysql --host=%s --port=%s --user=%s --database=mindsdb" % (
cls.config["api"]["mysql"]["host"],
cls.config["api"]["mysql"]["port"],
cls.config["api"]["mysql"]["user"])
@classmethod
def tearDownClass(cls):
cls.docker_client.close()
def query(self, _query, encoding='utf-8'):
"""Run mysql docker container
Perform connection to mindsdb database
Execute sql request
----------------------
It is very problematic (or even impossible)
to provide sql statement as it is in 'docker run command',
that's why this action is splitted on three steps:
Save sql statement into temporary dir in .sql file
Run docker container with volume points to this temp dir,
Provide .sql file as input parameter for 'mysql' command"""
with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/test.sql", 'w') as f:
f.write(_query)
cmd = f"{self.launch_query_tmpl} < /temp/test.sql"
cmd = 'sh -c "' + cmd + '"'
res = self.docker_client.containers.run(
self.mysql_image,
command=cmd,
remove=True,
volumes={str(tmpdirname): {'bind': '/temp', 'mode': 'ro'}},
environment={"MYSQL_PWD": self.config["api"]["mysql"]["password"]})
return self.to_dicts(res.decode(encoding))
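        # Illustrative sketch of the resulting container invocation (host, port and
        # user values here are hypothetical stand-ins for whatever the config holds):
        #   sh -c "mysql --host=172.17.0.1 --port=47335 --user=mindsdb --database=mindsdb < /temp/test.sql"
        # with the temporary dir mounted read-only at /temp and the password passed via MYSQL_PWD.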
@staticmethod
def to_dicts(response):
if not response:
return {}
lines = response.splitlines()
if len(lines) < 2:
return {}
headers = tuple(lines[0].split("\t"))
res = Dlist()
for body in lines[1:]:
data = tuple(body.split("\t"))
res.append(dict(zip(headers, data)))
return res
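    # Illustrative sketch of to_dicts (hypothetical column values): a tab-separated
    # batch response such as "name\tstatus\nfile_predictor\tcomplete\n" becomes a
    # Dlist of dicts like [{'name': 'file_predictor', 'status': 'complete'}],
    # pairing the header row with every following row.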
if __name__ == "__main__":
    try:
        unittest.main(failfast=True, verbosity=2)
    except SystemExit as e:
        if e.code:
            print(f'Tests failed! Exit code: {e.code}')
        else:
            print('Tests passed!')
        raise
|
Combo_Dial_Safe/code.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 11083975 | # Combo Dial Safe
# for Adafruit Circuit Playground express
# with CircuitPython
import time
import board
import pwmio
from adafruit_motor import servo
from adafruit_circuitplayground.express import cpx
pwm = pwmio.PWMOut(board.A3, duty_cycle=2 ** 15, frequency=50)
# plug red servo wire to VOUT, brown to GND, yellow to A3
servo = servo.Servo(pwm)
cpx.pixels.brightness = 0.05 # set brightness value
def unlock_servo():
servo.angle = 180
def lock_servo():
servo.angle = 90
correct_combo = ['B', 'D', 'C'] # this is where to set the combo
entered_combo = [] # this will be used to store attempts
current_dial_position = 'X'
cpx.red_led = 1  # turn on the on-board red LED to show it's locked
lock_servo() # lock the servo
while True:
    x_float, y_float, z_float = cpx.acceleration  # read accelerometer
x = int(x_float) # make int of it
y = int(y_float)
z = int(z_float)
# four simple rotation positions, A-D
# the combination entries are based on which letter is facing up
#
# A
# .___.
# . .
# D . . B
# . .
# . .
# .|_|.
# C
if x == 0 and y == 9:
current_dial_position = 'A' # used to store dial position
cpx.pixels.fill((0, 0, 255))
if x == 9 and y == 0:
current_dial_position = 'B'
cpx.pixels.fill((80, 0, 80))
if x == 0 and y == -9:
current_dial_position = 'C'
cpx.pixels.fill((255, 70, 0))
if x == -9 and y == 0:
current_dial_position = 'D'
cpx.pixels.fill((255, 255, 255))
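    # (The +/-9 values correspond to roughly 1 g (~9.8 m/s^2) of gravity measured
    # along the x or y axis and truncated to int, so each check above identifies
    # which letter of the dial is currently facing up.)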
# press the right/B button to lock the servo
if cpx.button_b: # this is a more Pythonic way to check button status
print('Locked/Reset')
cpx.red_led = 1
cpx.pixels.fill((50, 10, 10))
lock_servo()
cpx.play_tone(120, 0.4)
cpx.pixels.fill((0, 0, 0))
entered_combo = [] # clear this for next time around
time.sleep(1)
# press the left/A button to enter the current position as a combo entry
if cpx.button_a: # this means the button has been pressed
# grab the current_dial_position value and add to the list
entered_combo.append(current_dial_position)
        dial_msg = 'Dial Position: ' + str(entered_combo[-1])
print(dial_msg)
cpx.play_tone(320, 0.3) # beep
time.sleep(1) # slow down button checks
if len(entered_combo) == 3:
if entered_combo == correct_combo: # they match!
print('Correct! Unlocked.')
cpx.red_led = 0 # turn off the on board LED
cpx.pixels.fill((0, 255, 0))
unlock_servo()
cpx.play_tone(440, 1)
time.sleep(3)
entered_combo = [] # clear this for next time around
else:
                print('Incorrect combination.')
cpx.pixels.fill((255, 0, 0))
cpx.play_tone(180, 0.3) # beep
cpx.play_tone(130, 1) # boop
time.sleep(3)
entered_combo = [] # clear this for next time around
|
lintcode/629.minimum-spanning-tree.py | geemaple/algorithm | 177 | 11084002 | '''
Definition for a Connection
class Connection:
def __init__(self, city1, city2, cost):
self.city1, self.city2, self.cost = city1, city2, cost
'''
class UnionFind(object):
def __init__(self, n):
self.size = n
self.graph = {}
for i in range(n):
self.graph[i] = i
def find(self, node):
if self.graph[node] == node:
return node
self.graph[node] = self.find(self.graph[node])
return self.graph[node]
def query(self, a, b):
return self.find(a) == self.find(b)
def connect(self, a, b):
root_a = self.find(a)
root_b = self.find(b)
if root_a != root_b:
self.size -= 1
self.graph[root_a] = root_b
def all_connected(self):
return self.size == 1
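# Illustrative use of UnionFind (hypothetical values, not part of the submitted
# solution): with UnionFind(3), connect(0, 1) makes query(0, 1) True and drops
# size to 2, so all_connected() only becomes True after connect(1, 2) merges the
# remaining component.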
def comp(a, b):
if a.cost != b.cost:
return a.cost - b.cost
if a.city1 != b.city1:
if a.city1 < b.city1:
return -1
else:
return 1
if a.city2 == b.city2:
return 0
elif a.city2 < b.city2:
return -1
else:
return 1
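# The Solution below follows Kruskal's algorithm: sort the connections by
# (cost, city1, city2) via comp, then keep an edge only when its two cities are
# not yet in the same union-find component. Illustrative call (hypothetical data):
#   Solution().lowestCost([Connection("A", "B", 1), Connection("B", "C", 2),
#                          Connection("A", "C", 3)])
#   # keeps A-B and B-C, drops A-C; returns [] if the graph is not fully connected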
class Solution:
# @param {Connection[]} connections given a list of connections
# include two cities and cost
# @return {Connection[]} a list of connections from results
def lowestCost(self, connections):
# Write your code here
connections.sort(cmp=comp)
cityMap = {}
count = 0
for conn in connections:
if conn.city1 not in cityMap:
cityMap[conn.city1] = count
count += 1
if conn.city2 not in cityMap:
cityMap[conn.city2] = count
count += 1
uf = UnionFind(count)
res = []
for conn in connections:
city1 = cityMap[conn.city1]
city2 = cityMap[conn.city2]
if not uf.query(city1, city2):
uf.connect(city1, city2)
res.append(conn)
return res if uf.all_connected() else [] |
src/proxy.py | LIVEauctioneers/aws-lambda-go-shim | 864 | 11084009
#
# Copyright 2017 Alsanium, SAS. or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import runtime
def dump(obj):
if hasattr(obj, '__slots__'):
return {slot: getattr(obj, slot) for slot in obj.__slots__}
return obj.__dict__
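# Illustrative behaviour of dump (hypothetical context class): for an object whose
# class declares __slots__ = ('aws_request_id', 'memory_limit_in_mb'), dump returns
# those slot values as a dict; ordinary objects fall back to their instance __dict__.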
class Proxy(object):
def __getattr__(self, name):
if name == "init":
return lambda: None
runtime.lookup(name)
return self._handle
def _handle(self, evt, ctx):
return json.loads(runtime.handle(
json.dumps(evt),
json.dumps(ctx, default=dump),
json.dumps({k: v for k, v in ((k, os.getenv(k)) for k in (
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_SESSION_TOKEN",
"AWS_SECURITY_TOKEN",
"_X_AMZN_TRACE_ID",
)) if v}),
ctx.log, ctx.get_remaining_time_in_millis))
|
petl/test/io/test_db_server.py | vishalbelsare/petl | 495 | 11084043 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import sys
import logging
import petl as etl
from petl.test.helpers import ieq
logger = logging.getLogger(__name__)
debug = logger.debug
def _test_dbo(write_dbo, read_dbo=None):
if read_dbo is None:
read_dbo = write_dbo
expect_empty = (('foo', 'bar'),)
expect = (('foo', 'bar'),
('a', 1),
('b', 2))
expect_appended = (('foo', 'bar'),
('a', 1),
('b', 2),
('a', 1),
('b', 2))
actual = etl.fromdb(read_dbo, 'SELECT * FROM test')
debug('verify empty to start with...')
debug(etl.look(actual))
ieq(expect_empty, actual)
debug('write some data and verify...')
etl.todb(expect, write_dbo, 'test')
debug(etl.look(actual))
ieq(expect, actual)
debug('append some data and verify...')
etl.appenddb(expect, write_dbo, 'test')
debug(etl.look(actual))
ieq(expect_appended, actual)
debug('overwrite and verify...')
etl.todb(expect, write_dbo, 'test')
debug(etl.look(actual))
ieq(expect, actual)
debug('cut, overwrite and verify')
etl.todb(etl.cut(expect, 'bar', 'foo'), write_dbo, 'test')
debug(etl.look(actual))
ieq(expect, actual)
debug('cut, append and verify')
etl.appenddb(etl.cut(expect, 'bar', 'foo'), write_dbo, 'test')
debug(etl.look(actual))
ieq(expect_appended, actual)
debug('try a single row')
etl.todb(etl.head(expect, 1), write_dbo, 'test')
debug(etl.look(actual))
ieq(etl.head(expect, 1), actual)
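# Illustrative calls (mirroring the exercises further down): _test_dbo accepts any
# object petl's todb/appenddb/fromdb understand - a DB-API connection or cursor, a
# SQLAlchemy engine, connection or session - and the read/write sides can differ, e.g.
#   _test_dbo(dbapi_connection, lambda: dbapi_connection.cursor(name='arbitrary'))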
def _test_with_schema(dbo, schema):
expect = (('foo', 'bar'),
('a', 1),
('b', 2))
expect_appended = (('foo', 'bar'),
('a', 1),
('b', 2),
('a', 1),
('b', 2))
actual = etl.fromdb(dbo, 'SELECT * FROM test')
print('write some data and verify...')
etl.todb(expect, dbo, 'test', schema=schema)
ieq(expect, actual)
print(etl.look(actual))
print('append some data and verify...')
etl.appenddb(expect, dbo, 'test', schema=schema)
ieq(expect_appended, actual)
print(etl.look(actual))
def _test_unicode(dbo):
expect = ((u'name', u'id'),
(u'Արամ Խաչատրյան', 1),
(u'<NAME>', 2),
(u'<NAME>', 3),
(u'章子怡', 4),
)
actual = etl.fromdb(dbo, 'SELECT * FROM test_unicode')
print('write some data and verify...')
etl.todb(expect, dbo, 'test_unicode')
ieq(expect, actual)
print(etl.look(actual))
def _setup_mysql(dbapi_connection):
# setup table
cursor = dbapi_connection.cursor()
# deal with quote compatibility
cursor.execute('SET SQL_MODE=ANSI_QUOTES')
cursor.execute('DROP TABLE IF EXISTS test')
cursor.execute('CREATE TABLE test (foo TEXT, bar INT)')
cursor.execute('DROP TABLE IF EXISTS test_unicode')
cursor.execute('CREATE TABLE test_unicode (name TEXT, id INT) '
'CHARACTER SET utf8')
cursor.close()
dbapi_connection.commit()
def _setup_postgresql(dbapi_connection):
# setup table
cursor = dbapi_connection.cursor()
cursor.execute('DROP TABLE IF EXISTS test')
cursor.execute('CREATE TABLE test (foo TEXT, bar INT)')
cursor.execute('DROP TABLE IF EXISTS test_unicode')
# assume character encoding UTF-8 already set at database level
cursor.execute('CREATE TABLE test_unicode (name TEXT, id INT)')
cursor.close()
dbapi_connection.commit()
host, user, password, database = 'localhost', 'petl', 'test', 'petl'
try:
import pymysql
import sqlalchemy
pymysql.connect(host=host,
user=user,
password=password,
database=database)
except Exception as e:
print('SKIP pymysql tests: %s' % e, file=sys.stderr)
else:
def test_mysql():
import pymysql
connect = pymysql.connect
# assume database already created
dbapi_connection = connect(host=host,
user=user,
password=password,
database=database)
# exercise using a dbapi_connection
_setup_mysql(dbapi_connection)
_test_dbo(dbapi_connection)
# exercise using a dbapi_cursor
_setup_mysql(dbapi_connection)
dbapi_cursor = dbapi_connection.cursor()
_test_dbo(dbapi_cursor)
dbapi_cursor.close()
# exercise sqlalchemy dbapi_connection
_setup_mysql(dbapi_connection)
from sqlalchemy import create_engine
sqlalchemy_engine = create_engine('mysql+pymysql://%s:%s@%s/%s' %
(user, password, host, database))
sqlalchemy_connection = sqlalchemy_engine.connect()
sqlalchemy_connection.execute('SET SQL_MODE=ANSI_QUOTES')
_test_dbo(sqlalchemy_connection)
sqlalchemy_connection.close()
# exercise sqlalchemy session
_setup_mysql(dbapi_connection)
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=sqlalchemy_engine)
sqlalchemy_session = Session()
_test_dbo(sqlalchemy_session)
sqlalchemy_session.close()
# exercise sqlalchemy engine
_setup_mysql(dbapi_connection)
sqlalchemy_engine2 = create_engine('mysql+pymysql://%s:%s@%s/%s' %
(user, password, host, database))
sqlalchemy_engine2.execute('SET SQL_MODE=ANSI_QUOTES')
_test_dbo(sqlalchemy_engine2)
sqlalchemy_engine2.dispose()
# other exercises
_test_with_schema(dbapi_connection, database)
utf8_connection = connect(host=host, user=user,
password=password,
database=database,
charset='utf8')
utf8_connection.cursor().execute('SET SQL_MODE=ANSI_QUOTES')
_test_unicode(utf8_connection)
try:
import MySQLdb
import sqlalchemy
MySQLdb.connect(host=host,
user=user,
passwd=password,
db=database)
except Exception as e:
print('SKIP MySQLdb tests: %s' % e, file=sys.stderr)
else:
def test_mysql():
import MySQLdb
connect = MySQLdb.connect
# assume database already created
dbapi_connection = connect(host=host,
user=user,
passwd=password,
db=database)
# exercise using a dbapi_connection
_setup_mysql(dbapi_connection)
_test_dbo(dbapi_connection)
# exercise using a dbapi_cursor
_setup_mysql(dbapi_connection)
dbapi_cursor = dbapi_connection.cursor()
_test_dbo(dbapi_cursor)
dbapi_cursor.close()
# exercise sqlalchemy dbapi_connection
_setup_mysql(dbapi_connection)
from sqlalchemy import create_engine
sqlalchemy_engine = create_engine('mysql+mysqldb://%s:%s@%s/%s' %
(user, password, host, database))
sqlalchemy_connection = sqlalchemy_engine.connect()
sqlalchemy_connection.execute('SET SQL_MODE=ANSI_QUOTES')
_test_dbo(sqlalchemy_connection)
sqlalchemy_connection.close()
# exercise sqlalchemy session
_setup_mysql(dbapi_connection)
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=sqlalchemy_engine)
sqlalchemy_session = Session()
_test_dbo(sqlalchemy_session)
sqlalchemy_session.close()
# other exercises
_test_with_schema(dbapi_connection, database)
utf8_connection = connect(host=host, user=user,
passwd=password,
db=database,
charset='utf8')
utf8_connection.cursor().execute('SET SQL_MODE=ANSI_QUOTES')
_test_unicode(utf8_connection)
try:
import psycopg2
import sqlalchemy
psycopg2.connect(
'host=%s dbname=%s user=%s password=%s'
% (host, database, user, password)
)
except Exception as e:
print('SKIP psycopg2 tests: %s' % e, file=sys.stderr)
else:
def test_postgresql():
import psycopg2
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
# assume database already created
dbapi_connection = psycopg2.connect(
'host=%s dbname=%s user=%s password=%s'
% (host, database, user, password)
)
# exercise using a dbapi_connection
_setup_postgresql(dbapi_connection)
_test_dbo(dbapi_connection)
# exercise using a dbapi_cursor
_setup_postgresql(dbapi_connection)
dbapi_cursor = dbapi_connection.cursor()
_test_dbo(dbapi_cursor)
dbapi_cursor.close()
# exercise sqlalchemy dbapi_connection
_setup_postgresql(dbapi_connection)
from sqlalchemy import create_engine
sqlalchemy_engine = create_engine('postgresql+psycopg2://%s:%s@%s/%s' %
(user, password, host, database))
sqlalchemy_connection = sqlalchemy_engine.connect()
_test_dbo(sqlalchemy_connection)
sqlalchemy_connection.close()
# exercise sqlalchemy session
_setup_postgresql(dbapi_connection)
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=sqlalchemy_engine)
sqlalchemy_session = Session()
_test_dbo(sqlalchemy_session)
sqlalchemy_session.close()
# other exercises
_test_dbo(dbapi_connection,
lambda: dbapi_connection.cursor(name='arbitrary'))
_test_with_schema(dbapi_connection, 'public')
_test_unicode(dbapi_connection)
|