max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
tests/data/callbacks.py | jamesdarke/gwader | 309 | 11132580 | def on_download_start(api, gid):
print("started " + gid)
def on_download_pause(api, gid):
print("paused " + gid)
|
tests/unit/portcon/order_sizer/test_long_short.py | calumrussell/qstrader | 2,220 | 11132582 | <gh_stars>1000+
from unittest.mock import Mock
import pandas as pd
import pytest
import pytz
from qstrader.portcon.order_sizer.long_short import (
LongShortLeveragedOrderSizer
)
@pytest.mark.parametrize(
"gross_leverage,expected",
[
(-1.0, None),
(0.0, None),
(0.01, 0.01),
(0.99, 0.99),
(1.0, 1.0),
(2.0, 2.0),
(5.0, 5.0),
]
)
def test_check_set_gross_leverage(gross_leverage, expected):
"""
Checks that the gross leverage falls into the appropriate
range and raises otherwise.
"""
broker = Mock()
broker_portfolio_id = "1234"
data_handler = Mock()
if expected is None:
with pytest.raises(ValueError):
order_sizer = LongShortLeveragedOrderSizer(
broker, broker_portfolio_id, data_handler, gross_leverage
)
else:
order_sizer = LongShortLeveragedOrderSizer(
broker, broker_portfolio_id, data_handler, gross_leverage
)
assert order_sizer.gross_leverage == expected
@pytest.mark.parametrize(
"weights,gross_leverage,expected",
[
(
{'EQ:ABC': 0.2, 'EQ:DEF': 0.6},
1.0,
{'EQ:ABC': 0.25, 'EQ:DEF': 0.75}
),
(
{'EQ:ABC': 0.5, 'EQ:DEF': 0.5},
1.0,
{'EQ:ABC': 0.5, 'EQ:DEF': 0.5}
),
(
{'EQ:ABC': 0.01, 'EQ:DEF': 0.01},
1.0,
{'EQ:ABC': 0.5, 'EQ:DEF': 0.5}
),
(
{'EQ:ABC': 0.2, 'EQ:DEF': 0.6},
2.0,
{'EQ:ABC': 0.5, 'EQ:DEF': 1.5}
),
(
{'EQ:ABC': 0.2, 'EQ:DEF': 0.6},
0.5,
{'EQ:ABC': 0.125, 'EQ:DEF': 0.375}
),
(
{'EQ:ABC': 0.1, 'EQ:DEF': 0.3, 'EQ:GHI': 0.02, 'EQ:JKL': 0.8},
1.0,
{'EQ:ABC': 0.1 / 1.22, 'EQ:DEF': 0.3 / 1.22, 'EQ:GHI': 0.02 / 1.22, 'EQ:JKL': 0.8 / 1.22}
),
(
{'EQ:ABC': 0.1, 'EQ:DEF': 0.3, 'EQ:GHI': 0.02, 'EQ:JKL': 0.8},
3.0,
{'EQ:ABC': 0.3 / 1.22, 'EQ:DEF': 0.9 / 1.22, 'EQ:GHI': 0.06 / 1.22, 'EQ:JKL': 2.4 / 1.22}
),
(
{'EQ:ABC': 0.0, 'EQ:DEF': 0.0},
1.0,
{'EQ:ABC': 0.0, 'EQ:DEF': 0.0}
),
(
{'EQ:ABC': -0.2, 'EQ:DEF': 0.6},
1.0,
{'EQ:ABC': -0.25, 'EQ:DEF': 0.75}
),
(
{'EQ:ABC': -0.2, 'EQ:DEF': 0.6},
2.0,
{'EQ:ABC': -0.5, 'EQ:DEF': 1.5}
),
(
{'EQ:ABC': -0.1, 'EQ:DEF': 0.3, 'EQ:GHI': 0.02, 'EQ:JKL': -0.8},
3.0,
{'EQ:ABC': -0.3 / 1.22, 'EQ:DEF': 0.9 / 1.22, 'EQ:GHI': 0.06 / 1.22, 'EQ:JKL': -2.4 / 1.22}
)
]
)
def test_normalise_weights(weights, gross_leverage, expected):
"""
Checks that the _normalise_weights method rescales the weights
for the correct gross exposure and leverage.
"""
broker = Mock()
broker_portfolio_id = "1234"
data_handler = Mock()
order_sizer = LongShortLeveragedOrderSizer(
broker, broker_portfolio_id, data_handler, gross_leverage
)
if expected is None:
with pytest.raises(ValueError):
result = order_sizer._normalise_weights(weights)
else:
result = order_sizer._normalise_weights(weights)
assert result == pytest.approx(expected)
@pytest.mark.parametrize(
"total_equity,gross_leverage,weights,asset_prices,expected",
[
(
1e6,
1.0,
{'EQ:SPY': 0.5, 'EQ:AGG': 0.5},
{'EQ:SPY': 250.0, 'EQ:AGG': 150.0},
{'EQ:SPY': {'quantity': 2000}, 'EQ:AGG': {'quantity': 3333}}
),
(
325000.0,
1.5,
{'EQ:SPY': 0.6, 'EQ:AGG': 0.4},
{'EQ:SPY': 352.0, 'EQ:AGG': 178.0},
{'EQ:SPY': {'quantity': 830}, 'EQ:AGG': {'quantity': 1095}}
),
(
687523.0,
2.0,
{'EQ:SPY': 0.05, 'EQ:AGG': 0.328, 'EQ:TLT': 0.842, 'EQ:GLD': 0.9113},
{'EQ:SPY': 1036.23, 'EQ:AGG': 456.55, 'EQ:TLT': 987.63, 'EQ:GLD': 14.76},
{
'EQ:SPY': {'quantity': 31},
'EQ:AGG': {'quantity': 463},
'EQ:TLT': {'quantity': 550},
'EQ:GLD': {'quantity': 39833},
}
),
(
687523.0,
2.0,
{'EQ:SPY': 0.05, 'EQ:AGG': -0.328, 'EQ:TLT': -0.842, 'EQ:GLD': 0.9113},
{'EQ:SPY': 1036.23, 'EQ:AGG': 456.55, 'EQ:TLT': 987.63, 'EQ:GLD': 14.76},
{
'EQ:SPY': {'quantity': 31},
'EQ:AGG': {'quantity': -463},
'EQ:TLT': {'quantity': -550},
'EQ:GLD': {'quantity': 39833},
}
)
]
)
def test_call(total_equity, gross_leverage, weights, asset_prices, expected):
"""
Checks that the __call__ method correctly outputs the target
portfolio from a given set of weights and a timestamp.
"""
dt = pd.Timestamp('2019-01-01 15:00:00', tz=pytz.utc)
broker_portfolio_id = "1234"
broker = Mock()
broker.get_portfolio_total_equity.return_value = total_equity
broker.fee_model.calc_total_cost.return_value = 0.0
data_handler = Mock()
data_handler.get_asset_latest_ask_price.side_effect = lambda self, x: asset_prices[x]
order_sizer = LongShortLeveragedOrderSizer(
broker, broker_portfolio_id, data_handler, gross_leverage
)
result = order_sizer(dt, weights)
assert result == expected
|
validators/i18n/es.py | vphilippon/validators | 586 | 11132603 | # -*- coding: utf-8 -*-
from validators.utils import validator
__all__ = ('es_cif', 'es_nif', 'es_nie', 'es_doi',)
def nif_nie_validation(doi, number_by_letter, special_cases):
"""
Validate if the doi is a NIF or a NIE.
:param doi: DOI to validate.
:return: boolean if it's valid.
"""
doi = doi.upper()
if doi in special_cases:
return False
table = 'TRWAGMYFPDXBNJZSQVHLCKE'
if len(doi) != 9:
return False
control = doi[8]
# If it is not a DNI, convert the first letter to the corresponding
# digit
numbers = number_by_letter.get(doi[0], doi[0]) + doi[1:8]
return numbers.isdigit() and control == table[int(numbers) % 23]
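# Worked example (taken from the es_nif docstring below): for '26643189N' the
# numeric part is 26643189, 26643189 % 23 == 12 and table[12] == 'N', so the
# control letter matches and the DOI validates.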
@validator
def es_cif(doi):
"""
Validate a Spanish CIF.
Each company in Spain had a distinct CIF prior to 2008, when the CIF was
discontinued. For more information see `wikipedia.org/cif`_.
The replacement is to use the NIF for absolutely everything. The issue is
that there are now "types" of NIFs: company and person [citizen vs resident],
all distinguished by the first character of the DOI. For this reason we
will continue to use the name CIF for NIFs that are used for companies.
This validator is based on `generadordni.es`_.
.. _generadordni.es:
https://generadordni.es/
.. _wikipedia.org/cif:
https://es.wikipedia.org/wiki/C%C3%B3digo_de_identificaci%C3%B3n_fiscal
Examples::
>>> es_cif('B25162520')
True
>>> es_cif('B25162529')
ValidationFailure(func=es_cif, args=...)
.. versionadded:: 0.13.0
:param doi: DOI to validate
"""
doi = doi.upper()
if len(doi) != 9:
return False
table = 'JABCDEFGHI'
first_chr = doi[0]
doi_body = doi[1:8]
control = doi[8]
if not doi_body.isdigit():
return False
odd_result = 0
even_result = 0
for index, char in enumerate(doi_body):
if index % 2 == 0:
# Multiply each odd-position doi digit by 2 and sum the resulting
# digits together
odd_result += sum(map(int, str(int(char) * 2)))
else:
even_result += int(char)
res = (10 - (even_result + odd_result) % 10) % 10
if first_chr in 'ABEH': # Number type
return str(res) == control
elif first_chr in 'PSQW': # Letter type
return table[res] == control
elif first_chr not in 'CDFGJNRUV':
return False
return control == str(res) or control == table[res]
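# Worked example (based on the es_cif docstring example 'B25162520'): the body
# '2516252' gives odd_result = 4 + 2 + 4 + 4 = 14 and even_result = 5 + 6 + 5 = 16,
# so res = (10 - 30 % 10) % 10 = 0; 'B' is a number-type prefix, so the control
# digit must be str(res) == '0', which it is.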
@validator
def es_nif(doi):
"""
Validate a Spanish NIF.
Each entity in Spain, be it a person or a company, has a distinct NIF. Since
we've designated CIF to be a company NIF, this NIF is only for persons.
For more information see `wikipedia.org/nif`_.
This validator is based on `generadordni.es`_.
.. _generadordni.es:
https://generadordni.es/
.. _wikipedia.org/nif:
https://es.wikipedia.org/wiki/N%C3%BAmero_de_identificaci%C3%B3n_fiscal
Examples::
>>> es_nif('26643189N')
True
>>> es_nif('26643189X')
ValidationFailure(func=es_nif, args=...)
.. versionadded:: 0.13.0
:param doi: DOI to validate
"""
number_by_letter = {'L': '0', 'M': '0', 'K': '0'}
special_cases = ['X0000000T', '00000000T', '00000001R']
return nif_nie_validation(doi, number_by_letter, special_cases)
@validator
def es_nie(doi):
"""
Validate a Spanish NIE.
The NIE is a tax identification number in Spain, known in Spanish as the
NIE, or more formally the Número de identidad de extranjero. For more
information see `wikipedia.org/nie`_.
This validator is based on `generadordni.es`_.
.. _generadordni.es:
https://generadordni.es/
.. _wikipedia.org/nie:
https://es.wikipedia.org/wiki/N%C3%BAmero_de_identidad_de_extranjero
Examples::
>>> es_nie('X0095892M')
True
>>> es_nie('X0095892X')
ValidationFailure(func=es_nie, args=...)
.. versionadded:: 0.13.0
:param doi: DOI to validate
"""
number_by_letter = {'X': '0', 'Y': '1', 'Z': '2'}
special_cases = ['X0000000T']
# NIE must start with X, Y or Z
if not doi or doi[0] not in number_by_letter.keys():
return False
return nif_nie_validation(doi, number_by_letter, special_cases)
@validator
def es_doi(doi):
"""
Validate a Spanish DOI.
A DOI in Spain is any of NIF / CIF / NIE / DNI -- a digital ID. For more
information see `wikipedia.org/doi`_.
This validator is based on `generadordni.es`_.
.. _generadordni.es:
https://generadordni.es/
.. _wikipedia.org/doi:
https://es.wikipedia.org/wiki/Identificador_de_objeto_digital
Examples::
>>> es_doi('X0095892M')
True
>>> es_doi('X0095892X')
ValidationFailure(func=es_doi, args=...)
.. versionadded:: 0.13.0
:param doi: DOI to validate
"""
return es_nie(doi) or es_nif(doi) or es_cif(doi)
|
tests/test_pipelines_fill_mask.py | liminghao1630/transformers | 8,028 | 11132651 | <reponame>liminghao1630/transformers<filename>tests/test_pipelines_fill_mask.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_MASKED_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
@require_tf
def test_small_model_tf(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " acc<PASSWORD>"},
],
)
outputs = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
],
)
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
],
)
@require_torch
def test_small_model_pt(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
],
)
outputs = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
],
)
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
],
)
outputs = unmasker("My name is <mask> <mask>", top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
],
)
@slow
@require_torch
def test_large_model_pt(self):
unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
self.run_large_test(unmasker)
@slow
@require_tf
def test_large_model_tf(self):
unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
self.run_large_test(unmasker)
def run_large_test(self, unmasker):
outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(outputs),
[
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
],
)
outputs = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(outputs),
[
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
],
)
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
self.assertEqual(
nested_simplify(outputs),
[
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
],
)
@require_torch
def test_model_no_pad_pt(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
unmasker.tokenizer.pad_token_id = None
unmasker.tokenizer.pad_token = None
self.run_pipeline_test(unmasker, [])
@require_tf
def test_model_no_pad_tf(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
unmasker.tokenizer.pad_token_id = None
unmasker.tokenizer.pad_token = None
self.run_pipeline_test(unmasker, [])
def get_test_pipeline(self, model, tokenizer, feature_extractor):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
examples = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def run_pipeline_test(self, fill_masker, examples):
tokenizer = fill_masker.tokenizer
model = fill_masker.model
outputs = fill_masker(
f"This is a {tokenizer.mask_token}",
)
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
self.assertEqual(
outputs,
[
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
],
)
with self.assertRaises(ValueError):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(PipelineException):
fill_masker("This is")
self.run_test_top_k(model, tokenizer)
self.run_test_targets(model, tokenizer)
self.run_test_top_k_targets(model, tokenizer)
self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
self.fill_mask_with_multiple_masks(model, tokenizer)
def run_test_targets(self, model, tokenizer):
vocab = tokenizer.get_vocab()
targets = list(sorted(vocab.keys()))[:2]
# Pipeline argument
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
outputs = fill_masker(f"This is a {tokenizer.mask_token}")
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
target_ids = {vocab[el] for el in targets}
self.assertEqual(set(el["token"] for el in outputs), target_ids)
self.assertEqual(set(el["token_str"] for el in outputs), set(targets))
# Call argument
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
target_ids = {vocab[el] for el in targets}
self.assertEqual(set(el["token"] for el in outputs), target_ids)
self.assertEqual(set(el["token_str"] for el in outputs), set(targets))
# Score equivalence
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
tokens = [top_mask["token_str"] for top_mask in outputs]
scores = [top_mask["score"] for top_mask in outputs]
unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
target_scores = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
# Raises with invalid
with self.assertRaises(ValueError):
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
with self.assertRaises(ValueError):
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
with self.assertRaises(ValueError):
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
def run_test_top_k(self, model, tokenizer):
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
outputs = fill_masker(f"This is a {tokenizer.mask_token}")
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
self.assertEqual(
outputs2,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def run_test_top_k_targets(self, model, tokenizer):
vocab = tokenizer.get_vocab()
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
# top_k=2, ntargets=3
targets = list(sorted(vocab.keys()))[:3]
outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
# If we use the most probable targets, and filter differently, we should still
# have the same results
targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
# They should yield exactly the same result
self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
vocab = tokenizer.get_vocab()
# String duplicates + id duplicates
targets = list(sorted(vocab.keys()))[:3]
targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
# The target list contains duplicates, so we cannot output more
# results than the number of unique targets
self.assertEqual(len(outputs), 3)
def fill_mask_with_multiple_masks(self, model, tokenizer):
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
outputs = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
)
self.assertEqual(
outputs,
[
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
],
)
|
lib_pypy/_ssl_build.py | nanjekyejoannah/pypy | 333 | 11132655 | <reponame>nanjekyejoannah/pypy
import sys
from _cffi_ssl import _cffi_src
sys.modules['_cffi_src'] = _cffi_src
#
from _cffi_ssl._cffi_src.build_openssl import (build_ffi_for_binding,
_get_openssl_libraries, extra_link_args, compiler_type)
if sys.platform == "win32":
pypy_win32_extra = ["pypy_win32_extra"]
else:
pypy_win32_extra = []
libraries=_get_openssl_libraries(sys.platform)
ffi = build_ffi_for_binding(
module_name="_pypy_openssl",
module_prefix="_cffi_src.openssl.",
modules=[
# This goes first so we can define some cryptography-wide symbols.
"cryptography",
"aes",
"asn1",
"bignum",
"bio",
"cmac",
"conf",
"crypto",
"ct",
"dh",
"dsa",
"ec",
"ecdh",
"ecdsa",
"engine",
"err",
"evp",
"fips",
"hmac",
"nid",
"objects",
"ocsp",
"opensslv",
"osrandom_engine",
"pem",
"pkcs12",
"rand",
"rsa",
"ssl",
"x509",
"x509name",
"x509v3",
"x509_vfy",
"pkcs7",
"callbacks",
] + pypy_win32_extra,
libraries=libraries,
extra_link_args=extra_link_args(compiler_type()),
)
if __name__ == '__main__':
ffi.compile(verbose=True)
if sys.platform == 'win32':
# copy dlls from externals to the pwd
# maybe we should link to libraries instead of the dlls
# to avoid this mess
import os, glob, shutil
path_parts = os.environ['PATH'].split(';')
candidates = [x for x in path_parts if 'externals' in x]
def copy_from_path(dll):
for c in candidates:
files = glob.glob(os.path.join(c, dll + '*.dll'))
if files:
for fname in files:
print('copying', fname)
shutil.copy(fname, '.')
if candidates:
for lib in libraries:
copy_from_path(lib)
|
pgoapi/protos/pogoprotos/data/quests/quest_goal_pb2.py | aroo135/pgoapi | 842 | 11132661 | <filename>pgoapi/protos/pogoprotos/data/quests/quest_goal_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/data/quests/quest_goal.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/quests/quest_goal.proto',
package='pogoprotos.data.quests',
syntax='proto3',
serialized_pb=_b('\n\'pogoprotos/data/quests/quest_goal.proto\x12\x16pogoprotos.data.quests\"\x8c\x01\n\tQuestGoal\x12\x41\n\x0boption_type\x18\x01 \x01(\x0e\x32,.pogoprotos.data.quests.QuestGoal.OptionType\x12\x0e\n\x06target\x18\x02 \x01(\x05\",\n\nOptionType\x12\t\n\x05UNSET\x10\x00\x12\x13\n\x0fOF_POKEMON_TYPE\x10\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_QUESTGOAL_OPTIONTYPE = _descriptor.EnumDescriptor(
name='OptionType',
full_name='pogoprotos.data.quests.QuestGoal.OptionType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OF_POKEMON_TYPE', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=164,
serialized_end=208,
)
_sym_db.RegisterEnumDescriptor(_QUESTGOAL_OPTIONTYPE)
_QUESTGOAL = _descriptor.Descriptor(
name='QuestGoal',
full_name='pogoprotos.data.quests.QuestGoal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='option_type', full_name='pogoprotos.data.quests.QuestGoal.option_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='pogoprotos.data.quests.QuestGoal.target', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_QUESTGOAL_OPTIONTYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=208,
)
_QUESTGOAL.fields_by_name['option_type'].enum_type = _QUESTGOAL_OPTIONTYPE
_QUESTGOAL_OPTIONTYPE.containing_type = _QUESTGOAL
DESCRIPTOR.message_types_by_name['QuestGoal'] = _QUESTGOAL
QuestGoal = _reflection.GeneratedProtocolMessageType('QuestGoal', (_message.Message,), dict(
DESCRIPTOR = _QUESTGOAL,
__module__ = 'pogoprotos.data.quests.quest_goal_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.quests.QuestGoal)
))
_sym_db.RegisterMessage(QuestGoal)
# @@protoc_insertion_point(module_scope)
|
spartan/expr/mathematics.py | GabrielWen/spartan | 156 | 11132665 | '''
Basic numpy style mathematics operations on arrays.
These include --
* elementwise arithmetic (add, subtract, multiply, divide, power, mod)
* elementwise math functions (exp, log, sqrt, square, abs)
* reductions (sum, prod)
'''
import sys
import __builtin__
import numpy as np
import scipy.sparse as sp
from .operator.map import map, map2
from .operator.map_with_location import map_with_location
from .operator.reduce import reduce
from .operator.ndarray import ndarray
from .operator.optimize import disable_parakeet, not_idempotent
from .. import util, blob_ctx
from ..array import extent
from ..array.extent import index_for_reduction, shapes_match
from ..util import Assert
def add(a, b):
return map((a, b), fn=np.add)
def reciprocal(a):
return map(a, fn=np.reciprocal)
def negative(a):
return map(a, fn=np.negative)
def sub(a, b):
return map((a, b), fn=np.subtract)
def _rsub(a, b):
return map((b, a), fn=np.subtract)
def _multiply(a, b):
if sp.issparse(a):
return a.multiply(b)
else:
return np.multiply(a, b)
def multiply(a, b):
return map((a, b), fn=_multiply)
def _divide(a, b):
if sp.issparse(a):
return a.divide(b)
else:
return np.divide(a, b)
def divide(a, b):
return map((a, b), fn=_divide)
def _rdivide(a, b):
return divide(b, a)
def true_divide(a, b):
return map((a, b), fn=np.true_divide)
def floor_divide(a, b):
return map((a, b), fn=np.floor_divide)
def fmod(a, b):
return map((a, b), fn=np.fmod)
def mod(a, b):
return map((a, b), fn=np.mod)
def remainder(a, b):
return map((a, b), fn=np.remainder)
def power(a, b):
return map((a, b), fn=np.power)
def maximum(a, b):
return map((a, b), np.maximum)
def minimum(a, b):
return map((a, b), np.minimum)
def ln(v):
return map(v, fn=np.log)
def log(v):
return map(v, fn=np.log)
def exp(v):
return map(v, fn=np.exp)
def square(v):
return map(v, fn=np.square)
def sqrt(v):
return map(v, fn=np.sqrt)
def abs(v):
return map(v, fn=np.abs)
def _sum_local(ex, data, axis):
return data.sum(axis)
def sum(x, axis=None, tile_hint=None):
'''
Sum ``x`` over ``axis``.
:param x: The array to sum.
:param axis: Either an integer or ``None``.
'''
return reduce(x,
axis=axis,
dtype_fn=lambda input: input.dtype,
local_reduce_fn=_sum_local,
accumulate_fn=np.add,
tile_hint=tile_hint)
def _prod_local(ex, data, axis):
return data.prod(axis)
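# Presumably to avoid overflow of large products, _prod_dtype_fn below promotes
# int32 inputs to an int64 result dtype.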
def _prod_dtype_fn(input):
if input.dtype == np.int32:
return np.dtype(np.int64)
else:
return input.dtype
def prod(x, axis=None, tile_hint=None):
'''
Prod ``x`` over ``axis``.
:param x: The array to take the product over.
:param axis: Either an integer or ``None``.
'''
return reduce(x,
axis=axis,
dtype_fn=_prod_dtype_fn,
local_reduce_fn=_prod_local,
accumulate_fn=np.multiply,
tile_hint=tile_hint)
|
sunpy/map/__init__.py | johan12345/sunpy | 628 | 11132689 | """
SunPy Map
isort:skip_file
"""
from sunpy.map.mapbase import GenericMap
from sunpy.map import sources
from sunpy.map.header_helper import *
from sunpy.map.map_factory import Map
from sunpy.map.maputils import *
from .compositemap import CompositeMap
from .mapsequence import MapSequence
|
tests/test_paginators.py | bduzik/api-client | 112 | 11132690 | from unittest.mock import Mock
import pytest
from apiclient import APIClient, JsonRequestFormatter, JsonResponseHandler, paginated
from apiclient.authentication_methods import NoAuthentication
from apiclient.paginators import set_strategy
from apiclient.request_strategies import BaseRequestStrategy, RequestStrategy
from tests.helpers import client_factory
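# Pagination callbacks for the @paginated decorators below: each receives the previous
# response and returns the query params / URL of the next page, or implicitly returns
# None once response["next"] is empty, which stops the pagination.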
def next_page_param(response, previous_page_params):
if response["next"]:
return {"page": response["next"]}
def next_page_url(response, previous_page_url):
if response["next"]:
return response["next"]
class QueryPaginatedClient(APIClient):
def __init__(self, base_url, **kwargs):
self.base_url = base_url
super().__init__(**kwargs)
@paginated(by_query_params=next_page_param)
def make_read_request(self):
return self.get(endpoint=self.base_url)
class UrlPaginatedClient(APIClient):
def __init__(self, base_url, **kwargs):
self.base_url = base_url
super().__init__(**kwargs)
@paginated(by_url=next_page_url)
def make_read_request(self):
return self.get(endpoint=self.base_url)
def test_query_parameter_pagination(mock_requests):
# Given the response is over three pages
response_data = [
{"page1": "data", "next": "page2"},
{"page2": "data", "next": "page3"},
{"page3": "data", "next": None},
]
mock_requests.get(
"mock://testserver.com",
[
{"json": {"page1": "data", "next": "page2"}, "status_code": 200},
{"json": {"page2": "data", "next": "page3"}, "status_code": 200},
{"json": {"page3": "data", "next": None}, "status_code": 200},
],
)
# mock_requests.get.side_effect = [build_response(json=page_data) for page_data in response_data]
client = QueryPaginatedClient(
base_url="mock://testserver.com",
authentication_method=NoAuthentication(),
response_handler=JsonResponseHandler,
request_formatter=JsonRequestFormatter,
)
# And the client has been set up with the SinglePagePaginator
original_strategy = client.get_request_strategy()
assert isinstance(original_strategy, RequestStrategy)
# When I call the client method
response = list(client.make_read_request())
# Then two requests are made to get both pages
assert mock_requests.call_count == 3
assert len(response) == 3
assert response == response_data
# And the clients paginator is reset back to the original.
assert client.get_request_strategy() == original_strategy
def test_url_parameter_pagination(mock_requests):
# Given the response is over two pages
mock_requests.get(
"mock://testserver.com",
json={"page1": "data", "next": "mock://testserver.com/page2"},
status_code=200,
)
mock_requests.get("mock://testserver.com/page2", json={"page2": "data", "next": None}, status_code=200)
response_data = [
{"page1": "data", "next": "mock://testserver.com/page2"},
{"page2": "data", "next": None},
]
client = UrlPaginatedClient(
base_url="mock://testserver.com",
authentication_method=NoAuthentication(),
response_handler=JsonResponseHandler,
request_formatter=JsonRequestFormatter,
)
# And the client has been set up with the SinglePagePaginator
original_strategy = client.get_request_strategy()
assert isinstance(original_strategy, RequestStrategy)
# When I call the client method
response = list(client.make_read_request())
# Then two requests are made to get both pages
assert mock_requests.call_count == 2
assert response == response_data
# And the clients paginator is reset back to the original.
assert client.get_request_strategy() == original_strategy
def test_set_strategy_changes_strategy_on_copy_of_client_when_in_context():
client = client_factory()
original_strategy = client.get_request_strategy()
new_strategy = Mock(spec=BaseRequestStrategy)
with set_strategy(client, new_strategy) as temporary_client:
assert client.get_request_strategy() == original_strategy
assert temporary_client.get_request_strategy() == new_strategy
assert client.get_request_strategy() == original_strategy
def test_context_manager_resets_request_strategy_when_error():
client = client_factory()
original_strategy = client.get_request_strategy()
new_strategy = Mock(spec=BaseRequestStrategy)
raises_when_called = Mock(side_effect=ValueError("Something went wrong"))
with pytest.raises(ValueError):
with set_strategy(client, new_strategy):
raises_when_called()
assert client.get_request_strategy() == original_strategy
|
glumpy/transforms/panzoom.py | antoineMoPa/glumpy | 1,074 | 11132703 | # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import library
from . transform import Transform
class PanZoom(Transform):
"""
2D pan & zoom transform.
:param float aspect:
Indicates the aspect ratio of the displayed object. This is
necessary to convert pixel drag moves into object space coordinates.
Default is None.
:param float,float pan:
Initial translation. Default is (0,0)
:param float,float zoom:
Initial zoom level. Default is (1,1)
:param float zoom_min:
Minimal zoom level. Default is 0.01
:param float zoom_max:
Minimal zoom level. Default is 1000.0
The panzoom transform allows translating and scaling a scene in window
space coordinates (2D). This means that whatever point you grab on the
screen should remain under the mouse pointer. Zoom is realized using
the mouse scroll and is always centered on the mouse pointer.
The transform is connected to the following events:
* ``on_attach``: Transform initialization
* ``on_resize``: Transform update to maintain aspect
* ``on_mouse_scroll``: Zoom in & out (user action)
* ``on_mouse_grab``: Pan (user action)
**Usage example**:
.. code:: python
vertex = '''
attribute vec2 position;
void main()
{
gl_Position = <transform>(vec4(position, 0.0, 1.0));
} '''
...
window = app.Window(width=800, height=800)
program = gloo.Program(vertex, fragment, count=4)
...
program['transform'] = PanZoom(aspect=1)
window.attach(program['transform'])
...
"""
aliases = { "pan" : "panzoom_translate",
"translate" : "panzoom_translate",
"zoom" : "panzoom_scale",
"scale" : "panzoom_scale" }
def __init__(self, *args, **kwargs):
"""
Initialize the transform.
"""
code = library.get("transforms/panzoom.glsl")
Transform.__init__(self, code, *args, **kwargs)
self._aspect = Transform._get_kwarg("aspect", kwargs) or None
self._pan = np.array(Transform._get_kwarg("pan", kwargs) or (0.,0.))
self._zoom_min = Transform._get_kwarg("zoom_min", kwargs) or 0.01
self._zoom_max = Transform._get_kwarg("zoom_max", kwargs) or 1000
self._zoom = Transform._get_kwarg("zoom", kwargs) or 1
self._width = 1
self._height = 1
self._window_aspect = np.asarray([1.,1.])
@property
def aspect(self):
""" Aspect (width/height) """
return self._aspect
@aspect.setter
def aspect(self, value):
""" Aspect (width/height) """
self._aspect = value
@property
def pan(self):
""" Panning (translation) """
return self._pan
@pan.setter
def pan(self, value):
""" Panning (translation) """
self._pan = np.asarray(value)
if self.is_attached:
self["pan"] = self._pan
@property
def zoom(self):
""" Zoom level """
return self._zoom
@zoom.setter
def zoom(self, value):
""" Zoom level """
self._zoom = np.clip(value, self._zoom_min, self._zoom_max)
if self.is_attached:
aspect = 1.0
if self._aspect is not None:
aspect = self._window_aspect * self._aspect
self["zoom"] = self._zoom * aspect
@property
def zoom_min(self):
""" Minimal zoom level """
return self._zoom_min
@zoom_min.setter
def zoom_min(self, value):
""" Minimal zoom level """
self._zoom_min = min(value, self._zoom_max)
@property
def zoom_max(self):
""" Maximal zoom level """
return self._zoom_max
@zoom_max.setter
def zoom_max(self, value):
""" Maximal zoom level """
self._zoom_max = max(value, self._zoom_min)
def reset(self):
""" Reset transform (zoom=1, pan=(0,0)) """
self.zoom = 1
self.pan = 0,0
def on_attach(self, program):
self["pan"] = self.pan
aspect = 1.0
if self._aspect is not None:
aspect = self._window_aspect * self._aspect
self["zoom"] = self.zoom * aspect
def on_resize(self, width, height):
self._width = float(width)
self._height = float(height)
aspect = self._width/self._height
if aspect > 1.0:
self._window_aspect = np.array([1.0/aspect, 1.0])
else:
self._window_aspect = np.array([1.0, aspect/1.0])
aspect = 1.0
if self._aspect is not None:
aspect = self._window_aspect * self._aspect
self["zoom"] = self.zoom * aspect
# Transmit signal to other transforms
Transform.on_resize(self, width, height)
def on_mouse_scroll(self, x, y, dx, dy):
# Normalize mouse coordinates and invert y axis
x = x/(self._width/2.) - 1.
y = 1.0 - y/(self._height/2.)
zoom = np.clip(self._zoom*(1.0+dy/100.0), self.zoom_min, self.zoom_max)
ratio = zoom / self.zoom
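# Recompute the pan so that the point under the cursor stays fixed while zooming:
# the object point at normalized screen position x is (x - pan) / zoom, and solving
# x == obj * zoom_new + pan_new gives pan_new = x - ratio * (x - pan).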
xpan = x-ratio*(x-self.pan[0])
ypan = y-ratio*(y-self.pan[1])
self.zoom = zoom
self.pan = xpan, ypan
def on_mouse_drag(self, x, y, dx, dy, button):
dx = 2*(dx / self._width)
dy = -2*(dy / self._height)
self.pan = self.pan + (dx,dy)
|
test/tst_masked3.py | timgates42/netcdf4-python | 574 | 11132720 | <reponame>timgates42/netcdf4-python
import unittest
import os
import tempfile
import numpy as np
from numpy import ma
from numpy.testing import assert_array_almost_equal
from netCDF4 import Dataset, default_fillvals
# Test automatic conversion of masked arrays (set_auto_mask())
class SetAutoMaskTestBase(unittest.TestCase):
"""Base object for tests checking the functionality of set_auto_mask()"""
def setUp(self):
self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
self.fillval = default_fillvals["i2"]
self.v = np.array([self.fillval, 5, 4, -9999], dtype = "i2")
self.v_ma = ma.array([self.fillval, 5, 4, -9999], dtype = "i2", mask = [True, False, False, True])
self.scale_factor = 10.
self.add_offset = 5.
self.v_scaled = self.v * self.scale_factor + self.add_offset
self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset
f = Dataset(self.testfile, 'w')
_ = f.createDimension('x', None)
v = f.createVariable('v', "i2", 'x')
v.missing_value = np.array(-9999, v.dtype)
# v[0] not set, will be equal to _FillValue
v[1] = self.v[1]
v[2] = self.v[2]
v[3] = v.missing_value
f.close()
def tearDown(self):
os.remove(self.testfile)
class SetAutoMaskFalse(SetAutoMaskTestBase):
def test_unscaled(self):
"""Testing auto-conversion of masked arrays for set_auto_mask(False)"""
f = Dataset(self.testfile, "r")
f.variables["v"].set_auto_mask(False)
v = f.variables["v"][:]
self.assertEqual(v.dtype, "i2")
self.assertTrue(isinstance(v, np.ndarray))
self.assertTrue(not isinstance(v, ma.core.MaskedArray))
assert_array_almost_equal(v, self.v)
f.close()
def test_scaled(self):
"""Testing auto-conversion of masked arrays for set_auto_mask(False) with scaling"""
# Update test data file
f = Dataset(self.testfile, "a")
f.variables["v"].scale_factor = self.scale_factor
f.variables["v"].add_offset = self.add_offset
f.close()
# Note: Scaling variables is default if scale_factor and/or add_offset are present
f = Dataset(self.testfile, "r")
f.variables["v"].set_auto_mask(False)
v = f.variables["v"][:]
self.assertEqual(v.dtype, "f8")
self.assertTrue(isinstance(v, np.ndarray))
self.assertTrue(not isinstance(v, ma.core.MaskedArray))
assert_array_almost_equal(v, self.v_scaled)
f.close()
class SetAutoMaskTrue(SetAutoMaskTestBase):
def test_unscaled(self):
"""Testing auto-conversion of masked arrays for set_auto_mask(True)"""
f = Dataset(self.testfile)
f.variables["v"].set_auto_mask(True) # The default anyway...
v_ma = f.variables['v'][:]
self.assertEqual(v_ma.dtype, "i2")
self.assertTrue(isinstance(v_ma, np.ndarray))
self.assertTrue(isinstance(v_ma, ma.core.MaskedArray))
assert_array_almost_equal(v_ma, self.v_ma)
f.close()
def test_scaled(self):
"""Testing auto-conversion of masked arrays for set_auto_mask(True)"""
# Update test data file
f = Dataset(self.testfile, "a")
f.variables["v"].scale_factor = self.scale_factor
f.variables["v"].add_offset = self.add_offset
f.close()
# Note: Scaling variables is default if scale_factor and/or add_offset are present
f = Dataset(self.testfile)
f.variables["v"].set_auto_mask(True) # The default anyway...
v_ma = f.variables['v'][:]
self.assertEqual(v_ma.dtype, "f8")
self.assertTrue(isinstance(v_ma, np.ndarray))
self.assertTrue(isinstance(v_ma, ma.core.MaskedArray))
assert_array_almost_equal(v_ma, self.v_ma_scaled)
f.close()
class GlobalSetAutoMaskTest(unittest.TestCase):
def setUp(self):
self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
f = Dataset(self.testfile, 'w')
grp1 = f.createGroup('Group1')
grp2 = f.createGroup('Group2')
f.createGroup('Group3') # empty group
f.createVariable('var0', "i2", ())
grp1.createVariable('var1', 'f8', ())
grp2.createVariable('var2', 'f4', ())
f.close()
def tearDown(self):
os.remove(self.testfile)
def runTest(self):
# Note: The default behaviour is to have both auto-masking and auto-scaling activated.
# This is already tested in tst_scaled.py, so no need to repeat here. Instead,
# disable auto-masking and auto-scaling altogether.
f = Dataset(self.testfile, "r")
# Neither scaling and masking enabled
f.set_auto_maskandscale(False)
v0 = f.variables['var0']
v1 = f.groups['Group1'].variables['var1']
v2 = f.groups['Group2'].variables['var2']
self.assertFalse(v0.scale)
self.assertFalse(v0.mask)
self.assertFalse(v1.scale)
self.assertFalse(v1.mask)
self.assertFalse(v2.scale)
self.assertFalse(v2.mask)
# No auto-masking, but auto-scaling
f.set_auto_maskandscale(True)
f.set_auto_mask(False)
self.assertTrue(v0.scale)
self.assertFalse(v0.mask)
self.assertTrue(v1.scale)
self.assertFalse(v1.mask)
self.assertTrue(v2.scale)
self.assertFalse(v2.mask)
f.close()
if __name__ == '__main__':
unittest.main()
|
examples/contrib/traffic_lights.py | AlohaChina/or-tools | 8,273 | 11132727 | <filename>examples/contrib/traffic_lights.py
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Traffic lights problem in Google CP Solver.
CSPLib problem 16
http://www.cs.st-andrews.ac.uk/~ianm/CSPLib/prob/prob016/index.html
'''
Specification:
Consider a four way traffic junction with eight traffic lights. Four of the
traffic
lights are for the vehicles and can be represented by the variables V1 to V4
with domains
{r,ry,g,y} (for red, red-yellow, green and yellow). The other four traffic
lights are
for the pedestrians and can be represented by the variables P1 to P4 with
domains {r,g}.
The constraints on these variables can be modelled by quaternary constraints
on
(Vi, Pi, Vj, Pj ) for 1<=i<=4, j=(1+i)mod 4 which allow just the tuples
{(r,r,g,g), (ry,r,y,r), (g,g,r,r), (y,r,ry,r)}.
It would be interesting to consider other types of junction (e.g. five roads
intersecting) as well as modelling the evolution over time of the traffic
light sequence.
...
Results
Only 2^2 out of the 2^12 possible assignments are solutions.
(V1,P1,V2,P2,V3,P3,V4,P4) =
{(r,r,g,g,r,r,g,g), (ry,r,y,r,ry,r,y,r), (g,g,r,r,g,g,r,r),
(y,r,ry,r,y,r,ry,r)}
[(1,1,3,3,1,1,3,3), ( 2,1,4,1, 2,1,4,1), (3,3,1,1,3,3,1,1), (4,1, 2,1,4,1,
2,1)}
The problem has relative few constraints, but each is very tight. Local
propagation
appears to be rather ineffective on this problem.
'''
Note: In this model we use only the constraint solver.AllowedAssignments().
Compare with these models:
* MiniZinc: http://www.hakank.org/minizinc/traffic_lights.mzn
* Comet : http://www.hakank.org/comet/traffic_lights.co
* ECLiPSe : http://www.hakank.org/eclipse/traffic_lights.ecl
* Gecode : http://hakank.org/gecode/traffic_lights.cpp
* SICStus : http://hakank.org/sicstus/traffic_lights.pl
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main(base=10, start=1, len1=1, len2=4):
# Create the solver.
solver = pywrapcp.Solver("Traffic lights")
#
# data
#
n = 4
r, ry, g, y = list(range(n))
lights = ["r", "ry", "g", "y"]
# The allowed combinations
allowed = []
allowed.extend([(r, r, g, g), (ry, r, y, r), (g, g, r, r), (y, r, ry, r)])
#
# declare variables
#
V = [solver.IntVar(0, n - 1, "V[%i]" % i) for i in range(n)]
P = [solver.IntVar(0, n - 1, "P[%i]" % i) for i in range(n)]
#
# constraints
#
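# Each pair of adjacent junction arms (Vi, Pi) and (Vj, Pj), with j = (1 + i) mod 4,
# is restricted to the four allowed quaternary tuples via AllowedAssignments (a table
# constraint), exactly as in the CSPLib specification quoted above.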
for i in range(n):
for j in range(n):
if j == (1 + i) % n:
solver.Add(solver.AllowedAssignments((V[i], P[i], V[j], P[j]), allowed))
#
# Search and result
#
db = solver.Phase(V + P, solver.INT_VAR_SIMPLE, solver.INT_VALUE_DEFAULT)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
for i in range(n):
print("%+2s %+2s" % (lights[V[i].Value()], lights[P[i].Value()]), end=" ")
print()
num_solutions += 1
solver.EndSearch()
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
print()
if __name__ == "__main__":
main()
|
data_loader.py | yongbozhi/Deep-Mutual-Learning | 104 | 11132731 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 14:12:10 2019
@author: chxy
"""
import numpy as np
import torch
from torchvision import datasets
from torchvision import transforms
def get_train_loader(data_dir,
batch_size,
random_seed,
shuffle=True,
num_workers=4,
pin_memory=True):
"""
Utility function for loading and returning a multi-process
train iterator over the CIFAR100 dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Args
----
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- data_loader: train set iterator.
"""
# define transforms
trans = transforms.Compose([
transforms.RandomCrop(32, padding=4), # randomly crop the image to 32 x 32
transforms.RandomHorizontalFlip(), # random horizontal flip
transforms.RandomRotation(degrees=15), # random rotation
transforms.ToTensor(), # convert the numpy data to a Tensor
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # normalize
])
# load dataset
dataset = datasets.CIFAR100(root=data_dir,
transform=trans,
download=False,
train=True)
if shuffle:
np.random.seed(random_seed)
train_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory,
)
return train_loader
def get_test_loader(data_dir,
batch_size,
num_workers=4,
pin_memory=True):
"""
Utility function for loading and returning a multi-process
test iterator over the CIFAR100 dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Args
----
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- data_loader: test set iterator.
"""
# define transforms
trans = transforms.Compose([
transforms.ToTensor(), # convert the numpy data to a Tensor
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # normalize
])
# load dataset
dataset = datasets.CIFAR100(
data_dir, train=False, download=False, transform=trans
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False,
num_workers=num_workers, pin_memory=pin_memory,
)
return data_loader
|
plugins/modules/oci_events_rule.py | slmjy/oci-ansible-collection | 108 | 11132736 | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_events_rule
short_description: Manage a Rule resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a Rule resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new rule.
- "This resource has the following action operations in the M(oracle.oci.oci_events_rule_actions) module: change_compartment."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
display_name:
description:
- A string that describes the rule. It does not have to be unique, and you can change it. Avoid entering
confidential information.
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
description:
description:
- A string that describes the details of the rule. It does not have to be unique, and you can change it. Avoid entering
confidential information.
- This parameter is updatable.
type: str
is_enabled:
description:
- Whether or not this rule is currently enabled.
- "Example: `true`"
- Required for create using I(state=present).
- This parameter is updatable.
type: bool
condition:
description:
- "A filter that specifies the event that will trigger actions associated with this rule. A few
important things to remember about filters:"
- "* Fields not mentioned in the condition are ignored. You can create a valid filter that matches
all events with two curly brackets: `{}`"
- " For more examples, see
L(Matching Events with Filters,https://docs.cloud.oracle.com/iaas/Content/Events/Concepts/filterevents.htm).
* For a condition with fields to match an event, the event must contain all the field names
listed in the condition. Field names must appear in the condition with the same nesting
structure used in the event."
- " For a list of reference events, see
L(Services that Produce Events,https://docs.cloud.oracle.com/iaas/Content/Events/Reference/eventsproducers.htm).
* Rules apply to events in the compartment in which you create them and any child compartments.
This means that a condition specified by a rule only matches events emitted from resources in
the compartment or any of its child compartments.
* Wildcard matching is supported with the asterisk (*) character."
- For examples of wildcard matching, see
L(Matching Events with Filters,https://docs.cloud.oracle.com/iaas/Content/Events/Concepts/filterevents.htm)
- "Example: `\\\\\\"eventType\\\\\\": \\\\\\"com.oraclecloud.databaseservice.autonomous.database.backup.end\\\\\\"`"
- Required for create using I(state=present).
- This parameter is updatable.
type: str
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to which this rule belongs.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
actions:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
actions:
description:
- A list of one or more ActionDetails objects.
type: list
elements: dict
required: true
suboptions:
action_type:
description:
- The action to perform if the condition in the rule matches an event.
- "* **ONS:** Send to an Oracle Notification Service topic.
* **OSS:** Send to a stream from Oracle Streaming Service.
* **FAAS:** Send to an Oracle Functions Service endpoint."
type: str
choices:
- "OSS"
- "FAAS"
- "ONS"
required: true
is_enabled:
description:
- Whether or not this action is currently enabled.
- "Example: `true`"
type: bool
required: true
description:
description:
- A string that describes the details of the action. It does not have to be unique, and you can change it. Avoid entering
confidential information.
type: str
stream_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the stream to which messages are
delivered.
- Required when action_type is 'OSS'
type: str
function_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of a Function hosted by Oracle Functions
Service.
- Applicable when action_type is 'FAAS'
type: str
topic_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the topic to which messages are
delivered.
- Applicable when action_type is 'ONS'
type: str
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. Exists for cross-compatibility
only.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
- This parameter is updatable.
type: dict
rule_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this rule.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the Rule.
- Use I(state=present) to create or update a Rule.
- Use I(state=absent) to delete a Rule.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create rule
oci_events_rule:
# required
display_name: display_name_example
is_enabled: true
condition: condition_example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
actions:
# required
actions:
- # required
action_type: OSS
is_enabled: true
stream_id: "ocid1.stream.oc1..xxxxxxEXAMPLExxxxxx"
# optional
description: description_example
# optional
description: description_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update rule
oci_events_rule:
# required
rule_id: "ocid1.rule.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
description: description_example
is_enabled: true
condition: condition_example
actions:
# required
actions:
- # required
action_type: OSS
is_enabled: true
stream_id: "ocid1.stream.oc1..xxxxxxEXAMPLExxxxxx"
# optional
description: description_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update rule using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_events_rule:
# required
display_name: display_name_example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
description: description_example
is_enabled: true
condition: condition_example
actions:
# required
actions:
- # required
action_type: OSS
is_enabled: true
stream_id: "ocid1.stream.oc1..xxxxxxEXAMPLExxxxxx"
# optional
description: description_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Delete rule
oci_events_rule:
# required
rule_id: "ocid1.rule.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete rule using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_events_rule:
# required
display_name: display_name_example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
"""
RETURN = """
rule:
description:
- Details of the Rule resource acted upon by the current operation
returned: on success
type: complex
contains:
display_name:
description:
- A string that describes the rule. It does not have to be unique, and you can change it. Avoid entering
confidential information.
- "Example: `\\"This rule sends a notification upon completion of DbaaS backup.\\"`"
returned: on success
type: str
sample: display_name_example
description:
description:
- A string that describes the details of the rule. It does not have to be unique, and you can change it. Avoid entering
confidential information.
returned: on success
type: str
sample: description_example
lifecycle_state:
description:
- The current state of the rule.
returned: on success
type: str
sample: CREATING
condition:
description:
- "A filter that specifies the event that will trigger actions associated with this rule. A few
important things to remember about filters:"
- "* Fields not mentioned in the condition are ignored. You can create a valid filter that matches
all events with two curly brackets: `{}`"
- " For more examples, see
L(Matching Events with Filters,https://docs.cloud.oracle.com/iaas/Content/Events/Concepts/filterevents.htm).
* For a condition with fields to match an event, the event must contain all the field names
listed in the condition. Field names must appear in the condition with the same nesting
structure used in the event."
- " For a list of reference events, see
L(Services that Produce Events,https://docs.cloud.oracle.com/iaas/Content/Events/Reference/eventsproducers.htm).
* Rules apply to events in the compartment in which you create them and any child compartments.
This means that a condition specified by a rule only matches events emitted from resources in
the compartment or any of its child compartments.
* Wildcard matching is supported with the asterisk (*) character."
- For examples of wildcard matching, see
L(Matching Events with Filters,https://docs.cloud.oracle.com/iaas/Content/Events/Concepts/filterevents.htm)
- "Example: `\\\\\\"eventType\\\\\\": \\\\\\"com.oraclecloud.databaseservice.autonomous.database.backup.end\\\\\\"`"
returned: on success
type: str
sample: condition_example
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to which this rule belongs.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
is_enabled:
description:
- Whether or not this rule is currently enabled.
- "Example: `true`"
returned: on success
type: bool
sample: true
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. Exists for cross-
compatibility only.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
actions:
description:
- ""
returned: on success
type: complex
contains:
actions:
description:
- A list of one or more Action objects.
returned: on success
type: complex
contains:
action_type:
description:
- The action to perform if the condition in the rule matches an event.
- "* **ONS:** Send to an Oracle Notification Service topic.
* **OSS:** Send to a stream from Oracle Streaming Service.
* **FAAS:** Send to an Oracle Functions Service endpoint."
returned: on success
type: str
sample: ONS
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the action.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_message:
description:
- A message generated by the Events service about the current state of this action.
returned: on success
type: str
sample: lifecycle_message_example
lifecycle_state:
description:
                                - The current state of the action.
returned: on success
type: str
sample: CREATING
is_enabled:
description:
- Whether or not this action is currently enabled.
- "Example: `true`"
returned: on success
type: bool
sample: true
description:
description:
- A string that describes the details of the action. It does not have to be unique, and you can change it. Avoid entering
confidential information.
returned: on success
type: str
sample: description_example
function_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of a Function hosted by Oracle
Functions Service.
returned: on success
type: str
sample: "ocid1.function.oc1..xxxxxxEXAMPLExxxxxx"
topic_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the topic to which messages are
delivered.
returned: on success
type: str
sample: "ocid1.topic.oc1..xxxxxxEXAMPLExxxxxx"
stream_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the stream to which messages are
delivered.
returned: on success
type: str
sample: "ocid1.stream.oc1..xxxxxxEXAMPLExxxxxx"
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this rule.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- The time this rule was created, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339)
timestamp format.
- "Example: `2018-09-12T22:47:12.613Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_message:
description:
- A message generated by the Events service about the current state of this rule.
returned: on success
type: str
sample: lifecycle_message_example
sample: {
"display_name": "display_name_example",
"description": "description_example",
"lifecycle_state": "CREATING",
"condition": "condition_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"is_enabled": true,
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"actions": {
"actions": [{
"action_type": "ONS",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_message": "lifecycle_message_example",
"lifecycle_state": "CREATING",
"is_enabled": true,
"description": "description_example",
"function_id": "ocid1.function.oc1..xxxxxxEXAMPLExxxxxx",
"topic_id": "ocid1.topic.oc1..xxxxxxEXAMPLExxxxxx",
"stream_id": "ocid1.stream.oc1..xxxxxxEXAMPLExxxxxx"
}]
},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"lifecycle_message": "lifecycle_message_example"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.events import EventsClient
from oci.events.models import CreateRuleDetails
from oci.events.models import UpdateRuleDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class RuleHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def get_module_resource_id_param(self):
return "rule_id"
def get_module_resource_id(self):
return self.module.params.get("rule_id")
def get_get_fn(self):
return self.client.get_rule
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_rule, rule_id=self.module.params.get("rule_id"),
)
def get_required_kwargs_for_list(self):
required_list_method_params = [
"compartment_id",
]
return dict(
(param, self.module.params[param]) for param in required_list_method_params
)
def get_optional_kwargs_for_list(self):
optional_list_method_params = ["display_name"]
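        # Only forward an optional filter when it has a value and is relevant for
        # matching the existing resource: names are used as identifiers, or no
        # `key_by` restriction is set, or the parameter is listed in `key_by`.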
return dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
self._use_name_as_identifier()
or (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
)
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(self.client.list_rules, **kwargs)
def get_create_model_class(self):
return CreateRuleDetails
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_rule,
call_fn_args=(),
call_fn_kwargs=dict(create_rule_details=create_details,),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.CREATE_OPERATION_KEY,
),
)
def get_update_model_class(self):
return UpdateRuleDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_rule,
call_fn_args=(),
call_fn_kwargs=dict(
rule_id=self.module.params.get("rule_id"),
update_rule_details=update_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.UPDATE_OPERATION_KEY,
),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_rule,
call_fn_args=(),
call_fn_kwargs=dict(rule_id=self.module.params.get("rule_id"),),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.DELETE_OPERATION_KEY,
),
)
RuleHelperCustom = get_custom_class("RuleHelperCustom")
class ResourceHelper(RuleHelperCustom, RuleHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
display_name=dict(aliases=["name"], type="str"),
description=dict(type="str"),
is_enabled=dict(type="bool"),
condition=dict(type="str"),
compartment_id=dict(type="str"),
actions=dict(
type="dict",
options=dict(
actions=dict(
type="list",
elements="dict",
required=True,
options=dict(
action_type=dict(
type="str",
required=True,
choices=["OSS", "FAAS", "ONS"],
),
is_enabled=dict(type="bool", required=True),
description=dict(type="str"),
stream_id=dict(type="str"),
function_id=dict(type="str"),
topic_id=dict(type="str"),
),
)
),
),
freeform_tags=dict(type="dict"),
defined_tags=dict(type="dict"),
rule_id=dict(aliases=["id"], type="str"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="rule",
service_client_class=EventsClient,
namespace="events",
)
result = dict(changed=False)
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
tests/test_api.py | kingosticks/mopidy-soundcloud | 161 | 11132748 | import os.path
import unittest
from unittest import mock
import vcr
import mopidy_soundcloud
from mopidy.models import Track
from mopidy_soundcloud import Extension
from mopidy_soundcloud.soundcloud import SoundCloudClient, readable_url
local_path = os.path.abspath(os.path.dirname(__file__))
my_vcr = vcr.VCR(
serializer="yaml",
cassette_library_dir=local_path + "/fixtures",
record_mode="once",
match_on=["uri", "method"],
decode_compressed_response=False,
filter_headers=["Authorization"],
)
class ApiTest(unittest.TestCase):
@my_vcr.use_cassette("sc-login.yaml")
def setUp(self):
config = Extension().get_config_schema()
config["auth_token"] = "<PASSWORD>"
config["explore_songs"] = 10
self.api = SoundCloudClient({"soundcloud": config, "proxy": {}})
def test_sets_user_agent(self):
agent = "Mopidy-SoundCloud/%s Mopidy/" % mopidy_soundcloud.__version__
assert agent in self.api.http_client.headers["user-agent"]
def test_public_client_no_token(self):
token_key = "authorization"
assert token_key not in self.api.public_stream_client.headers._store
def test_resolves_string(self):
_id = self.api.parse_track_uri("soundcloud:song.38720262")
assert _id == "38720262"
@my_vcr.use_cassette("sc-login-error.yaml")
def test_responds_with_error(self):
with mock.patch("mopidy_soundcloud.soundcloud.logger.error") as d:
config = Extension().get_config_schema()
config["auth_token"] = "<PASSWORD>"
SoundCloudClient({"soundcloud": config, "proxy": {}}).user
d.assert_called_once_with(
'Invalid "auth_token" used for SoundCloud authentication!'
)
@my_vcr.use_cassette("sc-login.yaml")
def test_returns_username(self):
user = self.api.user.get("username")
assert user == "Nick Steel 3"
@my_vcr.use_cassette("sc-resolve-track.yaml")
def test_resolves_object(self):
trackc = {}
trackc["uri"] = "soundcloud:song.38720262"
track = Track(**trackc)
id = self.api.parse_track_uri(track)
assert id == "38720262"
@my_vcr.use_cassette("sc-resolve-track-none.yaml")
def test_resolves_unknown_track_to_none(self):
track = self.api.get_track("s38720262")
assert track is None
@my_vcr.use_cassette("sc-resolve-track.yaml")
def test_resolves_track(self):
track = self.api.get_track("13158665")
assert isinstance(track, Track)
assert track.uri == "soundcloud:song/Munching at Tiannas house.13158665"
@my_vcr.use_cassette("sc-resolve-http.yaml")
def test_resolves_http_url(self):
track = self.api.resolve_url(
"https://soundcloud.com/bbc-radio-4/m-w-cloud"
)[0]
assert isinstance(track, Track)
assert (
track.uri
== "soundcloud:song/That Mitchell and Webb Sound The Cloud.122889665"
)
@my_vcr.use_cassette("sc-resolve-set.yaml")
def test_resolves_set_url(self):
expected_tracks = [
"01 Dash And Blast",
"02 We Flood Empty Lakes",
"03 A Song For Starlit Beaches",
"04 Illuminate My Heart, My Darling",
]
tracks = self.api.resolve_url(
"https://soundcloud.com/yndihalda/sets/dash-and-blast"
)
assert len(tracks) == 4
for i, _ in enumerate(expected_tracks):
assert isinstance(tracks[i], Track)
assert tracks[i].name == expected_tracks[i]
assert tracks[i].length > 500
assert len(tracks[i].artists) == 1
assert list(tracks[i].artists)[0].name == "<NAME>"
@my_vcr.use_cassette("sc-liked.yaml")
def test_get_user_likes(self):
tracks = self.api.get_likes()
assert len(tracks) == 3
assert isinstance(tracks[0], Track)
assert tracks[1].name == "Pelican - Deny The Absolute"
@my_vcr.use_cassette("sc-stream.yaml")
def test_get_user_stream(self):
tracks = self.api.get_user_stream()
assert len(tracks) == 10
assert isinstance(tracks[0], Track)
assert tracks[2].name == "JW Ep 20- <NAME>"
@my_vcr.use_cassette("sc-following.yaml")
def test_get_followings(self):
users = self.api.get_followings()
assert len(users) == 10
assert users[0] == ("Young Legionnaire", "992503")
assert users[1] == ("Tall Ships", "1710483")
assert users[8] == ("Pelican Song", "27945548")
assert users[9] == ("sleepmakeswaves", "1739693")
@my_vcr.use_cassette("sc-user-tracks.yaml")
def test_get_user_tracks(self):
expected_tracks = [
"The Wait",
"The Cliff (Palms Remix)",
"The Cliff (Just<NAME> Remix)",
"The Cliff (Vocal Version)",
"Pelican - The Creeper",
"Pelican - Lathe Biosas",
"Pelican - Ephemeral",
"Pelican - Deny the Absolute",
"Pelican - Immutable Dusk",
"Pelican - Strung Up From The Sky",
]
tracks = self.api.get_tracks(27945548)
for i, _ in enumerate(expected_tracks):
assert isinstance(tracks[i], Track)
assert tracks[i].name == expected_tracks[i]
assert tracks[i].length > 500
assert len(tracks[i].artists) == 1
@my_vcr.use_cassette("sc-set.yaml")
def test_get_set(self):
tracks = self.api.get_set("10961826")
assert len(tracks) == 1
assert isinstance(tracks[0], dict)
@my_vcr.use_cassette("sc-set-invalid.yaml")
def test_get_invalid_set(self):
tracks = self.api.get_set("blahblahrubbosh")
assert tracks == []
@my_vcr.use_cassette("sc-sets.yaml")
def test_get_sets(self):
sets = self.api.get_sets()
assert len(sets) == 2
name, set_id, tracks = sets[1]
assert name == "Pelican"
assert set_id == "10961826"
assert len(tracks) == 1
    def test_readable_url(self):
assert "Barsuk Records" == readable_url('"@"Barsuk Records')
assert "_Barsuk Records" == readable_url("_Barsuk 'Records'")
@my_vcr.use_cassette("sc-resolve-track-id.yaml")
def test_resolves_stream_track(self):
track = self.api.get_track("13158665", True)
assert isinstance(track, Track)
assert track.uri == (
"https://cf-media.sndcdn.com/fxguEjG4ax6B.128.mp3?Policy="
"eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiKjovL2NmLW1lZGlhLnNu"
"ZGNkbi5jb20vZnhndUVqRzRheDZCLjEyOC5tcDMiLCJDb25kaXRpb24i"
"OnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjE2MTc3MzMy"
"MDJ9fX1dfQ__&Signature=R6mfsrmYiPXF8Q-Eh0vsmtiqnIAkFMckw"
"6qETd0gjJlXnxzjXq~ZiY4ClwgChjfv9e5NdID54hcSrq3jamUCuQ-Gr"
"94WH0beJFXa9itVnV2A83~FE6Fye~ocTsVx7fzrpDFKJ80csI-QtLkV3"
"3E06oMClsMPbjvdw3d1caFpGfkck7OCmV0Z9Jat0dYDkRfjGZF7HqGRP"
"-upiIJ3l0cWfSyhRJ~F5o29TASJMQMQAigjCV0by9DsK2Naw1tcAW4DH"
"YJF4oOUQkTLRwtw0B5mJXfKfFGQxjj1RSGZNFZxG0oV2nD1-svYX-Enz"
"ldPOUBDvyUr-nNmS0wR9Qm5XsTAbQ__&Key-Pair-Id=<KEY>"
"M5DG6EPQ"
)
@my_vcr.use_cassette("sc-resolve-track-id.yaml")
def test_unstreamable_track(self):
track = self.api._get("tracks/13158665")
track["streamable"] = False
track = self.api.parse_track(track)
assert track is None
@my_vcr.use_cassette("sc-resolve-app-client-id.yaml")
def test_resolves_app_client_id(self):
track = self.api._get("tracks/13158665")
track["sharing"] = "private"
track = self.api.parse_track(track, True)
assert track.uri == (
"https://cf-media.sndcdn.com/fxguEjG4ax6B.128.mp3?Policy="
"eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiKjovL2NmLW1lZGlhLnNu"
"ZGNkbi5jb20vZnhndUVqRzRheDZCLjEyOC5tcDMiLCJDb25kaXRpb24i"
"OnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjE2MTc3Mzcw"
"ODV9fX1dfQ__&Signature=AT7ZL9gDe~34stPzDOORReIeNTbEpo~27"
"VP-set6t-T2mIW-W1fuWW6ny4-kd5XsW7mgndht1poURixYx1bUNTJFt"
"SX1LjjfvUaGfA5w3eDbfSHvlmh8fqIVN6RZAbCwQUbcndn8TI5Q1EPfP"
"8Aq-DLsIdUEE~3gxIVvX-YgzDZtxRMue0eefgp5oxk5z3KbHILPAyeS-"
"GQx4JIgMxSWaMKiG0Dx0raTNW8JFNugs9u5h62J21BxGSd6aifU9boff"
"khg1yWR9ccqHjMdDSRGpHLSBin6iNNHRzHj9vC4cq--DexYnyLQtdZp3"
"UlaXbFlP~-3XBMf6FLNiPbUA4HxgA__&Key-Pair-Id=<KEY>"
"M5DG6EPQ"
)
@my_vcr.use_cassette("sc-resolve-track-id-invalid-client-id.yaml")
def test_resolves_stream_track_invalid_id(self):
self.api.public_client_id = "blahblahrubbosh"
track = self.api.get_track("13158665", True)
assert isinstance(track, Track)
assert track.uri == (
"https://cf-media.sndcdn.com/fxguEjG4ax6B.128.mp3?Policy="
"eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiKjovL2NmLW1lZGlhLnNu"
"ZGNkbi5jb20vZnhndUVqRzRheDZCLjEyOC5tcDMiLCJDb25kaXRpb24i"
"OnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjE2MTc1NDI2"
"MDh9fX1dfQ__&Signature=SwnMkrFlBL1Es-S7DMuHLiAzYxgKdl4bk"
"sjUny73MKN9d~54MhUzYOmgzETiERC73tyGo3iovjjk6P556J3FvAibn"
"adM7ip5pPNT5HpyS4~xE2zCAg9s1DnDSypcUzOT6pvKKTJ3F95w6~kr3"
"lRbRfDHsuq6O1HKB4k~NBVdTMRFhDRZJPdGg2BJFiI5M-IA-Ut5CQUJS"
"kYNXG1kQtvIJNenAUQAuQm0iKv-um7C5YbgkdOpZC~HU49YiLcCw8T~b"
"VYRgspxMctUQssmTg5yysD65vkQk8QVWpx9kE9kxdCL7oFqdAbv9tsgu"
"s7~nptZlygrOVi9TIyikLsi6BeMQw__&Key-Pair-Id=<KEY>"
"<KEY>"
)
def test_parse_fail_reason(self):
test_reason = "Unknown"
reason_res = self.api.parse_fail_reason(test_reason)
assert reason_res == ""
@my_vcr.use_cassette("sc-search.yaml")
def test_search(self):
tracks = self.api.search("the great descent")
assert len(tracks) == 10
assert isinstance(tracks[0], Track)
assert tracks[0].name == "Turn Around (Mix1)"
|
raft/rpc.py | shunliz/raft | 113 | 11132767 | <filename>raft/rpc.py
import json
import socket
class Rpc(object):
def __init__(self, addr=None, timeout=None):
        self.addr = None
        self.ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if addr:
self.bind(tuple(addr))
if timeout:
self.ss.settimeout(timeout)
def bind(self, addr):
self.addr = tuple(addr)
self.ss.bind(addr)
def settimeout(self, timeout):
self.ss.settimeout(timeout)
def send(self, data, addr):
data = json.dumps(data).encode("utf-8")
self.ss.sendto(data, tuple(addr))
def recv(self, addr=None, timeout=None):
if addr:
self.bind(addr)
if not self.addr:
raise ("please bind to an addr")
if timeout:
self.settimeout(timeout)
data, addr = self.ss.recvfrom(65535)
return json.loads(data), addr
def close(self):
self.ss.close()
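# Minimal usage sketch: two Rpc endpoints on localhost exchange one JSON
# message. The addresses and ports below are arbitrary example values.
if __name__ == "__main__":
    server = Rpc(("127.0.0.1", 9001), timeout=2)
    client = Rpc(("127.0.0.1", 9002), timeout=2)
    client.send({"type": "ping"}, ("127.0.0.1", 9001))
    data, sender = server.recv()
    print(data, sender)  # e.g. {'type': 'ping'} ('127.0.0.1', 9002)
    server.close()
    client.close()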
|
astropy/uncertainty/core.py | mehrdad-shokri/astropy | 445 | 11132785 | <filename>astropy/uncertainty/core.py
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Distribution class and associated machinery.
"""
import numpy as np
from astropy import units as u
from astropy import stats
__all__ = ['Distribution']
# we set this by hand because the symbolic expression (below) requires scipy
# SMAD_SCALE_FACTOR = 1 / scipy.stats.norm.ppf(0.75)
SMAD_SCALE_FACTOR = 1.48260221850560203193936104071326553821563720703125
class Distribution:
"""
A scalar value or array values with associated uncertainty distribution.
This object will take its exact type from whatever the ``samples`` argument
is. In general this is expected to be an `~astropy.units.Quantity` or
`numpy.ndarray`, although anything compatible with `numpy.asanyarray` is
possible.
See also: http://docs.astropy.org/en/stable/uncertainty/
Parameters
----------
samples : array_like
        The distribution, with sampling along the *trailing* axis. If 1D, the
sole dimension is used as the sampling axis (i.e., it is a scalar
distribution).
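
    Examples
    --------
    A minimal sketch of intended use (assuming ``Distribution`` is importable
    from ``astropy.uncertainty``; the numbers are illustrative, not exact
    outputs)::

        import numpy as np
        from astropy import units as u
        from astropy.uncertainty import Distribution

        # 1000 Monte Carlo samples per element; the trailing axis is the sample axis
        samples = np.random.normal(10., 1., size=(3, 1000)) * u.mas
        parallax = Distribution(samples)
        parallax.shape                          # (3,) - the sample axis is folded away
        parallax.pdf_mean()                     # ~10 mas per element
        parallax.pdf_percentiles([16, 50, 84])  # per-element percentiles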
"""
_generated_subclasses = {}
def __new__(cls, samples):
if isinstance(samples, Distribution):
samples = samples.distribution
else:
samples = np.asanyarray(samples, order='C')
if samples.shape == ():
raise TypeError('Attempted to initialize a Distribution with a scalar')
new_dtype = np.dtype({'names': ['samples'],
'formats': [(samples.dtype, (samples.shape[-1],))]})
samples_cls = type(samples)
new_cls = cls._generated_subclasses.get(samples_cls)
if new_cls is None:
# Make a new class with the combined name, inserting Distribution
# itself below the samples class since that way Quantity methods
# like ".to" just work (as .view() gets intercepted). However,
# repr and str are problems, so we put those on top.
# TODO: try to deal with this at the lower level. The problem is
# that array2string does not allow one to override how structured
# arrays are typeset, leading to all samples to be shown. It may
# be possible to hack oneself out by temporarily becoming a void.
new_name = samples_cls.__name__ + cls.__name__
new_cls = type(
new_name,
(_DistributionRepr, samples_cls, ArrayDistribution),
{'_samples_cls': samples_cls})
cls._generated_subclasses[samples_cls] = new_cls
self = samples.view(dtype=new_dtype, type=new_cls)
# Get rid of trailing dimension of 1.
self.shape = samples.shape[:-1]
return self
@property
def distribution(self):
return self['samples']
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
converted = []
outputs = kwargs.pop('out', None)
if outputs:
kwargs['out'] = tuple((output.distribution if
isinstance(output, Distribution)
else output) for output in outputs)
if method in {'reduce', 'accumulate', 'reduceat'}:
axis = kwargs.get('axis', None)
if axis is None:
assert isinstance(inputs[0], Distribution)
kwargs['axis'] = tuple(range(inputs[0].ndim))
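        # Unwrap Distribution inputs to their raw samples; give plain shaped
        # arrays a trailing length-1 axis so they broadcast against the sample axis.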
for input_ in inputs:
if isinstance(input_, Distribution):
converted.append(input_.distribution)
else:
shape = getattr(input_, 'shape', ())
if shape:
converted.append(input_[..., np.newaxis])
else:
converted.append(input_)
results = getattr(ufunc, method)(*converted, **kwargs)
if not isinstance(results, tuple):
results = (results,)
if outputs is None:
outputs = (None,) * len(results)
finals = []
for result, output in zip(results, outputs):
if output is not None:
finals.append(output)
else:
if getattr(result, 'shape', False):
finals.append(Distribution(result))
else:
finals.append(result)
return finals if len(finals) > 1 else finals[0]
@property
def n_samples(self):
"""
The number of samples of this distribution. A single `int`.
"""
return self.dtype['samples'].shape[0]
def pdf_mean(self, dtype=None, out=None):
"""
The mean of this distribution.
Arguments are as for `numpy.mean`.
"""
return self.distribution.mean(axis=-1, dtype=dtype, out=out)
def pdf_std(self, dtype=None, out=None, ddof=0):
"""
The standard deviation of this distribution.
Arguments are as for `numpy.std`.
"""
return self.distribution.std(axis=-1, dtype=dtype, out=out, ddof=ddof)
def pdf_var(self, dtype=None, out=None, ddof=0):
"""
The variance of this distribution.
Arguments are as for `numpy.var`.
"""
return self.distribution.var(axis=-1, dtype=dtype, out=out, ddof=ddof)
def pdf_median(self, out=None):
"""
The median of this distribution.
Parameters
----------
out : array, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
"""
return np.median(self.distribution, axis=-1, out=out)
def pdf_mad(self, out=None):
"""
The median absolute deviation of this distribution.
Parameters
----------
out : array, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
"""
median = self.pdf_median(out=out)
absdiff = np.abs(self - median)
return np.median(absdiff.distribution, axis=-1, out=median,
overwrite_input=True)
def pdf_smad(self, out=None):
"""
The median absolute deviation of this distribution rescaled to match the
standard deviation for a normal distribution.
Parameters
----------
out : array, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
"""
result = self.pdf_mad(out=out)
result *= SMAD_SCALE_FACTOR
return result
def pdf_percentiles(self, percentile, **kwargs):
"""
Compute percentiles of this Distribution.
Parameters
----------
percentile : float or array of floats or `~astropy.units.Quantity`
            The desired percentiles of the distribution (i.e., on [0,100]).
`~astropy.units.Quantity` will be converted to percent, meaning
that a ``dimensionless_unscaled`` `~astropy.units.Quantity` will
be interpreted as a quantile.
Additional keywords are passed into `numpy.percentile`.
Returns
-------
percentiles : `~astropy.units.Quantity`
The ``fracs`` percentiles of this distribution.
"""
percentile = u.Quantity(percentile, u.percent).value
percs = np.percentile(self.distribution, percentile, axis=-1, **kwargs)
# numpy.percentile strips units for unclear reasons, so we have to make
# a new object with units
if hasattr(self.distribution, '_new_view'):
return self.distribution._new_view(percs)
else:
return percs
def pdf_histogram(self, **kwargs):
"""
Compute histogram over the samples in the distribution.
Parameters
----------
All keyword arguments are passed into `astropy.stats.histogram`. Note
        that some of these options may not be valid for some multidimensional
distributions.
Returns
-------
hist : array
The values of the histogram. Trailing dimension is the histogram
dimension.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``. Trailing dimension is the
bin histogram dimension.
"""
distr = self.distribution
raveled_distr = distr.reshape(distr.size//distr.shape[-1], distr.shape[-1])
nhists = []
bin_edges = []
for d in raveled_distr:
nhist, bin_edge = stats.histogram(d, **kwargs)
nhists.append(nhist)
bin_edges.append(bin_edge)
nhists = np.array(nhists)
nh_shape = self.shape + (nhists.size//self.size,)
bin_edges = np.array(bin_edges)
be_shape = self.shape + (bin_edges.size//self.size,)
return nhists.reshape(nh_shape), bin_edges.reshape(be_shape)
class ScalarDistribution(Distribution, np.void):
"""Scalar distribution.
    This class mostly exists to make `~numpy.array2string` possible for
all subclasses. It is a scalar element, still with n_samples samples.
"""
pass
class ArrayDistribution(Distribution, np.ndarray):
    # This includes the important override of view and __getitem__
# which are needed for all ndarray subclass Distributions, but not
# for the scalar one.
_samples_cls = np.ndarray
# Override view so that we stay a Distribution version of the new type.
def view(self, dtype=None, type=None):
if type is None:
if issubclass(dtype, np.ndarray):
type = dtype
dtype = None
else:
raise ValueError('Cannot set just dtype for a Distribution.')
result = self.distribution.view(dtype, type)
return Distribution(result)
# Override __getitem__ so that 'samples' is returned as the sample class.
def __getitem__(self, item):
result = super().__getitem__(item)
if item == 'samples':
# Here, we need to avoid our own redefinition of view.
return super(ArrayDistribution, result).view(self._samples_cls)
elif isinstance(result, np.void):
return result.view((ScalarDistribution, result.dtype))
else:
return result
class _DistributionRepr:
def __repr__(self):
reprarr = repr(self.distribution)
if reprarr.endswith('>'):
firstspace = reprarr.find(' ')
reprarr = reprarr[firstspace+1:-1] # :-1] removes the ending '>'
return '<{} {} with n_samples={}>'.format(self.__class__.__name__,
reprarr, self.n_samples)
else: # numpy array-like
firstparen = reprarr.find('(')
reprarr = reprarr[firstparen:]
return '{}{} with n_samples={}'.format(self.__class__.__name__,
reprarr, self.n_samples)
def __str__(self):
distrstr = str(self.distribution)
toadd = f' with n_samples={self.n_samples}'
return distrstr + toadd
def _repr_latex_(self):
if hasattr(self.distribution, '_repr_latex_'):
superlatex = self.distribution._repr_latex_()
toadd = fr', \; n_{{\rm samp}}={self.n_samples}'
return superlatex[:-1] + toadd + superlatex[-1]
else:
return None
class NdarrayDistribution(_DistributionRepr, ArrayDistribution):
pass
# Ensure our base NdarrayDistribution is known.
Distribution._generated_subclasses[np.ndarray] = NdarrayDistribution
|
PhysicsTools/PythonAnalysis/examples/MCTruth.py | ckamtsikis/cmssw | 852 | 11132787 | <reponame>ckamtsikis/cmssw<filename>PhysicsTools/PythonAnalysis/examples/MCTruth.py
from __future__ import print_function
from PhysicsTools.PythonAnalysis import *
from ROOT import *
# prepare the FWLite autoloading mechanism
gSystem.Load("libFWCoreFWLite.so")
ROOT.FWLiteEnabler.enable()
# load the file with the generator output
theFile = TFile("generatorOutput.root")
events = theFile.Get("Events")
# Needed for SetAddress to work right
events.GetEntry()
# set the buffers for the branches you want to access
# 1) create a buffer
# 2) open the root branch
# 3) connect buffer and branch
# example: generator particles
source = edm.HepMCProduct()
sourceBranch = events.GetBranch(events.GetAlias("source"))
sourceBranch.SetAddress(source)
# now loop over the events
for index in all(events):
# update all branches - the buffers are filled automatically
# Hint: put all you branches in a list and loop over it
sourceBranch.GetEntry(index)
events.GetEntry(index,0)
# do something with the data
genEvent = source.GetEvent();
print(genEvent.event_number())
|
src/pretix/base/migrations/0071_auto_20170729_1616.py | pajowu/pretix | 1,248 | 11132799 | <reponame>pajowu/pretix
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-29 16:16
from __future__ import unicode_literals
import i18nfield.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0070_auto_20170719_0910'),
]
operations = [
migrations.AddField(
model_name='question',
name='help_text',
field=i18nfield.fields.I18nTextField(blank=True, help_text='If the question needs to be explained or clarified, do it here!', null=True, verbose_name='Help text'),
),
migrations.AlterField(
model_name='invoiceaddress',
name='vat_id',
field=models.CharField(blank=True, help_text='Only for business customers within the EU.', max_length=255, verbose_name='VAT ID'),
),
]
|
examples/emb_fasttext_demo.py | msgi/nlp-tour | 1,559 | 11132807 | from smartnlp.embedding.fasttext_model import FastTextModel
if __name__ == '__main__':
    # CBOW model
model = FastTextModel('data/tianlong_seg.txt', 'model/fasttext/model.vec', model_type='cbow')
    # skipgram model
# model = FastTextModel('data/tianlong_seg.txt', 'model/fasttext/model')
print(model.get_nearest_neighbors('段誉', 10))
|
hydrachain/tests/test_base.py | bts/hydrachain | 406 | 11132810 | <gh_stars>100-1000
# from hydrachain import protocol
from hydrachain.consensus.base import Vote, VoteBlock, VoteNil, LockSet, ishash, Ready
from hydrachain.consensus.base import DoubleVotingError, InvalidVoteError, MissingSignatureError
from hydrachain.consensus.base import BlockProposal, genesis_signing_lockset, InvalidProposalError
from hydrachain.consensus.base import Proposal, VotingInstruction, InvalidSignature, Signed
from ethereum import utils, tester
import rlp
import pytest
privkey = 'x' * 32
def test_signed():
s = Signed(v=0, r=0, s=0)
assert s.sender is None
with pytest.raises(MissingSignatureError):
s.hash
s.sign(privkey)
sender = s.sender
h = s.hash
s.v = 0 # change signature, in order to test signature independend hash
assert s.sender == sender
assert s.hash == h
def test_vote():
h, r = 2, 3
bh = '0' * 32
sender = utils.privtoaddr(privkey)
v = Vote(h, r)
v2 = Vote(h, r, blockhash=bh)
assert isinstance(v, Vote)
assert isinstance(v2, Vote)
assert isinstance(v, VoteNil)
assert isinstance(v, rlp.Serializable)
assert isinstance(v2, VoteBlock)
v.sign(privkey)
s = v.sender
assert s == sender
v2.sign(privkey)
assert v2.sender == sender
# encode
assert len(v.get_sedes()) == len(v.fields) == 6
vs = rlp.encode(v)
assert isinstance(vs, bytes)
    print(rlp.decode(vs))
vd = rlp.decode(vs, Vote)
assert isinstance(vd, VoteNil)
assert vd.blockhash == ''
assert vd == v
v2s = rlp.encode(v2)
v2d = rlp.decode(v2s, Vote)
assert isinstance(v2d, VoteBlock)
assert v2d.blockhash == bh
assert v2d == v2
assert v != v2
assert vd != v2d
assert len(set((v, vd))) == 1
assert len(set((v2, v2d))) == 1
assert len(set((v, vd, v2, v2d))) == 2
privkeys = [chr(i) * 32 for i in range(1, 11)]
validators = [utils.privtoaddr(p) for p in privkeys]
def test_ready():
ls = LockSet(num_eligible_votes=len(privkeys))
s = Ready(0, current_lockset=ls)
assert s.current_lockset == ls
s.sign(privkey)
s0 = Ready(0, current_lockset=ls)
s0.sign(privkey)
s1 = Ready(1, current_lockset=ls)
s1.sign(privkey)
assert s == s0
assert s != s1
def test_LockSet():
ls = LockSet(num_eligible_votes=len(privkeys))
assert not ls
assert len(ls) == 0
bh = '0' * 32
r, h = 2, 3
v1 = VoteBlock(h, r, bh)
# add not signed
with pytest.raises(InvalidVoteError):
ls.add(v1)
assert not ls
assert v1 not in ls
# add signed
v1.sign(privkeys[0])
ls.add(v1)
assert ls
assert len(ls) == 1
lsh = ls.hash
ls.add(v1)
assert lsh == ls.hash
assert len(ls) == 1
# second vote same sender
v2 = VoteBlock(h, r, bh)
v2.sign(privkeys[0])
ls.add(v1)
ls.add(v2)
assert lsh == ls.hash
assert len(ls) == 1
# third vote
v3 = VoteBlock(h, r, bh)
v3.sign(privkeys[1])
ls.add(v1)
ls.add(v3)
assert lsh != ls.hash
assert len(ls) == 2
assert v3 in ls
lsh = ls.hash
# vote wrong round
v4 = VoteBlock(h, r + 1, bh)
v4.sign(privkeys[2])
with pytest.raises(InvalidVoteError):
ls.add(v4)
assert lsh == ls.hash
assert len(ls) == 2
assert v4 not in ls
# vote twice
v3_2 = VoteBlock(h, r, blockhash='1' * 32)
v3_2.sign(privkeys[1])
with pytest.raises(DoubleVotingError):
ls.add(v3_2)
assert lsh == ls.hash
assert len(ls) == 2
assert v3_2 not in ls
def test_one_vote_lockset():
ls = LockSet(num_eligible_votes=1)
bh = '0' * 32
r, h = 2, 3
v1 = VoteBlock(h, r, bh)
v1.sign(privkeys[0])
ls.add(v1)
assert ls.has_quorum
def test_LockSet_isvalid():
ls = LockSet(num_eligible_votes=len(privkeys))
bh = '0' * 32
r, h = 2, 3
votes = [VoteBlock(h, r, bh) for i in range(len(privkeys))]
for i, v in enumerate(votes):
v.sign(privkeys[i])
ls.add(v)
assert len(ls) == i + 1
if len(ls) < ls.num_eligible_votes * 2 / 3.:
assert not ls.is_valid
else:
assert ls.is_valid
assert ls.has_quorum # same blockhash
ls.check()
def test_LockSet_3_quorums():
ls = LockSet(3)
v = VoteBlock(0, 0, '0' * 32)
v.sign(privkeys[0])
ls.add(v)
v = VoteNil(0, 0)
v.sign(privkeys[1])
ls.add(v)
assert len(ls) == 2
assert not ls.is_valid
v = VoteNil(0, 0)
v.sign(privkeys[2])
ls.add(v)
assert ls.is_valid
assert ls.has_noquorum
assert not ls.has_quorum
assert not ls.has_quorum_possible
assert ls.check()
def test_LockSet_quorums():
combinations = dict(has_quorum=[
[1] * 7,
[1] * 7 + [2] * 3,
[1] * 7 + [None] * 3,
],
has_noquorum=[
[1] * 3 + [2] * 3 + [None],
[None] * 7,
[None] * 10,
range(10),
range(7)
],
has_quorum_possible=[
[1] * 4 + [None] * 3,
[1] * 4 + [2] * 4,
[1] * 4 + [2] * 3 + [3] * 3,
[1] * 6 + [2]
])
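    # Each list encodes one vote per validator: an integer n means a VoteBlock
    # for blockhash chr(n) * 32, while None means a VoteNil (timeout) vote.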
r, h = 1, 2
for method, permutations in combinations.items():
for set_ in permutations:
assert len(set_) >= 7
ls = LockSet(len(privkeys))
for i, p in enumerate(set_):
if p is not None:
bh = chr(p) * 32
v = VoteBlock(h, r, bh)
else:
v = VoteNil(h, r)
v.sign(privkeys[i])
ls.add(v)
assert len(ls) >= 7
assert getattr(ls, method)
ls.check()
# check stable sort
bhs = ls.blockhashes()
if len(bhs) > 1:
assert ishash(bhs[0][0])
assert isinstance(bhs[0][1], int)
if bhs[0][1] == bhs[1][1]:
assert bhs[0][0] > bhs[1][0]
else:
assert bhs[0][1] > bhs[1][1]
# test serialization
s = rlp.encode(ls)
d = rlp.decode(s, LockSet)
assert ls == d
assert id(ls) != id(d)
assert getattr(ls, method) == getattr(d, method)
def test_blockproposal():
s = tester.state()
# block 1
s.mine(n=1)
genesis = s.blocks[0]
assert genesis.header.number == 0
blk1 = s.blocks[1]
assert blk1.header.number == 1
gls = genesis_signing_lockset(genesis, privkeys[0])
bp = BlockProposal(height=1, round=0, block=blk1, signing_lockset=gls, round_lockset=None)
assert bp.lockset == gls
assert isinstance(bp, Proposal)
bp.sign(tester.k0)
with pytest.raises(InvalidProposalError): # round >0 needs round_lockset
bp = BlockProposal(height=1, round=1, block=blk1, signing_lockset=gls, round_lockset=None)
bp.validate_votes(validators, validators[:1])
# block 2
s.mine(n=1)
blk2 = s.blocks[2]
assert blk2.header.number == 2
ls = LockSet(len(validators))
for privkey in privkeys:
v = VoteBlock(height=1, round=0, blockhash=blk1.hash)
v.sign(privkey)
ls.add(v)
bp = BlockProposal(height=2, round=0, block=blk2, signing_lockset=ls, round_lockset=None)
assert bp.lockset == ls
with pytest.raises(InvalidProposalError): # signature missing
bp.validate_votes(validators, validators)
with pytest.raises(InvalidProposalError):
bp.sign(privkeys[0]) # privkey doesnt match coinbase
bp.validate_votes(validators, validators)
with pytest.raises(InvalidSignature): # already signed
bp.sign(tester.k0)
bp.v = 0 # reset sigcheck hack
bp.sign(tester.k0)
bp.validate_votes(validators, validators)
with pytest.raises(InvalidProposalError): # round >0 needs round_lockset
bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=gls, round_lockset=None)
# block 2 round 1, timeout in round=0
rls = LockSet(len(validators))
for privkey in privkeys:
v = VoteNil(height=2, round=0)
v.sign(privkey)
rls.add(v)
bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=ls, round_lockset=rls)
assert bp.lockset == rls
bp.sign(tester.k0)
bp.validate_votes(validators, validators)
# serialize
s = rlp.encode(bp)
dbp = rlp.decode(s, BlockProposal)
assert dbp.block == blk2
dbp.validate_votes(validators, validators)
# check quorumpossible lockset failure
rls = LockSet(len(validators))
for i, privkey in enumerate(privkeys):
if i < 4:
v = VoteBlock(height=2, round=0, blockhash='0' * 32)
else:
v = VoteNil(height=2, round=0)
v.sign(privkey)
rls.add(v)
assert not rls.has_noquorum
assert rls.has_quorum_possible
with pytest.raises(InvalidProposalError): # NoQuorum necessary R0
bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=ls, round_lockset=rls)
def test_VotingInstruction():
rls = LockSet(len(validators))
bh = '1' * 32
for i, privkey in enumerate(privkeys):
if i < 4: # quorum possible
v = VoteBlock(height=2, round=0, blockhash=bh)
else:
v = VoteNil(height=2, round=0)
v.sign(privkey)
rls.add(v)
assert rls.has_quorum_possible
bp = VotingInstruction(height=2, round=1, round_lockset=rls)
bp.sign(privkeys[0])
assert bh == bp.blockhash
# noquorum
rls = LockSet(len(validators))
for i, privkey in enumerate(privkeys):
if i < 3: # noquorum possible
v = VoteBlock(height=2, round=0, blockhash=bh)
else:
v = VoteNil(height=2, round=0)
v.sign(privkey)
rls.add(v)
assert not rls.has_quorum_possible
assert rls.has_noquorum
    with pytest.raises(InvalidProposalError):  # QuorumPossible necessary R0
bp = VotingInstruction(height=2, round=1, round_lockset=rls)
# noquorum
rls = LockSet(len(validators))
for i, privkey in enumerate(privkeys):
if i < 3: # noquorum possible
v = VoteBlock(height=2, round=0, blockhash=bh)
else:
v = VoteNil(height=2, round=0)
v.sign(privkey)
rls.add(v)
assert not rls.has_quorum_possible
assert rls.has_noquorum
    with pytest.raises(InvalidProposalError):  # QuorumPossible necessary R0
bp = VotingInstruction(height=2, round=1, round_lockset=rls)
|
xlsxwriter/test/vml/test_write_div.py | DeltaEpsilon7787/XlsxWriter | 2,766 | 11132812 | <reponame>DeltaEpsilon7787/XlsxWriter
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, <NAME>, <EMAIL>
#
import unittest
from io import StringIO
from ...vml import Vml
class TestWriteDiv(unittest.TestCase):
"""
Test the Vml _write_div() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_div(self):
"""Test the _write_div() method"""
self.vml._write_div('left')
exp = """<div style="text-align:left"></div>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
drip/admin.py | pureblue/django-drip | 261 | 11132817 | <reponame>pureblue/django-drip<gh_stars>100-1000
import base64
import json
from django import forms
from django.contrib import admin
from drip.models import Drip, SentDrip, QuerySetRule
from drip.drips import configured_message_classes, message_class_for
from drip.utils import get_user_model
class QuerySetRuleInline(admin.TabularInline):
model = QuerySetRule
class DripForm(forms.ModelForm):
message_class = forms.ChoiceField(
choices=((k, '%s (%s)' % (k, v)) for k, v in configured_message_classes().items())
)
class Meta:
model = Drip
exclude = []
class DripAdmin(admin.ModelAdmin):
list_display = ('name', 'enabled', 'message_class')
inlines = [
QuerySetRuleInline,
]
form = DripForm
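    # Shorthand: wrap a view with the admin site's login/permission handling.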
av = lambda self, view: self.admin_site.admin_view(view)
def timeline(self, request, drip_id, into_past, into_future):
"""
Return a list of people who should get emails.
"""
from django.shortcuts import render, get_object_or_404
drip = get_object_or_404(Drip, id=drip_id)
shifted_drips = []
seen_users = set()
for shifted_drip in drip.drip.walk(into_past=int(into_past), into_future=int(into_future)+1):
shifted_drip.prune()
shifted_drips.append({
'drip': shifted_drip,
'qs': shifted_drip.get_queryset().exclude(id__in=seen_users)
})
seen_users.update(shifted_drip.get_queryset().values_list('id', flat=True))
return render(request, 'drip/timeline.html', locals())
def view_drip_email(self, request, drip_id, into_past, into_future, user_id):
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
drip = get_object_or_404(Drip, id=drip_id)
User = get_user_model()
user = get_object_or_404(User, id=user_id)
drip_message = message_class_for(drip.message_class)(drip.drip, user)
html = ''
mime = ''
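        # Prefer the text/html alternative when the message has one; otherwise
        # fall back to the plain-text body.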
if drip_message.message.alternatives:
for body, mime in drip_message.message.alternatives:
if mime == 'text/html':
html = body
mime = 'text/html'
else:
html = drip_message.message.body
mime = 'text/plain'
return HttpResponse(html, content_type=mime)
def build_extra_context(self, extra_context):
from drip.utils import get_simple_fields
extra_context = extra_context or {}
User = get_user_model()
extra_context['field_data'] = json.dumps(get_simple_fields(User))
return extra_context
def add_view(self, request, extra_context=None):
return super(DripAdmin, self).add_view(
request, extra_context=self.build_extra_context(extra_context))
def change_view(self, request, object_id, extra_context=None):
return super(DripAdmin, self).change_view(
request, object_id, extra_context=self.build_extra_context(extra_context))
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(DripAdmin, self).get_urls()
my_urls = patterns('',
url(
r'^(?P<drip_id>[\d]+)/timeline/(?P<into_past>[\d]+)/(?P<into_future>[\d]+)/$',
self.av(self.timeline),
name='drip_timeline'
),
url(
r'^(?P<drip_id>[\d]+)/timeline/(?P<into_past>[\d]+)/(?P<into_future>[\d]+)/(?P<user_id>[\d]+)/$',
self.av(self.view_drip_email),
name='view_drip_email'
)
)
return my_urls + urls
admin.site.register(Drip, DripAdmin)
class SentDripAdmin(admin.ModelAdmin):
list_display = [f.name for f in SentDrip._meta.fields]
ordering = ['-id']
admin.site.register(SentDrip, SentDripAdmin)
|
tests/feature/test_multiline.py | epkanol/pytest-bdd | 944 | 11132833 | <filename>tests/feature/test_multiline.py
"""Multiline steps tests."""
import textwrap
import pytest
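# Each case pairs a feature file that uses a multiline step argument (docstring
# or indentation based) with the exact text the step is expected to receive.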
@pytest.mark.parametrize(
["feature_text", "expected_text"],
[
(
textwrap.dedent(
'''\
Feature: Multiline
Scenario: Multiline step using sub indentation
Given I have a step with:
"""
Some
Extra
Lines
"""
Then the text should be parsed with correct indentation
'''
),
"Some\n\nExtra\nLines",
),
(
textwrap.dedent(
"""\
Feature: Multiline
Scenario: Multiline step using sub indentation
Given I have a step with:
Some
Extra
Lines
Then the text should be parsed with correct indentation
"""
),
"Some\n\nExtra\nLines",
),
(
textwrap.dedent(
"""\
Feature: Multiline
Scenario: Multiline step using sub indentation
Given I have a step with:
Some
Extra
Lines
Then the text should be parsed with correct indentation
"""
),
" Some\n\n Extra\nLines",
),
(
textwrap.dedent(
"""\
Feature: Multiline
Scenario: Multiline step using sub indentation
Given I have a step with:
Some
Extra
Lines
"""
),
"Some\nExtra\nLines",
),
],
)
def test_multiline(testdir, feature_text, expected_text):
testdir.makefile(".feature", multiline=feature_text)
testdir.makepyfile(
textwrap.dedent(
"""\
from pytest_bdd import parsers, given, then, scenario
expected_text = '''{expected_text}'''
@scenario("multiline.feature", "Multiline step using sub indentation")
def test_multiline(request):
assert request.getfixturevalue("text") == expected_text
@given(parsers.parse("I have a step with:\\n{{text}}"), target_fixture="i_have_text")
def i_have_text(text):
return text
@then("the text should be parsed with correct indentation")
def text_should_be_correct(i_have_text, text):
assert i_have_text == text == expected_text
""".format(
expected_text=expected_text.encode("unicode_escape").decode("utf-8"),
)
)
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)
def test_multiline_wrong_indent(testdir):
"""Multiline step using sub indentation wrong indent."""
testdir.makefile(
".feature",
multiline=textwrap.dedent(
"""\
Feature: Multiline
Scenario: Multiline step using sub indentation wrong indent
Given I have a step with:
Some
Extra
Lines
Then the text should be parsed with correct indentation
"""
),
)
testdir.makepyfile(
textwrap.dedent(
"""\
from pytest_bdd import parsers, given, then, scenario
@scenario("multiline.feature", "Multiline step using sub indentation wrong indent")
def test_multiline(request):
pass
@given(parsers.parse("I have a step with:\\n{{text}}"))
def i_have_text(text):
return text
@then("the text should be parsed with correct indentation")
def text_should_be_correct(i_have_text, text):
assert i_have_text == text == expected_text
"""
)
)
result = testdir.runpytest()
result.assert_outcomes(failed=1)
result.stdout.fnmatch_lines("*StepDefinitionNotFoundError: Step definition is not found:*")
|
tests/seleniumwire/test_backend.py | wkeeling/selenium-wire | 975 | 11132861 | <filename>tests/seleniumwire/test_backend.py
import json
import os
import ssl
import urllib.error
import urllib.request
from unittest import TestCase
from urllib.parse import parse_qs, urlsplit
from seleniumwire import backend
from tests import utils as testutils
class BackendIntegrationTest(TestCase):
"""This integration test uses a single instance of the backend proxy
server for the whole test suite. This makes it quicker, since the server
isn't restarted between tests, but it also means that the proxy configuration
can't be modified once the server has been started.
"""
backend = None
httpbin = None
def test_create_proxy(self):
html = self.make_request(f'{self.httpbin}/html')
self.assertIn(b'<NAME>', html)
def test_shutdown(self):
self.backend.shutdown()
with self.assertRaises(OSError):
self.make_request(f'{self.httpbin}/html')
def test_get_requests_single(self):
self.make_request(f'{self.httpbin}/html')
requests = self.backend.storage.load_requests()
self.assertEqual(len(requests), 1)
request = requests[0]
self.assertEqual('GET', request.method)
self.assertEqual(f'{self.httpbin}/html', request.url)
self.assertEqual('identity', request.headers['Accept-Encoding'])
self.assertEqual(200, request.response.status_code)
self.assertEqual('text/html; charset=utf-8', request.response.headers['Content-Type'])
self.assertTrue(len(request.response.body) > 0)
self.assertIn(b'html', request.response.body)
def test_get_requests_multiple(self):
self.make_request(f'{self.httpbin}/html')
self.make_request(f'{self.httpbin}/anything')
requests = self.backend.storage.load_requests()
self.assertEqual(2, len(requests))
def test_get_last_request(self):
self.make_request(f'{self.httpbin}/html')
self.make_request(f'{self.httpbin}/anything')
last_request = self.backend.storage.load_last_request()
self.assertEqual(f'{self.httpbin}/anything', last_request.url)
def test_get_last_request_none(self):
last_request = self.backend.storage.load_last_request()
self.assertIsNone(last_request)
def test_clear_requests(self):
self.make_request(f'{self.httpbin}/html')
self.make_request(f'{self.httpbin}/anything')
self.backend.storage.clear_requests()
self.assertEqual([], self.backend.storage.load_requests())
def test_find(self):
self.make_request(f'{self.httpbin}/anything/questions/tagged/django?page=2&sort=newest&pagesize=15')
self.make_request(f'{self.httpbin}/anything/3.4/library/http.client.html')
self.assertEqual(
f'{self.httpbin}/anything/questions/tagged/django?page=2&sort=newest&pagesize=15',
self.backend.storage.find('/questions/tagged/django').url,
)
self.assertEqual(
f'{self.httpbin}/anything/3.4/library/http.client.html', self.backend.storage.find('.*library.*').url
)
def test_get_request_body_empty(self):
self.make_request(f'{self.httpbin}/get')
last_request = self.backend.storage.load_last_request()
self.assertEqual(b'', last_request.body)
def test_get_response_body_json(self):
self.make_request(f'{self.httpbin}/get') # httpbin endpoints return JSON
last_request = self.backend.storage.load_last_request()
self.assertIsInstance(last_request.response.body, bytes)
data = json.loads(last_request.response.body.decode('utf-8'))
self.assertEqual(f'{self.httpbin}/get', data['url'])
def test_get_response_body_image(self):
self.make_request(f'{self.httpbin}/image/png')
last_request = self.backend.storage.load_last_request()
self.assertIsInstance(last_request.response.body, bytes)
def test_get_response_body_empty(self):
self.make_request(f'{self.httpbin}/bytes/0')
redirect_request = self.backend.storage.load_requests()[0]
self.assertEqual(b'', redirect_request.response.body)
def test_set_header_overrides(self):
user_agent = 'Test_User_Agent_String'
self.backend.modifier.headers = {'User-Agent': user_agent}
self.make_request(f'{self.httpbin}/headers')
last_request = self.backend.storage.load_last_request()
self.assertEqual(user_agent, last_request.headers['User-Agent'])
data = json.loads(last_request.response.body.decode('utf-8'))
self.assertEqual(user_agent, data['headers']['User-Agent'])
def test_set_header_overrides_case_insensitive(self):
user_agent = 'Test_User_Agent_String'
self.backend.modifier.headers = {'user-agent': user_agent} # Lowercase header name
self.make_request(f'{self.httpbin}/headers')
last_request = self.backend.storage.load_last_request()
self.assertEqual(user_agent, last_request.headers['User-Agent'])
data = json.loads(last_request.response.body.decode('utf-8'))
self.assertEqual(user_agent, data['headers']['User-Agent'])
def test_set_header_overrides_filters_out_header(self):
self.backend.modifier.headers = {'User-Agent': None}
self.make_request(f'{self.httpbin}/headers')
last_request = self.backend.storage.load_last_request()
self.assertNotIn('User-Agent', last_request.headers)
data = json.loads(last_request.response.body.decode('utf-8'))
self.assertNotIn('User-Agent', data['headers'])
def test_clear_header_overrides(self):
user_agent = 'Test_User_Agent_String'
self.backend.modifier.headers = {'User-Agent': user_agent}
del self.backend.modifier.headers
self.make_request(f'{self.httpbin}/headers')
last_request = self.backend.storage.load_last_request()
self.assertNotEqual(user_agent, last_request.headers['User-Agent'])
data = json.loads(last_request.response.body.decode('utf-8'))
self.assertNotEqual(user_agent, data['headers']['User-Agent'])
def test_set_param_overrides(self):
self.backend.modifier.params = {'foo': 'baz'}
self.make_request(f'{self.httpbin}/get?foo=bar&spam=eggs')
last_request = self.backend.storage.load_last_request()
params = {k: v[0] for k, v in parse_qs(urlsplit(last_request.url).query).items()}
self.assertEqual({'foo': 'baz', 'spam': 'eggs'}, params)
def test_set_param_overrides_post(self):
self.backend.modifier.params = {'foo': 'baz'}
self.make_request(f'{self.httpbin}/post', method='POST', data=b'foo=bazzz&spam=eggs')
last_request = self.backend.storage.load_last_request()
qs = parse_qs(last_request.body.decode('utf-8'))
self.assertEqual(2, len(qs))
self.assertEqual('baz', qs['foo'][0])
self.assertEqual('eggs', qs['spam'][0])
def test_set_param_overrides_filters_out_param(self):
self.backend.modifier.params = {'foo': None}
self.make_request(f'{self.httpbin}/get?foo=bar&spam=eggs')
last_request = self.backend.storage.load_last_request()
query = urlsplit(last_request.url).query
self.assertEqual('spam=eggs', query)
def test_clear_param_overrides(self):
self.backend.modifier.params = {'foo': 'baz'}
del self.backend.modifier.params
self.make_request(f'{self.httpbin}/get')
last_request = self.backend.storage.load_last_request()
query = urlsplit(last_request.url).query
self.assertEqual('', query)
def test_set_querystring_overrides(self):
self.backend.modifier.querystring = 'foo=baz'
self.make_request(f'{self.httpbin}/get?foo=bar&spam=eggs')
last_request = self.backend.storage.load_last_request()
query = urlsplit(last_request.url)[3]
self.assertEqual('foo=baz', query)
def test_set_querystring_overrides_filters(self):
self.backend.modifier.querystring = '' # Empty string to filter a querystring (not None)
self.make_request(f'{self.httpbin}/get?foo=bar&spam=eggs')
last_request = self.backend.storage.load_last_request()
query = urlsplit(last_request.url)[3]
self.assertEqual('', query)
def test_clear_querystring_overrides(self):
self.backend.modifier.querystring = 'foo=baz'
del self.backend.modifier.querystring
self.make_request(f'{self.httpbin}/get?foo=bar')
last_request = self.backend.storage.load_last_request()
query = urlsplit(last_request.url)[3]
self.assertEqual('foo=bar', query)
def test_set_rewrite_rules(self):
self.backend.modifier.rewrite_rules = [
(f'{self.httpbin}/anything/foo/(.*)', rf'{self.httpbin}/anything/bar/\1'),
]
self.make_request(f'{self.httpbin}/anything/foo/x/y')
last_request = self.backend.storage.load_last_request()
self.assertEqual(f'{self.httpbin}/anything/bar/x/y', last_request.url)
def test_clear_rewrite_rules(self):
self.backend.modifier.rewrite_rules = [
(f'{self.httpbin}/anything/foo/(.*)', rf'{self.httpbin}/anything/bar/\1'),
]
del self.backend.modifier.rewrite_rules
self.make_request(f'{self.httpbin}/anything/foo/x/y')
last_request = self.backend.storage.load_last_request()
self.assertEqual(f'{self.httpbin}/anything/foo/x/y', last_request.url)
def test_set_single_scopes(self):
self.backend.scopes = [f'{self.httpbin}/anything/foo/.*']
self.make_request(f'{self.httpbin}/anything/foo/bar')
last_request = self.backend.storage.load_last_request()
self.assertEqual(f'{self.httpbin}/anything/foo/bar', last_request.url)
self.make_request(f'{self.httpbin}/anything/spam/bar')
last_request = self.backend.storage.load_last_request()
self.assertNotEqual(f'{self.httpbin}/anything/spam/bar', last_request.url)
def test_set_multiples_scopes(self):
self.backend.scopes = (f'{self.httpbin}/anything/foo/.*', f'{self.httpbin}/anything/spam/.*')
self.make_request(f'{self.httpbin}/anything/foo/bar')
last_request = self.backend.storage.load_last_request()
self.assertEqual(f'{self.httpbin}/anything/foo/bar', last_request.url)
self.make_request(f'{self.httpbin}/anything/spam/bar')
last_request = self.backend.storage.load_last_request()
self.assertEqual(f'{self.httpbin}/anything/spam/bar', last_request.url)
self.make_request(f'{self.httpbin}/anything/hello/bar')
last_request = self.backend.storage.load_last_request()
self.assertNotEqual(f'{self.httpbin}/anything/hello/bar', last_request.url)
def test_reset_scopes(self):
self.backend.scopes = (f'{self.httpbin}/anything/foo/.*', f'{self.httpbin}/anything/spam/.*')
self.backend.scopes = ()
self.make_request(f'{self.httpbin}/anything/hello/bar')
self.assertTrue(self.backend.storage.load_last_request())
def test_disable_encoding(self):
self.backend.options['disable_encoding'] = True
# Explicitly set the accept-encoding to gzip
self.backend.modifier.headers = {'Accept-Encoding': 'gzip'}
self.make_request(f'{self.httpbin}/anything')
last_request = self.backend.storage.load_last_request()
data = json.loads(last_request.response.body.decode('utf-8'))
self.assertEqual('identity', data['headers']['Accept-Encoding'])
def test_intercept_request_headers(self):
user_agent = 'Test_User_Agent_String'
def interceptor(request):
del request.headers['User-Agent']
request.headers['User-Agent'] = user_agent
self.backend.request_interceptor = interceptor
self.make_request(f'{self.httpbin}/headers')
last_request = self.backend.storage.load_last_request()
data = json.loads(last_request.response.body.decode('utf-8'))
self.assertEqual(user_agent, last_request.headers['User-Agent'])
self.assertEqual(user_agent, data['headers']['User-Agent'])
def test_intercept_request_params(self):
def interceptor(request):
# Update the existing parameters
request.params = {**request.params, 'foo': 'baz', 'a': 'b'}
self.backend.request_interceptor = interceptor
self.make_request(f'{self.httpbin}/get?foo=bar&spam=eggs')
last_request = self.backend.storage.load_last_request()
self.assertEqual({'foo': 'baz', 'spam': 'eggs', 'a': 'b'}, last_request.params)
data = json.loads(last_request.response.body.decode('utf-8'))
self.assertEqual({'foo': 'baz', 'spam': 'eggs', 'a': 'b'}, data['args'])
def test_intercept_request_body(self):
def interceptor(request):
data = json.loads(request.body.decode('utf-8'))
data.update({'foo': 'baz', 'a': 'b'})
request.body = json.dumps(data).encode('utf-8')
self.backend.request_interceptor = interceptor
self.make_request(f'{self.httpbin}/post', method='POST', data=b'{"foo": "bar", "spam": "eggs"}')
last_request = self.backend.storage.load_last_request()
self.assertEqual({'foo': 'baz', 'spam': 'eggs', 'a': 'b'}, json.loads(last_request.body.decode('utf-8')))
def test_intercept_response_headers(self):
def interceptor(request, response):
del response.headers['Cache-Control']
response.headers['Cache-Control'] = 'none'
self.backend.response_interceptor = interceptor
self.make_request(f'{self.httpbin}/anything')
last_request = self.backend.storage.load_last_request()
self.assertEqual('none', last_request.response.headers['Cache-Control'])
def test_intercept_response_body(self):
def interceptor(request, response):
response.body = b'helloworld'
del response.headers['Content-Length']
response.headers['Content-Length'] = '10'
self.backend.response_interceptor = interceptor
self.make_request(f'{self.httpbin}/anything')
last_request = self.backend.storage.load_last_request()
self.assertEqual(b'helloworld', last_request.response.body)
@classmethod
def setUpClass(cls):
cls.backend = backend.create()
cls.configure_proxy(*cls.backend.address()[:2])
cls.httpbin = testutils.Httpbin() if os.name != 'nt' else 'https://httpbin.org'
@classmethod
def tearDownClass(cls):
cls.backend.shutdown()
if os.name != 'nt':
cls.httpbin.shutdown()
def tearDown(self):
del self.backend.modifier.headers
del self.backend.modifier.params
del self.backend.modifier.querystring
del self.backend.modifier.rewrite_rules
self.backend.scopes = []
self.backend.request_interceptor = None
self.backend.response_interceptor = None
self.backend.storage.clear_requests()
self.backend.options['disable_encoding'] = False
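    # configure_proxy (below) installs a global urllib opener that routes both
    # http and https requests through the backend's capture proxy; TLS
    # verification is disabled so the proxy's self-signed certificate is accepted.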
@classmethod
def configure_proxy(cls, host, port):
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
https_handler = urllib.request.HTTPSHandler(context=context)
proxy_handler = urllib.request.ProxyHandler(
{
'http': 'http://{}:{}'.format(host, port),
'https': 'http://{}:{}'.format(host, port),
}
)
opener = urllib.request.build_opener(https_handler, proxy_handler)
urllib.request.install_opener(opener)
def make_request(self, url, method='GET', data=None):
request = urllib.request.Request(url, method=method, data=data)
request.add_header(
'User-Agent',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
)
with urllib.request.urlopen(request, timeout=5) as response:
html = response.read()
return html
|
tests/layer_tests/tensorflow_tests/test_tf_Floor.py | monroid/openvino | 2,406 | 11132869 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph
class TestFloor(CommonTFLayerTest):
def create_floor_net(self, shape, ir_version):
"""
Tensorflow net IR net
Input->Floor => Input->Floor
"""
#
# Create Tensorflow model
#
import tensorflow as tf
tf.compat.v1.reset_default_graph()
# Create the graph and model
with tf.compat.v1.Session() as sess:
shapes = shape.copy()
# reshaping
if len(shapes) >= 3:
shapes.append(shapes.pop(1))
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
tf.floor(input, name='Operation')
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
ref_net = None
if check_ir_version(10, None, ir_version):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'Floor': {'kind': 'op', 'type': 'Floor'},
'Floor_data': {'shape': shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
}
ref_net = build_graph(nodes_attributes,
[('input', 'input_data'),
('input_data', 'Floor'),
('Floor', 'Floor_data'),
('Floor_data', 'result')
])
return tf_net, ref_net
test_data_precommit = [dict(shape=[3, 2, 3, 7, 6])]
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_floor_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_floor_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
test_data = [dict(shape=[1]),
dict(shape=[2, 5]),
dict(shape=[5, 3, 7, 4]),
dict(shape=[3, 2, 3, 7, 6])]
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_floor(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_floor_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
|
biostar/recipes/test/test_navigation.py | tangibleai/biostar-central | 477 | 11132900 | import logging, os
from django.test import TestCase, override_settings
from django.test import Client
from biostar.recipes import auth
from biostar.recipes import models
from django.conf import settings
from biostar.utils.helpers import get_uuid
from django.urls import reverse
from django.test import override_settings
logger = logging.getLogger('engine')
TEST_ROOT = os.path.abspath(os.path.join(settings.BASE_DIR, 'export', 'tested'))
TOC_ROOT = os.path.join(TEST_ROOT, 'toc')
__CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
# Ensure that the table of contents (TOC) directory exists.
os.makedirs(TOC_ROOT, exist_ok=True)
@override_settings(MEDIA_ROOT=TEST_ROOT, TOC_ROOT=TOC_ROOT)
class SiteNavigation(TestCase):
def setUp(self):
logger.setLevel(logging.WARNING)
self.username = f"tested{get_uuid(10)}"
self.owner = models.User.objects.create(username=self.username, is_staff=True,
email="<EMAIL>")
self.owner.set_password("<PASSWORD>")
self.owner.save()
self.project = auth.create_project(user=self.owner, name="Test project",
privacy=models.Project.PUBLIC, uid="tested")
data = auth.create_data(project=self.project, path=__file__)
self.analysis = auth.create_analysis(project=self.project, json_text='', template="# Add code here.")
self.job = auth.create_job(analysis=self.analysis)
self.proj_params = dict(uid=self.project.uid)
self.analysis_params = dict(uid=self.analysis.uid)
self.recipes_id_param = dict(id=self.analysis.id)
self.data_params = dict(uid=data.uid)
self.job_params = dict(uid=self.job.uid)
self.data_file_params = dict(uid=data.uid, path="foo.txt")
self.job_file_params = dict(uid=self.job.uid, path="foo.txt")
def visit_urls(self, urls, codes, change_label=True, anon_urls=[]):
c = Client()
        # Used to test urls as a normal (logged-in) user
c.login(username=self.username, email='<EMAIL>', password='<PASSWORD>')
# Used to test with anon users
anon_c = Client()
def visit(pages, client):
for url in pages:
print(url)
resp = client.get(url, data={"q": "tested"})
code = resp.status_code
if code not in codes:
# We already know it is an error.
                    # Use this to print the url and the code.
logger.error(f"")
logger.error(f"Error accessing: {url}, code={code} not in expected values {codes}")
self.assertTrue(code in codes)
visit(pages=urls, client=c)
visit(pages=anon_urls, client=anon_c)
def test_public_pages(self):
"Checking public pages"
api_urls = [
reverse('api_list'),
#reverse('recipe_api_json', kwargs=self.analysis_params),
#reverse('recipe_api_template', kwargs=self.analysis_params)
]
anon_urls = [
reverse("index"),
reverse('project_list'),
reverse('project_view', kwargs=self.proj_params),
]
urls = [
reverse('index'),
reverse('logout'),
reverse('login'),
reverse('search'),
reverse('project_list'),
reverse('latest_recipes'),
reverse('get_part', kwargs=dict(name='info', id=self.analysis.id)),
reverse('data_list', kwargs=self.proj_params),
reverse('data_view', kwargs=self.data_params),
reverse('data_upload', kwargs=self.proj_params),
reverse('data_edit', kwargs=self.data_params),
reverse('project_view', kwargs=self.proj_params),
reverse('project_users', kwargs=self.proj_params),
reverse('project_info', kwargs=self.proj_params),
reverse('project_edit', kwargs=self.proj_params),
reverse('recipe_list', kwargs=self.proj_params),
reverse('recipe_view', kwargs=self.analysis_params),
reverse('recipe_view', kwargs=self.analysis_params),
reverse('job_list', kwargs=self.proj_params),
reverse('job_view', kwargs=self.job_params),
#reverse('job_edit', kwargs=self.job_params),
]
self.visit_urls(urls=urls, codes=[200])
#self.visit_urls(urls=api_urls, codes=[200])
self.visit_urls(anon_urls=anon_urls, urls=[], codes=[200])
self.visit_urls(anon_urls=anon_urls, urls=[], codes=[200])
def test_page_redirect(self):
"Testing that a redirect occurs for some pages"
# Test cases to handle anonymous users .
anon_urls = [
reverse("job_delete", kwargs=self.job_params),
reverse("project_delete", kwargs=self.proj_params),
reverse("project_users", kwargs=self.proj_params),
reverse("project_edit", kwargs=self.proj_params),
reverse("data_edit", kwargs=self.data_params),
reverse("data_upload", kwargs=self.proj_params),
reverse("job_rerun", kwargs=self.job_params),
reverse("job_rerun", kwargs=self.job_params),
reverse("job_edit", kwargs=self.job_params),
reverse("recipe_delete", kwargs=self.analysis_params),
reverse("job_delete", kwargs=self.job_params),
reverse("data_delete", kwargs=self.data_params),
reverse("recipe_run", kwargs=self.analysis_params),
]
urls = [
reverse('signup'),
reverse("recycle_bin"),
reverse('index'),
reverse('project_list'),
reverse('logout'),
reverse('login'),
reverse('recipe_create', kwargs=self.proj_params),
reverse('project_create'),
reverse("recipe_run", kwargs=self.analysis_params),
]
self.visit_urls(urls, [302, 200])
self.visit_urls(anon_urls=anon_urls, codes=[302], urls=[])
|
pycwr/configure/location_config.py | 1271756664/study | 144 | 11132928 | # -*- coding: utf-8 -*-
import cartopy.io.shapereader as shpreader
import pandas as pd
import os
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
Radar_info_Path = os.path.join(ROOT_DIR, "data", "radar_info.json")
CN_shp_path = os.path.join(ROOT_DIR, "data", "CHN_pronvices.shp")
last_open_dir = os.path.join(ROOT_DIR, "data", "default_opendir.json")
mbf_path = os.path.join(ROOT_DIR, "data", "beta_function_parameters.nc")
radar_info = pd.read_json(Radar_info_Path)
CN_shp_info = shpreader.Reader(CN_shp_path)
|
crawlers/python/python_crawler.py | bhanuteja2001/conrad | 244 | 11132931 | # -*- coding: utf-8 -*-
import os
import json
import datetime as dt
from bs4 import BeautifulSoup
from googleapiclient.discovery import build
from google.oauth2.service_account import Credentials
# Assumed import: AccessTokenRefreshError (caught in get_events below) is not
# defined elsewhere in this file; it comes from the legacy oauth2client package.
from oauth2client.client import AccessTokenRefreshError
from ..base import BaseCrawler
class PythonCrawler(BaseCrawler):
def get_events(self):
credentials = Credentials.from_service_account_file(
"google_service_account_credentials.json",
scopes=[
"https://www.googleapis.com/auth/calendar",
"https://www.googleapis.com/auth/calendar.readonly",
],
)
service = build("calendar", "v3", credentials=credentials)
start_time = dt.datetime.now().strftime("%Y-%m-%dT00:00:00.000Z")
end_time = None
events = []
try:
page_token = None
while True:
event_page = (
service.events()
.list(
singleEvents="False",
orderBy="startTime",
calendarId="<EMAIL>",
pageToken=page_token,
timeMin=start_time,
timeMax=end_time,
)
.execute()
)
events.extend([event for event in event_page["items"]])
page_token = event_page.get("nextPageToken")
if not page_token:
break
except AccessTokenRefreshError:
print(
"The credentials have been revoked or expired, please re-run"
" the application to re-authorize."
)
for event in events:
if any(
[word in event["summary"].lower() for word in ["cancel", "postpone"]]
):
continue
try:
soup = BeautifulSoup(event["description"], "html.parser")
event_url = soup.find_all("a", href=True)[0]["href"]
except (KeyError, IndexError):
event_url = None
if "date" in event["start"]:
start_date = event["start"]["date"]
end_date = event["end"]["date"]
elif "dateTime" in event["start"]:
start_date = event["start"]["dateTime"].split("T")[0]
end_date = event["end"]["dateTime"].split("T")[0]
else:
raise ValueError("Event date not found!")
e = {
"name": event["summary"],
"url": event_url,
"city": None,
"state": None,
"country": None,
"location": event.get("location"),
"cfp_open": False,
"cfp_end_date": "1970-01-01",
"start_date": start_date,
"end_date": end_date,
"source": "https://wiki.python.org/moin/PythonEventsCalendar",
"tags": ["python"],
"kind": "conference",
"by": "bot",
}
self.events.append(e)
|
tests/test_cli_config.py | st--/jupytext | 5,378 | 11132932 | import nbformat
import pytest
from nbformat.v4.nbbase import new_code_cell, new_notebook
from jupytext.cli import jupytext
from jupytext.compare import compare
from jupytext.header import header_to_metadata_and_cell
from jupytext.jupytext import read, write
def test_pairing_through_config_leaves_ipynb_unmodified(tmpdir):
cfg_file = tmpdir.join(".jupytext.yml")
nb_file = tmpdir.join("notebook.ipynb")
py_file = tmpdir.join("notebook.py")
cfg_file.write("formats: 'ipynb,py'\n")
nbformat.write(new_notebook(), str(nb_file))
jupytext([str(nb_file), "--sync"])
assert nb_file.isfile()
assert py_file.isfile()
nb = nbformat.read(nb_file, as_version=4)
assert "jupytext" not in nb.metadata
def test_formats(tmpdir):
tmpdir.join(".jupytext").write(
'''# Default pairing
formats = "ipynb,py"'''
)
test = tmpdir.join("test.py")
test.write("1 + 1\n")
jupytext([str(test), "--sync"])
assert tmpdir.join("test.ipynb").isfile()
def test_formats_with_suffix(tmpdir):
tmpdir.join(".jupytext").write('formats = "ipynb,.nb.py"')
test = tmpdir.join("test.py")
test.write("1 + 1\n")
test_nb = tmpdir.join("test.nb.py")
test_nb.write("1 + 1\n")
jupytext([str(test), "--sync"])
assert not tmpdir.join("test.ipynb").isfile()
jupytext([str(test_nb), "--sync"])
assert tmpdir.join("test.ipynb").isfile()
def test_formats_does_not_apply_to_config_file(tmpdir):
config = tmpdir.join(".jupytext.py")
config.write('c.formats = "ipynb,py"')
test = tmpdir.join("test.py")
test.write("1 + 1\n")
jupytext([str(test), str(config), "--sync"])
assert tmpdir.join("test.ipynb").isfile()
assert not tmpdir.join(".jupytext.ipynb").isfile()
def test_preferred_jupytext_formats_save(tmpdir):
tmpdir.join(".jupytext.yml").write("preferred_jupytext_formats_save: jl:percent")
tmp_ipynb = tmpdir.join("notebook.ipynb")
tmp_jl = tmpdir.join("notebook.jl")
nb = new_notebook(
cells=[new_code_cell("1 + 1")], metadata={"jupytext": {"formats": "ipynb,jl"}}
)
write(nb, str(tmp_ipynb))
jupytext([str(tmp_ipynb), "--sync"])
with open(str(tmp_jl)) as stream:
text_jl = stream.read()
# Parse the YAML header
metadata, _, _, _ = header_to_metadata_and_cell(text_jl.splitlines(), "#")
assert metadata["jupytext"]["formats"] == "ipynb,jl:percent"
@pytest.mark.parametrize(
"config",
[
# Way 1: preferred_jupytext_formats_save + formats
"""preferred_jupytext_formats_save: "python//py:percent"
formats: "ipynb,python//py"
""",
# Way 2: formats
"formats: ipynb,python//py:percent",
],
)
def test_save_using_preferred_and_default_format(config, tmpdir):
tmpdir.join(".jupytext.yml").write(config)
tmp_ipynb = tmpdir.join("notebook.ipynb")
tmp_py = tmpdir.join("python").join("notebook.py")
nb = new_notebook(cells=[new_code_cell("1 + 1")])
write(nb, str(tmp_ipynb))
jupytext([str(tmp_ipynb), "--sync"])
# read py file
nb_py = read(str(tmp_py))
assert nb_py.metadata["jupytext"]["text_representation"]["format_name"] == "percent"
def test_hide_notebook_metadata(tmpdir, no_jupytext_version_number):
tmpdir.join(".jupytext").write("hide_notebook_metadata = true")
tmp_ipynb = tmpdir.join("notebook.ipynb")
tmp_md = tmpdir.join("notebook.md")
nb = new_notebook(
cells=[new_code_cell("1 + 1")], metadata={"jupytext": {"formats": "ipynb,md"}}
)
write(nb, str(tmp_ipynb))
jupytext([str(tmp_ipynb), "--sync"])
with open(str(tmp_md)) as stream:
text_md = stream.read()
compare(
text_md,
"""<!--
---
jupyter:
jupytext:
formats: ipynb,md
hide_notebook_metadata: true
---
-->
```python
1 + 1
```
""",
)
def test_cli_config_on_windows_issue_629(tmpdir):
cfg_file = tmpdir.join("jupytext.yml")
cfg_file.write(
"""formats: "notebooks///ipynb,scripts///py:percent"
notebook_metadata_filter: "jupytext"
"""
)
tmpdir.mkdir("scripts").join("test.py").write("# %%\n 1+1\n")
jupytext(["--sync", str(tmpdir.join("scripts").join("*.py"))])
assert tmpdir.join("notebooks").join("test.ipynb").exists()
def test_sync_config_does_not_create_formats_metadata(
tmpdir, cwd_tmpdir, python_notebook
):
tmpdir.join("jupytext.yml").write(
"""formats: "ipynb,py:percent"
"""
)
write(python_notebook, "test.ipynb")
jupytext(["--sync", "test.ipynb"])
nb = read("test.py")
assert "formats" not in nb.metadata["jupytext"]
def test_multiple_formats_771(tmpdir, cwd_tmpdir, python_notebook):
tmpdir.join("jupytext.toml").write(
"""formats = "notebooks///ipynb,notebooks///py,scripts///py:percent"
"""
)
notebooks_dir = tmpdir.mkdir("notebooks")
scripts_dir = tmpdir.join("scripts")
write(python_notebook, str(notebooks_dir.join("notebook.ipynb")))
jupytext(["--sync", "notebooks/notebook.ipynb"])
assert notebooks_dir.join("notebook.py").isfile()
assert scripts_dir.join("notebook.py").isfile()
notebooks_dir.join("module.py").write("1 + 1\n")
jupytext(["--sync", "notebooks/module.py"])
assert notebooks_dir.join("module.ipynb").isfile()
assert scripts_dir.join("module.py").isfile()
|
bindings/python/performance.py | tbrekalo/edlib | 396 | 11132938 | #!/usr/bin/env python
import timeit
import edlib
import editdistance
import Levenshtein
with open('../../test_data/Enterobacteria_Phage_1/mutated_90_perc_oneline.fasta', 'r') as f:
queryFull = f.readline()
print('Read query: ', len(queryFull) ,' characters.')
with open('../../test_data/Enterobacteria_Phage_1/Enterobacteria_phage_1_oneline.fa', 'r') as f:
targetFull = f.readline()
print('Read target: ', len(targetFull) ,' characters.')
for seqLen in [30, 100, 1000, 10000, 50000]:
query = queryFull[:seqLen]
target = targetFull[:seqLen]
numRuns = max(1000000000 // (seqLen**2), 1)
print('Sequence length: ', seqLen)
edlibTime = timeit.timeit(stmt="edlib.align(query, target)",
number=numRuns, globals=globals()) / numRuns
print('Edlib: ', edlibTime)
print(edlib.align(query, target))
editdistanceTime = timeit.timeit(stmt="editdistance.eval(query, target)",
number=numRuns, globals=globals()) / numRuns
print('editdistance: ', editdistanceTime)
levenshteinTime = timeit.timeit(stmt="Levenshtein.distance(query, target)",
number=numRuns, globals=globals()) / numRuns
print('levenshtein: ', levenshteinTime)
print('edlib is %f times faster than editdistance.' % (editdistanceTime / edlibTime))
print('edlib is %f times faster than Levenshtein.' % (levenshteinTime / edlibTime))
|
up/tasks/det/models/utils/bbox_helper.py | ModelTC/EOD | 196 | 11132941 | # Standard Library
from up.utils.general.fp16_helper import to_float32
import time
# Import from third library
import numpy as np
import torch
from up.utils.general.global_flag import ALIGNED_FLAG
from up.extensions import gpu_iou_overlap
GPU_MEMORY = None
def allow_empty_tensor(num=1, empty_shape=(0, 4)):
"""Return an empty tensor directly if any of first `num` argument is empty"""
def decorate(func):
def wrapper(*args, **kwargs):
for arg in args[:num]:
if torch.is_tensor(arg) and arg.numel() == 0:
return arg.new_zeros(empty_shape)
return func(*args, **kwargs)
return wrapper
return decorate
def filter_by_size(boxes, min_size, start_index=0):
if boxes.shape[0] == 0:
return boxes, []
s = start_index
w = boxes[:, s + 2] - boxes[:, s + 0] + ALIGNED_FLAG.offset
h = boxes[:, s + 3] - boxes[:, s + 1] + ALIGNED_FLAG.offset
filter_inds = (w > min_size) & (h > min_size)
return boxes[filter_inds], filter_inds
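# Illustrative sketch (not part of the original module), assuming ALIGNED_FLAG.offset == 1:
#   boxes = torch.FloatTensor([[0, 0, 9, 9], [0, 0, 2, 2]])
#   kept, mask = filter_by_size(boxes, min_size=5)
# keeps only the first box (width/height 10 > 5) and returns mask tensor([True, False]).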
@allow_empty_tensor(2)
@to_float32
def bbox_iou_overlaps(b1, b2, aligned=False):
if not b1.is_cuda or aligned:
return vanilla_bbox_iou_overlaps(b1, b2, aligned)
global GPU_MEMORY
gbytes = 1024.0**3
if GPU_MEMORY is None:
GPU_MEMORY = torch.cuda.get_device_properties(b1.device.index).total_memory
alloated_memory = torch.cuda.memory_allocated()
spare_memory = 0.5 * gbytes
available_memory = GPU_MEMORY - alloated_memory - spare_memory
size = b1.shape[0] * b2.shape[0]
needed_memory = 2 * size * 4
if needed_memory < available_memory:
ious = gpu_iou_overlap(b1, b2, mode='IoU')
else:
ious = vanilla_bbox_iou_overlaps(b1.cpu(), b2.cpu())
res_memory = size * 4
if res_memory < available_memory:
ious = ious.to(b1.device)
return ious
@allow_empty_tensor(2)
@to_float32
def vanilla_bbox_iou_overlaps(b1, b2, aligned=False, return_union=False):
"""
Arguments:
        b1: dts, [n, >=4] (x1, y1, x2, y2, ...)
        b2: gts, [m, >=4] (x1, y1, x2, y2, ...)
Returns:
intersection-over-union pair-wise.
"""
area1 = (b1[:, 2] - b1[:, 0] + ALIGNED_FLAG.offset) * (b1[:, 3] - b1[:, 1] + ALIGNED_FLAG.offset)
area2 = (b2[:, 2] - b2[:, 0] + ALIGNED_FLAG.offset) * (b2[:, 3] - b2[:, 1] + ALIGNED_FLAG.offset)
if aligned:
assert b1.size(0) == b2.size(0)
if b1.size(0) * b2.size(0) == 0:
return b1.new_zeros(b1.size(0), 1)
lt = torch.max(b1[:, :2], b2[:, :2]) # [rows, 2]
rb = torch.min(b1[:, 2:], b2[:, 2:]) # [rows, 2]
wh = (rb - lt + ALIGNED_FLAG.offset).clamp(min=0) # [rows, 2]
overlap = wh[:, 0] * wh[:, 1]
union = area1 + area2 - overlap
union = torch.max(union, union.new_tensor([1e-6]))
return overlap / union
lt = torch.max(b1[:, None, :2], b2[:, :2])
rb = torch.min(b1[:, None, 2:4], b2[:, 2:4])
wh = (rb - lt + ALIGNED_FLAG.offset).clamp(min=0)
inter_area = wh[:, :, 0] * wh[:, :, 1]
union_area = area1[:, None] + area2 - inter_area
if return_union:
return inter_area / torch.clamp(union_area, min=ALIGNED_FLAG.offset), union_area
else:
return inter_area / torch.clamp(union_area, min=ALIGNED_FLAG.offset)
@allow_empty_tensor(2)
def generalized_box_iou(boxes1, boxes2, return_iou=False):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = bbox_iou_overlaps(boxes1, boxes2, return_union=True) # box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
if return_iou:
return iou - (area - union) / area, iou
else:
return iou - (area - union) / area
@allow_empty_tensor(2)
def bbox_iof_overlaps(b1, b2):
if not b1.is_cuda:
return vanilla_bbox_iof_overlaps(b1, b2)
global GPU_MEMORY
gbytes = 1024.0**3
if GPU_MEMORY is None:
GPU_MEMORY = torch.cuda.get_device_properties(b1.device.index).total_memory
alloated_memory = torch.cuda.memory_allocated()
spare_memory = 0.5 * gbytes
available_memory = GPU_MEMORY - alloated_memory - spare_memory
size = b1.shape[0] * b2.shape[0]
needed_memory = 2 * size * 4
if needed_memory < available_memory:
ious = gpu_iou_overlap(b1, b2, mode='IoF')
else:
ious = vanilla_bbox_iof_overlaps(b1.cpu(), b2.cpu())
res_memory = size * 4
if res_memory < available_memory:
ious = ious.to(b1.device)
return ious
@allow_empty_tensor(2)
def vanilla_bbox_iof_overlaps(b1, b2):
"""
Arguments:
        b1: dts, [n, >=4] (x1, y1, x2, y2, ...)
        b2: gts, [m, >=4] (x1, y1, x2, y2, ...)
Returns:
intersection-over-former-box pair-wise
"""
area1 = (b1[:, 2] - b1[:, 0] + ALIGNED_FLAG.offset) * (b1[:, 3] - b1[:, 1] + ALIGNED_FLAG.offset)
lt = torch.max(b1[:, None, :2], b2[:, :2])
rb = torch.min(b1[:, None, 2:4], b2[:, 2:4])
wh = (rb - lt + ALIGNED_FLAG.offset).clamp(min=0)
inter_area = wh[:, :, 0] * wh[:, :, 1]
return inter_area / torch.clamp(area1[:, None], min=ALIGNED_FLAG.offset)
@allow_empty_tensor(1)
def xywh2xyxy(boxes, stacked=False):
"""(x, y, w, h) -> (x1, y1, x2, y2)"""
cx, cy, w, h = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
xmin = cx - 0.5 * w
ymin = cy - 0.5 * h
xmax = cx + 0.5 * w - ALIGNED_FLAG.offset
ymax = cy + 0.5 * h - ALIGNED_FLAG.offset
if stacked:
return torch.stack([xmin, ymin, xmax, ymax], dim=1)
else:
return xmin, ymin, xmax, ymax
@allow_empty_tensor(1)
def xyxy2xywh(boxes, stacked=False):
"""(x1, y1, x2, y2) -> (x, y, w, h)"""
x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
w = x2 - x1 + ALIGNED_FLAG.offset
h = y2 - y1 + ALIGNED_FLAG.offset
cx = x1 + 0.5 * w
cy = y1 + 0.5 * h
if stacked:
return torch.stack([cx, cy, w, h], dim=1)
else:
return cx, cy, w, h
@allow_empty_tensor(2)
def bbox2offset(boxes, gt_boxes, weights=(1.0, 1.0, 1.0, 1.0)):
"""
Inverse transform that computes target bounding-box regression deltas
given proposal boxes and ground-truth boxes. The weights argument should be
a 4-tuple of multiplicative weights that are applied to the regression
target.
In older versions of this code (and in py-faster-rcnn), the weights were set
such that the regression deltas would have unit standard deviation on the
training dataset. Presently, rather than computing these statistics exactly,
we use a fixed set of weights (10., 10., 5., 5.) by default. These are
approximately the weights one would get from COCO using the previous unit
stdev heuristic.
"""
assert boxes.shape[0] == gt_boxes.shape[0]
ex_ctr_x, ex_ctr_y, ex_widths, ex_heights = xyxy2xywh(boxes)
gt_ctr_x, gt_ctr_y, gt_widths, gt_heights = xyxy2xywh(gt_boxes)
wx, wy, ww, wh = weights
offset_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
offset_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
offset_dw = ww * torch.log(gt_widths / ex_widths)
offset_dh = wh * torch.log(gt_heights / ex_heights)
offset = torch.stack((offset_dx, offset_dy, offset_dw, offset_dh), dim=1)
return offset
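# Worked example (illustrative, assuming ALIGNED_FLAG.offset == 0 and default weights):
#   boxes = torch.FloatTensor([[0., 0., 10., 10.]])
#   gts = torch.FloatTensor([[1., 1., 11., 11.]])
#   bbox2offset(boxes, gts)
# yields 0.1 for dx/dy (centre shift divided by the proposal size) and 0.0 for dw/dh
# (equal widths/heights), i.e. roughly tensor([[0.1, 0.1, 0., 0.]]).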
@allow_empty_tensor(2)
def offset2bbox(boxes, offset, weights=(1.0, 1.0, 1.0, 1.0), max_shape=None, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]):
"""
Forward transform that maps proposal boxes to predicted ground-truth
boxes using bounding-box regression deltas(offset). See bbox_transform_inv
for a description of the weights argument.
"""
ctr_x, ctr_y, widths, heights = xyxy2xywh(boxes)
means = offset.new_tensor(means).view(1, -1).repeat(1, offset.size(-1) // 4)
stds = offset.new_tensor(stds).view(1, -1).repeat(1, offset.size(-1) // 4)
offset = offset * stds + means
wx, wy, ww, wh = weights
dx = offset[:, 0::4] / wx
dy = offset[:, 1::4] / wy
dw = offset[:, 2::4] / ww
dh = offset[:, 3::4] / wh
# Prevent sending too large values into np.exp()
dw = torch.clamp(dw, max=np.log(1000. / 16.))
dh = torch.clamp(dh, max=np.log(1000. / 16.))
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = offset.new_zeros(offset.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - ALIGNED_FLAG.offset
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - ALIGNED_FLAG.offset
if max_shape is not None:
max_shape_p = [max_shape[0], max_shape[1]]
if not isinstance(max_shape_p, torch.Tensor):
max_shape_p = pred_boxes.new_tensor(max_shape_p)
max_shape_p = max_shape_p[:2].type_as(pred_boxes)
min_xy = pred_boxes.new_tensor(0)
max_xy = torch.cat([max_shape_p] * (offset.size(-1) // 2), dim=-1).flip(-1).unsqueeze(0)
pred_boxes = torch.where(pred_boxes < min_xy, min_xy, pred_boxes)
pred_boxes = torch.where(pred_boxes > max_xy, max_xy, pred_boxes)
return pred_boxes
@allow_empty_tensor(2)
def bbox2xyxyoffset(boxes, gt_boxes, weights=(1.0, 1.0, 1.0, 1.0)):
"""
Using (dx1, dy1, dx2, dy2) corner offsets here!
Inverse transform that computes target bounding-box regression deltas
given proposal boxes and ground-truth boxes. The weights argument should be
a 4-tuple of multiplicative weights that are applied to the regression
target.
"""
assert boxes.shape[0] == gt_boxes.shape[0]
ex_ctr_x, ex_ctr_y, ex_widths, ex_heights = xyxy2xywh(boxes)
gt_x1, gt_y1, gt_x2, gt_y2 = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2], gt_boxes[:, 3]
ex_x1, ex_y1, ex_x2, ex_y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
wx1, wy1, wx2, wy2 = weights
offset_dx1 = wx1 * (gt_x1 - ex_x1) / ex_widths
offset_dy1 = wy1 * (gt_y1 - ex_y1) / ex_heights
offset_dx2 = wx2 * (gt_x2 - ex_x2) / ex_widths
offset_dy2 = wy2 * (gt_y2 - ex_y2) / ex_heights
offset = torch.stack((offset_dx1, offset_dy1, offset_dx2, offset_dy2), dim=1)
return offset
@allow_empty_tensor(2)
def xyxyoffset2bbox(boxes, offset, weights=(1.0, 1.0, 1.0, 1.0)):
"""
Using (dx1, dy1, dx2, dy2) corner offsets here!
Forward transform that maps proposal boxes to predicted ground-truth
boxes using `xyxy` bounding-box regression deltas(offset).
"""
_, _, widths, heights = xyxy2xywh(boxes)
x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
wx1, wy1, wx2, wy2 = weights
dx1 = offset[:, 0::4] / wx1
dy1 = offset[:, 1::4] / wy1
dx2 = offset[:, 2::4] / wx2
dy2 = offset[:, 3::4] / wy2
pred_x1 = dx1 * widths[:, None] + x1[:, None]
pred_y1 = dy1 * heights[:, None] + y1[:, None]
pred_x2 = dx2 * widths[:, None] + x2[:, None]
pred_y2 = dy2 * heights[:, None] + y2[:, None]
pred_boxes = offset.new_zeros(offset.shape)
# x1
pred_boxes[:, 0::4] = pred_x1
# y1
pred_boxes[:, 1::4] = pred_y1
# x2
pred_boxes[:, 2::4] = pred_x2
# y2
pred_boxes[:, 3::4] = pred_y2
return pred_boxes
@allow_empty_tensor(2)
def offset2tiled_bbox(boxes, offset, weights=(1.0, 1.0, 1.0, 1.0)):
"""
Forward transform that maps proposal boxes to predicted ground-truth
boxes using bounding-box regression deltas(offset). See bbox_transform_inv
for a description of the weights argument.
"""
if boxes.shape[0] == 0:
return boxes.new_zeros((1, offset.shape[1]))
widths = boxes[:, 2] - boxes[:, 0] + ALIGNED_FLAG.offset
heights = boxes[:, 3] - boxes[:, 1] + ALIGNED_FLAG.offset
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = offset[:, 0::4] / wx
dy = offset[:, 1::4] / wy
dw = offset[:, 2::4] / ww
dh = offset[:, 3::4] / wh
# Prevent sending too large values into np.exp()
dw = torch.clamp(dw, max=np.log(1000. / 16.))
dh = torch.clamp(dh, max=np.log(1000. / 16.))
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = offset.new_zeros(offset.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - ALIGNED_FLAG.offset
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - ALIGNED_FLAG.offset
return pred_boxes
@allow_empty_tensor(1)
def normalize_offset(offset, mean, std):
mean = offset.new_tensor(mean).reshape(-1, 4)
std = offset.new_tensor(std).reshape(-1, 4)
return (offset - mean) / std
@allow_empty_tensor(1)
def unnormalize_offset(offset, mean, std):
mean = offset.new_tensor(mean).reshape(-1, 4)
std = offset.new_tensor(std).reshape(-1, 4)
return offset * std + mean
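# Illustrative round trip (values assumed): with mean=[0, 0, 0, 0] and std=[0.1, 0.1, 0.2, 0.2],
# unnormalize_offset(normalize_offset(offset, mean, std), mean, std) recovers the
# original offsets up to floating-point error.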
@allow_empty_tensor(1)
def clip_bbox(bbox, img_size):
h, w = img_size[:2]
dh, dw = 0, 0
if len(img_size) > 6:
dw, dh = img_size[6], img_size[7]
bbox[:, 0] = torch.clamp(bbox[:, 0], min=0, max=w - ALIGNED_FLAG.offset + dw)
bbox[:, 1] = torch.clamp(bbox[:, 1], min=0, max=h - ALIGNED_FLAG.offset + dh)
bbox[:, 2] = torch.clamp(bbox[:, 2], min=0, max=w - ALIGNED_FLAG.offset + dw)
bbox[:, 3] = torch.clamp(bbox[:, 3], min=0, max=h - ALIGNED_FLAG.offset + dh)
return bbox
@allow_empty_tensor(1)
def clip_tiled_boxes(bbox, img_size):
assert bbox.shape[1] % 4 == 0, \
'bbox.shape[1] is {}, but must be divisible by 4'.format(bbox.shape[1])
h, w = img_size[:2]
bbox[:, 0::4] = torch.clamp(bbox[:, 0::4], min=0, max=w - ALIGNED_FLAG.offset)
bbox[:, 1::4] = torch.clamp(bbox[:, 1::4], min=0, max=h - ALIGNED_FLAG.offset)
bbox[:, 2::4] = torch.clamp(bbox[:, 2::4], min=0, max=w - ALIGNED_FLAG.offset)
bbox[:, 3::4] = torch.clamp(bbox[:, 3::4], min=0, max=h - ALIGNED_FLAG.offset)
return bbox
@allow_empty_tensor(1)
def flip_tiled_bboxes(boxes, width):
boxes_flipped = boxes.clone()
boxes_flipped[:, 0::4] = width - boxes[:, 2::4] - ALIGNED_FLAG.offset
boxes_flipped[:, 2::4] = width - boxes[:, 0::4] - ALIGNED_FLAG.offset
return boxes_flipped
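# Illustrative sketch (assuming ALIGNED_FLAG.offset == 1): flipping a box with
# x1=2, x2=5 inside an image of width 10 gives x1 = 10 - 5 - 1 = 4 and
# x2 = 10 - 2 - 1 = 7, mirroring it about the vertical centre line.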
def test_bbox_iou_overlaps():
b1 = torch.FloatTensor([[0, 0, 4, 4], [1, 2, 3, 5], [5, 5, 5, 5]])
b2 = torch.FloatTensor([[0, 0, 4, 4], [1, 2, 3, 5], [5, 5, 5, 5], [100, 100, 200, 200]])
overlaps = bbox_iou_overlaps(b1, b2)
print(overlaps)
def test_bbox_iof_overlaps():
b1 = torch.FloatTensor([[0, 0, 4, 4], [1, 2, 3, 5], [5, 5, 5, 5]])
b2 = torch.FloatTensor([[0, 0, 4, 4], [1, 2, 3, 5], [5, 5, 5, 5], [100, 100, 200, 200]])
overlaps = bbox_iof_overlaps(b1, b2)
print(overlaps)
def test_xyxy_xywh():
b1 = torch.FloatTensor([[0, 0, 4, 4], [1, 2, 3, 5], [5, 5, 5, 5]])
b2 = xyxy2xywh(b1)
b2 = torch.stack(b2, dim=1)
b3 = xywh2xyxy(b2)
b3 = torch.stack(b3, dim=1)
print(b1)
print(b2)
print(b3)
def test_offset():
b1 = torch.FloatTensor([[0, 0, 4, 4], [1, 2, 3, 5], [4, 4, 5, 5]])
tg = torch.FloatTensor([[1, 1, 5, 5], [0, 2, 4, 5], [4, 4, 5, 5]])
offset = bbox2offset(b1, tg)
print(offset)
pred = offset2bbox(b1, offset)
print(pred)
def test_clip_bbox():
b1 = torch.FloatTensor([[0, 0, 9, 29], [1, 2, 19, 39], [4, 4, 59, 59]])
print(b1)
b2 = clip_bbox(b1, (30, 35))
print(b2)
def test_iou(iou_fn, a, b):
n = 5
s = time.time()
for i in range(n):
iou = iou_fn(a, b)
del iou
torch.cuda.synchronize()
e = time.time()
t = e - s
memory = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 / 1024
return t, memory
def rand_bbox(n):
box = torch.randn(n, 4).cuda() * 10
x1 = torch.min(box[:, 0], box[:, 2])
x2 = torch.max(box[:, 0], box[:, 2])
y1 = torch.min(box[:, 1], box[:, 3])
y2 = torch.max(box[:, 1], box[:, 3])
return torch.stack([x1, y1, x2, y2], dim=1)
def box_voting(top_dets, all_dets, thresh, scoring_method='id', beta=1.0):
"""Apply bounding-box voting to refine `top_dets` by voting with `all_dets`.
See: https://arxiv.org/abs/1505.01749. Optional score averaging (not in the
referenced paper) can be applied by setting `scoring_method` appropriately.
"""
    # top_dets is [N, 5] each row is [x1 y1 x2 y2, score]
    # all_dets is [N, 5] each row is [x1 y1 x2 y2, score]
top_dets_out = top_dets.cpu().numpy().copy()
top_boxes = top_dets[:, :4]
all_boxes = all_dets[:, :4]
all_scores = all_dets[:, 4]
top_to_all_overlaps = bbox_iou_overlaps(top_boxes, all_boxes)
top_to_all_overlaps = top_to_all_overlaps.cpu().numpy()
all_boxes = all_boxes.cpu().numpy()
all_scores = all_scores.cpu().numpy()
for k in range(top_dets_out.shape[0]):
inds_to_vote = np.where(top_to_all_overlaps[k] >= thresh)[0]
boxes_to_vote = all_boxes[inds_to_vote, :]
ws = all_scores[inds_to_vote]
top_dets_out[k, :4] = np.average(boxes_to_vote, axis=0, weights=ws)
if scoring_method == 'id':
# Identity, nothing to do
pass
elif scoring_method == 'temp_avg':
# Average probabilities (considered as P(detected class) vs.
# P(not the detected class)) after smoothing with a temperature
# hyperparameter.
P = np.vstack((ws, 1.0 - ws))
P_max = np.max(P, axis=0)
X = np.log(P / P_max)
X_exp = np.exp(X / beta)
P_temp = X_exp / np.sum(X_exp, axis=0)
P_avg = P_temp[0].mean()
top_dets_out[k, 4] = P_avg
elif scoring_method == 'avg':
# Combine new probs from overlapping boxes
top_dets_out[k, 4] = ws.mean()
elif scoring_method == 'iou_avg':
P = ws
ws = top_to_all_overlaps[k, inds_to_vote]
P_avg = np.average(P, weights=ws)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'generalized_avg':
P_avg = np.mean(ws**beta)**(1.0 / beta)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'quasi_sum':
top_dets_out[k, 4] = ws.sum() / float(len(ws))**beta
else:
raise NotImplementedError('Unknown scoring method {}'.format(scoring_method))
top_dets_out = torch.from_numpy(top_dets_out).to(top_dets)
return top_dets_out
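# Illustrative usage sketch (values assumed, not from the original module):
#   top = torch.FloatTensor([[0, 0, 10, 10, 0.9]])
#   dets = torch.FloatTensor([[0, 0, 10, 10, 0.9], [1, 1, 11, 11, 0.6]])
#   box_voting(top, dets, thresh=0.5, scoring_method='avg')
# Both detections overlap the top box with IoU >= 0.5, so the refined box is their
# score-weighted average and, with scoring_method='avg', the new score is 0.75.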
# if __name__ == '__main__':
# test_bbox_iou_overlaps()
# test_bbox_iof_overlaps()
# test_xyxy_xywh()
# test_offset()
# test_clip_bbox()
# test_box_voting()
|
roman_numbers/main.py | DazEB2/SimplePyScripts | 117 | 11132958 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/zopefoundation/roman
import roman
print(roman.toRoman(5)) # V
print(roman.toRoman(9)) # IX
print(roman.toRoman(10)) # X
print(roman.toRoman(13)) # XIII
print(roman.toRoman(255)) # CCLV
print(roman.toRoman(1024)) # MXXIV
print()
print(roman.fromRoman('V')) # 5
print(roman.fromRoman('IX')) # 9
print(roman.fromRoman('X')) # 10
print(roman.fromRoman('XIII')) # 13
print(roman.fromRoman('CCLV')) # 255
print(roman.fromRoman('MXXIV')) # 1024
assert roman.fromRoman(roman.toRoman(5)) == 5
assert roman.fromRoman(roman.toRoman(1024)) == 1024
assert roman.fromRoman(roman.toRoman(4048)) == 4048
assert roman.toRoman(roman.fromRoman('MXXIV')) == 'MXXIV'
assert roman.toRoman(roman.fromRoman('XIII')) == 'XIII'
assert roman.toRoman(roman.fromRoman('IX')) == 'IX'
|
release/scripts/presets/render/TV_PAL_4_colon_3.py | rbabari/blender | 365 | 11132980 | import bpy
bpy.context.scene.render.resolution_x = 720
bpy.context.scene.render.resolution_y = 576
bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.render.pixel_aspect_x = 12
bpy.context.scene.render.pixel_aspect_y = 11
bpy.context.scene.render.fps = 25
bpy.context.scene.render.fps_base = 1
|
rojak-pantau/rojak_pantau/spiders/metrotvnews.py | pyk/rojak | 107 | 11132984 | # -*- coding: utf-8 -*-
from datetime import datetime
from scrapy import Request
from scrapy.exceptions import CloseSpider
from scrapy.loader import ItemLoader
from rojak_pantau.items import News
from rojak_pantau.i18n import _
from rojak_pantau.util.wib_to_utc import wib_to_utc
from rojak_pantau.spiders.base import BaseSpider
NEWS_HEADLINE = 'headline'
NEWS_GRID = 'grid'
class MetrotvnewsSpider(BaseSpider):
name = "metrotvnews"
allowed_domains = ["metrotvnews.com"]
start_urls = (
'http://www.metrotvnews.com/more/topic/8602/0',
)
def parse(self, response):
self.logger.info('parse: {}'.format(response))
is_no_update = False
# Collect list of news from current page
articles_grid = response.css('li:not(.last) > div.grid')
        articles = list(zip(articles_grid, [NEWS_GRID] * len(articles_grid)))
        articles += list(zip(response.css('div.topic'), [NEWS_HEADLINE]))
if not articles:
raise CloseSpider('article not found')
for article in articles:
# Close the spider if we don't find the list of urls
url_selectors = None
if article[1] == NEWS_GRID:
url_selectors = article[0].css('h2 > a::attr(href)')
elif article[1] == NEWS_HEADLINE:
url_selectors = article[0].css('h1 > a::attr(href)')
if not url_selectors:
raise CloseSpider('url_selectors not found')
url = url_selectors.extract()[0]
self.logger.info('Url: {}'.format(url))
# Example: Minggu, 09 Oct 2016 15:14
info_selectors = article[0].css('div.reg::text')
if not info_selectors:
raise CloseSpider('info_selectors not found')
info = info_selectors.extract()[1]
# Example: 09 Oct 2016 15:14
info_time = info.split(',')[1].strip()
# Parse date information
try:
published_at_wib = datetime.strptime(info_time, '%d %b %Y %H:%M')
except ValueError as e:
raise CloseSpider('cannot_parse_date: {}'.format(e))
published_at = wib_to_utc(published_at_wib)
if self.media['last_scraped_at'] >= published_at:
is_no_update = True
break
# For each url we create new scrapy request
yield Request(url, callback=self.parse_news)
if is_no_update:
self.logger.info('Media have no update')
return
# Collect news on next page
if response.css('div.bu.fr > a'):
next_page = response.css('div.bu.fr > a[rel="next"]::attr(href)').extract()[0]
next_page_url = response.urljoin(next_page)
yield Request(next_page_url, callback=self.parse)
# Collect news item
def parse_news(self, response):
self.logger.info('parse_news: {}'.format(response))
is_video = response.css('ul.breadcrumb > li > a::text').extract()[0] == 'VIDEO'
# Init item loader
# extract news title, published_at, author, content, url
# Required: title, raw_content, published_at
loader = ItemLoader(item=News(), response=response)
loader.add_value('url', response.url)
# Will be dropped if video page
if is_video:
return loader.load_item()
title_selectors = response.css('div.part.lead.pr > h1::text')
if not title_selectors:
# Will be dropped on the item pipeline
return loader.load_item()
title = title_selectors.extract()[0]
loader.add_value('title', title)
xpath_query = """
//div[@class="part article"]/node()
[not(
descendant-or-self::comment()|
descendant-or-self::style|
descendant-or-self::script|
descendant-or-self::div|
descendant-or-self::span|
descendant-or-self::img|
descendant-or-self::table|
descendant-or-self::iframe
)]
"""
raw_content_selectors = response.xpath(xpath_query)
if not raw_content_selectors:
# Will be dropped on the item pipeline
return loader.load_item()
raw_content = raw_content_selectors.extract()
raw_content = ' '.join([w.strip() for w in raw_content])
raw_content = raw_content.strip()
loader.add_value('raw_content', raw_content)
# Example: Bambang - 10 Oktober 2016 21:10 wib
info_selectors = response.css('div.part.lead.pr > span::text')
if not info_selectors:
# Will be dropped on the item pipeline
return loader.load_item()
info = info_selectors.extract()[0]
# Parse date information
# Example: 10 Oktober 2016 21:10 wib
date_str = info.split('-')[1].strip()
if not date_str:
# Will be dropped on the item pipeline
return loader.load_item()
# Example: 10 October 2016 21:10
date_str = ' '.join([_(w) for w in date_str[:-4].split(' ')])
try:
published_at_wib = datetime.strptime(date_str, '%d %B %Y %H:%M')
except ValueError:
# Will be dropped on the item pipeline
return loader.load_item()
published_at = wib_to_utc(published_at_wib)
loader.add_value('published_at', published_at)
author_name = info.split('-')[0].strip()
if not author_name:
loader.add_value('author_name', '')
else:
loader.add_value('author_name', author_name)
# Move scraped news to pipeline
return loader.load_item()
|
src/fuzzingtool/utils/file_utils.py | NESCAU-UFLA/FuzzingTool | 131 | 11132999 | # Copyright (c) 2020 - present <NAME> <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from os import walk
from os.path import dirname, abspath
from typing import List
def readFile(fileName: str) -> List[str]:
"""Reads content of a file.
@type fileName: str
@param fileName: The file path and name
    @returns List[str]: The content of the file
"""
try:
with open(f'{fileName}', 'r') as thisFile:
return [line.rstrip('\n') for line in thisFile if not line.startswith('#!')]
except FileNotFoundError:
raise Exception(f"File '{fileName}' not found")
def splitFilenames(files: list) -> List[str]:
"""Splits the files, removing the extension and __init__.py
@type files: list
@param files: The filenames to split
@returns List[str]: The splited content, without extension
"""
if '__init__.py' in files:
files.remove('__init__.py')
return [file.split('.')[0] for file in files]
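# Example (illustrative filenames): splitFilenames(['__init__.py', 'Robots.py', 'WaybackMachine.py'])
# returns ['Robots', 'WaybackMachine'].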
def getPluginNamesFromCategory(category: str) -> List[str]:
"""Gets the plugin filenames
@type category: str
@param category: The category of the plugins
@returns List[str]: The list with the plugin filenames
"""
try:
_, _, pluginFiles = next(walk(f"./fuzzingtool/core/plugins/{category}/"))
except:
_, _, pluginFiles = next(walk(f"{dirname(dirname(abspath(__file__)))}/core/plugins/{category}/"))
return splitFilenames(pluginFiles)
def getReports() -> List[str]:
"""Gets the report filenames
@returns List[str]: The list with the report filenames
"""
try:
_, _, reportFiles = next(walk(f"./fuzzingtool/reports/reports/"))
except:
_, _, reportFiles = next(walk(f"{dirname(dirname(abspath(__file__)))}/reports/reports/"))
return splitFilenames(reportFiles) |
plugins/modules/oci_resource_manager_template_category_facts.py | slmjy/oci-ansible-collection | 108 | 11133023 | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_resource_manager_template_category_facts
short_description: Fetches details about one or multiple TemplateCategory resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple TemplateCategory resources in Oracle Cloud Infrastructure
- Lists template categories.
version_added: "2.9.0"
author: Oracle (@oracle)
options: {}
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_display_name_option ]
"""
EXAMPLES = """
- name: List template_categories
oci_resource_manager_template_category_facts:
"""
RETURN = """
template_categories:
description:
- List of TemplateCategory resources
returned: on success
type: complex
contains:
id:
description:
- Unique identifier for the template category.
Possible values are `0` (Quick Starts), `1` (Service), `2` (Architecture), and `3` (Private).
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- The name of the template category.
returned: on success
type: str
sample: display_name_example
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.resource_manager import ResourceManagerClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class TemplateCategoryFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return []
def list_resources(self):
optional_list_method_params = [
"display_name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_template_categories, **optional_kwargs
)
TemplateCategoryFactsHelperCustom = get_custom_class(
"TemplateCategoryFactsHelperCustom"
)
class ResourceFactsHelper(
TemplateCategoryFactsHelperCustom, TemplateCategoryFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(dict(display_name=dict(type="str"),))
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="template_category",
service_client_class=ResourceManagerClient,
namespace="resource_manager",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(template_categories=result)
if __name__ == "__main__":
main()
|
earthpy/tests/test_plot_bands.py | nkorinek/earthpy | 350 | 11133038 | """Tests for the plot bands function"""
import pytest
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import earthpy.plot as ep
import numpy as np
plt.show = lambda: None
@pytest.fixture
def one_band_3dims():
"""Return a 3-dim numpy array vals 0-9"""
return np.array(
[[[8, 0, 2, 6, 3], [2, 8, 2, 8, 4], [3, 9, 1, 5, 4], [5, 9, 2, 7, 7]]]
)
def test_arr_parameter():
"""Raise an AttributeError if an array is not provided."""
with pytest.raises(
AttributeError, match="Input arr should be a numpy array"
):
ep.plot_bands(arr=(1, 2))
plt.close()
def test_num_titles(image_array_2bands):
"""Test the number of titles.
If a user provides two titles for a single band array, the function
should raise an error OR if the title list is a different length than
    the array, it should also raise an error.
"""
single_band = image_array_2bands[0]
with pytest.raises(
ValueError,
match="plot_bands expects one title for a single band array",
):
ep.plot_bands(arr=single_band, title=["Title1", "Title2"])
with pytest.raises(ValueError, match="plot_bands() expects the number"):
ep.plot_bands(
arr=image_array_2bands, title=["Title1", "Title2", "Title3"]
)
plt.close()
def test_str_for_title(image_array_2bands):
"""Test that a single string title renders properly."""
single_band = image_array_2bands[0]
ax = ep.plot_bands(arr=single_band, title="my title")
plot_title = ax.get_title()
assert "my title" in plot_title
plt.close()
def test_num_axes(image_array_2bands):
"""Test the number of axes.
If provided with a 2 band array, plot_bands should return 3 axes.
And 2 colorbars
"""
ax = ep.plot_bands(image_array_2bands)
ax = list(ax)
cb = [a.images[0].colorbar for a in ax if a.images]
assert len(ax) == 3
assert len(cb) == 2
plt.close()
def test_two_plot_title(image_array_2bands):
"""Test that the default title is provided for a 2 band array plot."""
ax = ep.plot_bands(image_array_2bands)
num_plts = image_array_2bands.shape[0]
all_titles = [ax[i].get_title() for i in range(num_plts)]
assert all_titles == ["Band 1", "Band 2"]
plt.close()
def test_custom_plot_title(image_array_2bands):
"""Test that the custom title is applied for a 2 band array plot."""
ax = ep.plot_bands(image_array_2bands, title=["Red Band", "Green Band"])
num_plts = image_array_2bands.shape[0]
all_titles = [ax[i].get_title() for i in range(num_plts)]
assert all_titles == ["Red Band", "Green Band"]
plt.close()
def test_single_band_3dims(one_band_3dims):
"""Test single band plot with three dimensions.
If you provide a single band array with 3 dimensions (shape[0]==1
test that it still plots and only returns a single axis.
"""
ax = ep.plot_bands(one_band_3dims)
arr = ax.get_images()[0].get_array()
assert arr.ndim == 2
assert len(ax.get_images()) == 1
plt.close()
def test_single_band_2dims(one_band_3dims):
"""Test single band plot with two dimensions
    If you provide a single band array with 2 dimensions,
test that it still plots and only returns a single axis.
"""
single_band_2dims = one_band_3dims[0]
ax = ep.plot_bands(single_band_2dims)
# Get array from mpl figure
arr = ax.get_images()[0].get_array()
assert arr.ndim == 2
assert len(ax.get_images()) == 1
plt.close()
def test_cbar_param(one_band_3dims):
"""Test that the colorbar param works for a single band arr"""
one_band_2dims = one_band_3dims[0]
ax = ep.plot_bands(one_band_2dims, scale=True)
arr = ax.get_images()[0].get_array()
c_bar = ax.images[0].colorbar
# Return arr should be scaled by default between 0-255
assert arr.min() == 0 and arr.max() == 255
# A cbar should be drawn in this plot
assert c_bar
plt.close()
def test_not_scaled_single_band(one_band_3dims):
"""Test if user turns off scaling and cbar the data vals remain intact.
Also if no cbar is specified it should not render.
"""
one_band_2dims = one_band_3dims[0]
ax = ep.plot_bands(one_band_2dims, cbar=False)
arr = ax.get_images()[0].get_array()
c_bar = ax.images[0].colorbar
# Return arr is unscaled for plotting
assert (
arr.min() == one_band_2dims.min() and arr.max() == one_band_2dims.max()
)
# A cbar should not be drawn in this plot
assert not c_bar
plt.close()
def test_not_scaled_multi_band(image_array_2bands):
"""Test if the user turns off scaling for multi bands the data vals
remain intact.
"""
im = image_array_2bands
ax = ep.plot_bands(im)
# Get all arrays to be plotted
all_arrs = [a.get_images()[0].get_array() for a in ax if a.get_images()]
all_arrs_flat = np.concatenate(all_arrs, axis=0)
# Return arr is unscaled for plotting
assert all_arrs_flat.min() == im.min() and all_arrs_flat.max() == im.max()
plt.close()
def test_vmin_vmax_multi_band(image_array_2bands):
"""Test vmin and max apply properly in multi band images
If the data are scaled between -10 and 10 the cbar vals should reflect
that.
"""
one_band_2dims = image_array_2bands
vmin = -10
vmax = 10
ax = ep.plot_bands(one_band_2dims, vmin=vmin, vmax=vmax)
# Get all cbars - the min and max vals for all cbars should be -10 and 10
cb_max = [a.images[0].colorbar.vmax for a in ax if a.images]
cb_min = [a.images[0].colorbar.vmin for a in ax if a.images]
assert all(map(lambda x: x == vmin, cb_min))
assert all(map(lambda x: x == vmax, cb_max))
plt.close()
def test_vmin_vmax_single_band(one_band_3dims):
"""Test vmin and max apply properly
If the data are scaled between -10 and 10 the cbar vals should reflect
that.
"""
one_band_2dims = one_band_3dims[0]
vmin = 0
vmax = 10
ax = ep.plot_bands(one_band_2dims, vmin=vmin, vmax=vmax)
c_bar = ax.images[0].colorbar
# Cbar should be scaled between the vmin and vmax vals
assert c_bar.vmin == vmin and c_bar.vmax == vmax
plt.close()
def test_extent(one_band_3dims):
"""Test that extent param returns a plot with the correct extent."""
one_band_2dims = one_band_3dims[0]
    # Shift the extent away from the default origin-based extent
xmin = one_band_2dims.shape[1]
xmax = one_band_2dims.shape[1] + xmin
ymin = one_band_2dims.shape[0]
ymax = one_band_2dims.shape[0] + ymin
ext = [xmin, xmax, ymin, ymax]
ax = ep.plot_bands(one_band_2dims, extent=ext)
pl_extent = list(ax.get_xlim() + ax.get_ylim())
    # The plot extent should match the custom extent that was passed in
assert pl_extent == ext
plt.close()
def test_multi_panel_single_band(one_band_3dims):
"""Test that multi panel works with single band arr."""
title1 = "Title axis one"
title2 = "Title axis two"
f, (ax1, ax2) = plt.subplots(2, 1)
ep.plot_bands(one_band_3dims, title=title1, ax=ax1)
ep.plot_bands(one_band_3dims, title=title2, ax=ax2)
# get all axis subplot elements - note this returns subplots and axes
all_axes = f.axes
assert len(all_axes) == 4
assert all_axes[0].get_title() == title1
assert all_axes[1].get_title() == title2
def test_alpha(image_array_2bands):
"""Test that the alpha param returns a plot with the correct alpha."""
alpha_val = 0.5
alpha_ax = ep.plot_bands(image_array_2bands, cols=2, alpha=alpha_val)
for i in range(len(alpha_ax)):
assert alpha_ax[i].get_images()[0].get_alpha() == alpha_val
def test_norm_scale_false(image_array_2bands):
"""Test that the norm param returns a plot with the correct norm
boundaries."""
norm_bounds = colors.BoundaryNorm([0, 1], 2)
norm_ax = ep.plot_bands(image_array_2bands, cols=2, norm=norm_bounds)
for axes in norm_ax:
assert norm_bounds.boundaries[0] == axes.get_images()[0].norm.vmin
assert norm_bounds.boundaries[1] == axes.get_images()[0].norm.vmax
def test_norm_scale_true(image_array_2bands):
"""Test that the norm param returns a plot with the correct norm
boundaries."""
norm_bounds = colors.BoundaryNorm([0, 1], 2)
norm_ax = ep.plot_bands(
image_array_2bands, cols=2, norm=norm_bounds, scale=True
)
for axes in norm_ax:
assert norm_bounds.boundaries[0] == axes.get_images()[0].norm.vmin
assert norm_bounds.boundaries[1] == axes.get_images()[0].norm.vmax
def test_norm_scale_unset(image_array_2bands):
"""Test that the norm param returns a plot with the correct norm
boundaries."""
norm_bounds = colors.BoundaryNorm([0, 1], 2)
norm_ax = ep.plot_bands(image_array_2bands, cols=2, norm=norm_bounds)
for axes in norm_ax:
assert norm_bounds.boundaries[0] == axes.get_images()[0].norm.vmin
assert norm_bounds.boundaries[1] == axes.get_images()[0].norm.vmax
|
tests/test_api_v1_firewall_traffic_shaper_queue.py | jaredhendrickson13/pfsense-api | 311 | 11133048
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unit_test_framework
class APIUnitTestFirewallTrafficShaperQueue(unit_test_framework.APIUnitTest):
uri = "/api/v1/firewall/traffic_shaper/queue"
post_tests = [
{
"name": "Create PRIQ parent traffic shaper",
"uri": "/api/v1/firewall/traffic_shaper",
"payload": {
"interface": "lan",
"scheduler": "PRIQ",
"bandwidthtype": "Gb",
"bandwidth": 1,
"enabled": True,
"qlimit": 1000,
"tbrconfig": 1000,
"apply": True
}
},
{
"name": "Create traffic shaper queue",
"payload": {
"interface": "lan",
"name": "Test_Queue",
"priority": 15,
"description": "Traffic Shaper Queue unit test",
"default": True
}
},
{
"name": "Check interface requirement",
"status": 400,
"return": 4110
},
{
"name": "Check interface exists validation",
"status": 400,
"return": 4111,
"payload": {
"interface": "INVALID"
}
},
{
"name": "Check interface traffic shaper exists validation",
"status": 400,
"return": 4122,
"payload": {
"interface": "wan"
}
},
{
"name": "Check name requirement",
"status": 400,
"return": 4123,
"payload": {
"interface": "lan"
}
},
{
"name": "Check name character validation",
"status": 400,
"return": 4124,
"payload": {
"interface": "lan",
"name": "THIS NAME IS NOT VALID!!!"
}
},
{
"name": "Check name minimum length constraint",
"status": 400,
"return": 4124, # Regex fails with an empty string, use that error code instead of 4125
"payload": {
"interface": "lan",
"name": ""
}
},
{
"name": "Check name maximum length constraint",
"status": 400,
"return": 4125,
"payload": {
"interface": "lan",
"name": "THIS_NAME_IS_TOO_LONG"
}
},
{
"name": "Check name unique constraint",
"status": 400,
"return": 4126,
"payload": {
"interface": "lan",
"name": "Test_Queue"
}
},
{
"name": "Check PRIQ priority minimum constraint",
"status": 400,
"return": 4128,
"payload": {
"interface": "lan",
"name": "New_Queue",
"priority": -1
}
},
{
"name": "Check PRIQ priority maximum constraint",
"status": 400,
"return": 4128,
"payload": {
"interface": "lan",
"name": "New_Queue",
"priority": 16
}
},
{
"name": "Check queue limit minimum constraint",
"status": 400,
"return": 4120,
"payload": {
"interface": "lan",
"name": "New_Queue",
"qlimit": 0
}
},
{
"name": "Update to FAIRQ parent traffic shaper",
"uri": "/api/v1/firewall/traffic_shaper",
"method": "PUT",
"payload": {
"interface": "lan",
"scheduler": "FAIRQ",
"bandwidthtype": "Mb",
"bandwidth": 2000
}
},
{
"name": "Check FAIRQ/CBQ priority minimum constraint",
"status": 400,
"return": 4127,
"payload": {
"interface": "lan",
"name": "New_Queue",
"priority": -1
}
},
{
"name": "Check FAIRQ/CBQ priority maximum constraint",
"status": 400,
"return": 4127,
"payload": {
"interface": "lan",
"name": "New_Queue",
"priority": 8
}
},
{
"name": "Check bandwidth type validation",
"status": 400,
"return": 4116,
"payload": {
"interface": "lan",
"name": "New_Queue",
"bandwidthtype": "INVALID"
}
},
{
"name": "Check bandwidth minimum constraint",
"status": 400,
"return": 4118,
"payload": {
"interface": "lan",
"name": "New_Queue",
"bandwidthtype": "Gb",
"bandwidth": 0
}
},
{
"name": "Check percentage-based bandwidth maximum constraint",
"status": 400,
"return": 4119,
"payload": {
"interface": "lan",
"name": "New_Queue",
"bandwidthtype": "%",
"bandwidth": 101
}
},
{
"name": "Check bandwidth parent maximum constraint",
"status": 400,
"return": 4129,
"payload": {
"interface": "lan",
"name": "New_Queue",
"bandwidthtype": "Gb",
"bandwidth": 100 # 100Gb is over the parent's 1Gb maximum
}
},
{
"name": "Create traffic shaper queue to increase child bandwidth",
"payload": {
"interface": "lan",
"name": "Mb-BW_Queue",
"bandwidthtype": "Gb",
"bandwidth": 1,
"red": True
}
},
{
"name": "Create another traffic shaper queue to push sum of child bandwidth over parent maximum",
"status": 400,
"return": 4129,
"payload": {
"interface": "lan",
"name": "New_Queue",
"bandwidthtype": "Mb",
"bandwidth": 1024, # 1024Mb + our other child queue's 1Gb will exceed parent's 2000Mb limit
"red": True
}
},
{
"name": "Update to HFSC parent traffic shaper",
"uri": "/api/v1/firewall/traffic_shaper",
"method": "PUT",
"payload": {
"interface": "lan",
"scheduler": "HFSC"
}
},
{
"name": "Check upperlimit3 requirement",
"status": 400,
"return": 4133,
"payload": {
"interface": "lan",
"name": "New_Queue",
"upperlimit": True
}
},
{
"name": "Check upperlimit3 validation",
"status": 400,
"return": 4134,
"payload": {
"interface": "lan",
"name": "New_Queue",
"upperlimit": True,
"upperlimit3": "INVALID"
}
},
{
"name": "Check upperlimit1 validation",
"status": 400,
"return": 4130,
"payload": {
"interface": "lan",
"name": "New_Queue",
"upperlimit": True,
"upperlimit1": "INVALID",
"upperlimit3": "1Mb"
}
},
{
"name": "Check upperlimit2 requirement",
"status": 400,
"return": 4131,
"payload": {
"interface": "lan",
"name": "New_Queue",
"upperlimit": True,
"upperlimit1": "1Mb",
"upperlimit3": "1Mb"
}
},
{
"name": "Check upperlimit2 validation",
"status": 400,
"return": 4132,
"payload": {
"interface": "lan",
"name": "New_Queue",
"upperlimit": True,
"upperlimit1": "1Mb",
"upperlimit2": 0,
"upperlimit3": "1Mb"
}
},
{
"name": "Check linkshare3 requirement",
"status": 400,
"return": 4138,
"payload": {
"interface": "lan",
"name": "New_Queue",
"linkshare": True
}
},
{
"name": "Check linkshare3 validation",
"status": 400,
"return": 4139,
"payload": {
"interface": "lan",
"name": "New_Queue",
"linkshare": True,
"linkshare3": "INVALID"
}
},
{
"name": "Check linkshare1 validation",
"status": 400,
"return": 4135,
"payload": {
"interface": "lan",
"name": "New_Queue",
"linkshare": True,
"linkshare1": "INVALID",
"linkshare3": "1Mb"
}
},
{
"name": "Check linkshare2 requirement",
"status": 400,
"return": 4136,
"payload": {
"interface": "lan",
"name": "New_Queue",
"linkshare": True,
"linkshare1": "1Mb",
"linkshare3": "1Mb"
}
},
{
"name": "Check linkshare2 validation",
"status": 400,
"return": 4137,
"payload": {
"interface": "lan",
"name": "New_Queue",
"linkshare": True,
"linkshare1": "1Mb",
"linkshare2": 0,
"linkshare3": "1Mb"
}
},
{
"name": "Check realtime3 requirement",
"status": 400,
"return": 4143,
"payload": {
"interface": "lan",
"name": "New_Queue",
"realtime": True
}
},
{
"name": "Check realtime3 validation",
"status": 400,
"return": 4144,
"payload": {
"interface": "lan",
"name": "New_Queue",
"realtime": True,
"realtime3": "INVALID"
}
},
{
"name": "Check realtime1 validation",
"status": 400,
"return": 4140,
"payload": {
"interface": "lan",
"name": "New_Queue",
"realtime": True,
"realtime1": "INVALID",
"realtime3": "1Mb"
}
},
{
"name": "Check realtime2 requirement",
"status": 400,
"return": 4141,
"payload": {
"interface": "lan",
"name": "New_Queue",
"realtime": True,
"realtime1": "1Mb",
"realtime3": "1Mb"
}
},
{
"name": "Check realtime2 validation",
"status": 400,
"return": 4142,
"payload": {
"interface": "lan",
"name": "New_Queue",
"realtime": True,
"realtime1": "1Mb",
"realtime2": 0,
"realtime3": "1Mb"
}
},
]
delete_tests = [
{
"name": "Delete Test_Queue",
"payload": {
"interface": "lan",
"name": "Test_Queue"
}
},
{
"name": "Check interface requirement",
"status": 400,
"return": 4110
},
{
"name": "Check interface validation",
"status": 400,
"return": 4111,
"payload": {
"interface": "INVALID"
}
},
{
"name": "Check interface with no traffic shaper",
"status": 400,
"return": 4122,
"payload": {
"interface": "wan",
}
},
{
"name": "Check name requirement",
"status": 400,
"return": 4123,
"payload": {
"interface": "lan"
}
},
{
"name": "Check name validation",
"status": 400,
"return": 4145,
"payload": {
"interface": "lan",
"name": "INVALID"
}
},
{
"name": "Delete parent traffic shaper",
"uri": "/api/v1/firewall/traffic_shaper",
"payload": {
"interface": "lan",
"apply": True
}
},
]
APIUnitTestFirewallTrafficShaperQueue()
|
slack_sdk/socket_mode/aiohttp/__init__.py | priya1puresoftware/python-slack-sdk | 2,486 | 11133055 | """aiohttp based Socket Mode client
* https://api.slack.com/apis/connections/socket
* https://slack.dev/python-slack-sdk/socket-mode/
* https://pypi.org/project/aiohttp/
"""
import asyncio
import logging
import time
from asyncio import Future, Lock
from asyncio import Queue
from logging import Logger
from typing import Union, Optional, List, Callable, Awaitable
import aiohttp
from aiohttp import ClientWebSocketResponse, WSMessage, WSMsgType, ClientConnectionError
from slack_sdk.proxy_env_variable_loader import load_http_proxy_from_env
from slack_sdk.socket_mode.async_client import AsyncBaseSocketModeClient
from slack_sdk.socket_mode.async_listeners import (
AsyncWebSocketMessageListener,
AsyncSocketModeRequestListener,
)
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.web.async_client import AsyncWebClient
class SocketModeClient(AsyncBaseSocketModeClient):
logger: Logger
web_client: AsyncWebClient
app_token: str
wss_uri: Optional[str]
auto_reconnect_enabled: bool
message_queue: Queue
message_listeners: List[
Union[
AsyncWebSocketMessageListener,
Callable[
["AsyncBaseSocketModeClient", dict, Optional[str]], Awaitable[None]
],
]
]
socket_mode_request_listeners: List[
Union[
AsyncSocketModeRequestListener,
Callable[["AsyncBaseSocketModeClient", SocketModeRequest], Awaitable[None]],
]
]
message_receiver: Optional[Future]
message_processor: Future
proxy: Optional[str]
ping_interval: float
trace_enabled: bool
last_ping_pong_time: Optional[float]
current_session: Optional[ClientWebSocketResponse]
current_session_monitor: Optional[Future]
auto_reconnect_enabled: bool
default_auto_reconnect_enabled: bool
closed: bool
stale: bool
connect_operation_lock: Lock
on_message_listeners: List[Callable[[WSMessage], Awaitable[None]]]
on_error_listeners: List[Callable[[WSMessage], Awaitable[None]]]
on_close_listeners: List[Callable[[WSMessage], Awaitable[None]]]
def __init__(
self,
app_token: str,
logger: Optional[Logger] = None,
web_client: Optional[AsyncWebClient] = None,
proxy: Optional[str] = None,
auto_reconnect_enabled: bool = True,
ping_interval: float = 5,
trace_enabled: bool = False,
on_message_listeners: Optional[
List[Callable[[WSMessage], Awaitable[None]]]
] = None,
on_error_listeners: Optional[
List[Callable[[WSMessage], Awaitable[None]]]
] = None,
on_close_listeners: Optional[
List[Callable[[WSMessage], Awaitable[None]]]
] = None,
):
"""Socket Mode client
Args:
app_token: App-level token
logger: Custom logger
web_client: Web API client
auto_reconnect_enabled: True if automatic reconnection is enabled (default: True)
ping_interval: interval for ping-pong with Slack servers (seconds)
trace_enabled: True if more verbose logs to see what's happening under the hood
proxy: the HTTP proxy URL
on_message_listeners: listener functions for on_message
on_error_listeners: listener functions for on_error
on_close_listeners: listener functions for on_close
"""
self.app_token = app_token
self.logger = logger or logging.getLogger(__name__)
self.web_client = web_client or AsyncWebClient()
self.closed = False
self.stale = False
self.connect_operation_lock = Lock()
self.proxy = proxy
if self.proxy is None or len(self.proxy.strip()) == 0:
env_variable = load_http_proxy_from_env(self.logger)
if env_variable is not None:
self.proxy = env_variable
self.default_auto_reconnect_enabled = auto_reconnect_enabled
self.auto_reconnect_enabled = self.default_auto_reconnect_enabled
self.ping_interval = ping_interval
self.trace_enabled = trace_enabled
self.last_ping_pong_time = None
self.wss_uri = None
self.message_queue = Queue()
self.message_listeners = []
self.socket_mode_request_listeners = []
self.current_session = None
self.current_session_monitor = None
# https://docs.aiohttp.org/en/stable/client_reference.html
# Unless you are connecting to a large, unknown number of different servers
# over the lifetime of your application,
# it is suggested you use a single session for the lifetime of your application
# to benefit from connection pooling.
self.aiohttp_client_session = aiohttp.ClientSession()
self.on_message_listeners = on_message_listeners or []
self.on_error_listeners = on_error_listeners or []
self.on_close_listeners = on_close_listeners or []
self.message_receiver = None
self.message_processor = asyncio.ensure_future(self.process_messages())
async def monitor_current_session(self) -> None:
# In the asyncio runtime, accessing a shared object (self.current_session here) from
# multiple tasks can cause race conditions and errors.
# To avoid such, we access only the session that is active when this loop starts.
session: ClientWebSocketResponse = self.current_session
session_id: str = self.build_session_id(session)
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"A new monitor_current_session() execution loop for {session_id} started"
)
try:
logging_interval = 100
counter_for_logging = 0
while not self.closed:
if session != self.current_session:
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"The monitor_current_session task for {session_id} is now cancelled"
)
break
try:
if self.trace_enabled and self.logger.level <= logging.DEBUG:
# The logging here is for detailed investigation on potential issues in this client.
# If you don't see this log for a while, it means that
                        # this monitor_current_session execution is no longer working for some reason.
counter_for_logging += 1
if counter_for_logging >= logging_interval:
counter_for_logging = 0
log_message = (
"#monitor_current_session method has been verifying if this session is active "
f"(session: {session_id}, logging interval: {logging_interval})"
)
self.logger.debug(log_message)
await asyncio.sleep(self.ping_interval)
if session is not None and session.closed is False:
t = time.time()
if self.last_ping_pong_time is None:
self.last_ping_pong_time = float(t)
try:
await session.ping(f"sdk-ping-pong:{t}")
except Exception as e:
# The ping() method can fail for some reason.
# To establish a new connection even in this scenario,
# we ignore the exception here.
self.logger.warning(
f"Failed to send a ping message ({session_id}): {e}"
)
if self.auto_reconnect_enabled:
should_reconnect = False
if session is None or session.closed:
self.logger.info(
f"The session ({session_id}) seems to be already closed. Reconnecting..."
)
should_reconnect = True
if await self.is_ping_pong_failing():
disconnected_seconds = int(
time.time() - self.last_ping_pong_time
)
self.logger.info(
f"The session ({session_id}) seems to be stale. Reconnecting..."
f" reason: disconnected for {disconnected_seconds}+ seconds)"
)
self.stale = True
self.last_ping_pong_time = None
should_reconnect = True
if should_reconnect is True or not await self.is_connected():
await self.connect_to_new_endpoint()
except Exception as e:
self.logger.error(
f"Failed to check the current session ({session_id}) or reconnect to the server "
f"(error: {type(e).__name__}, message: {e})"
)
except asyncio.CancelledError:
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"The monitor_current_session task for {session_id} is now cancelled"
)
raise
async def receive_messages(self) -> None:
# In the asyncio runtime, accessing a shared object (self.current_session here) from
# multiple tasks can cause race conditions and errors.
# To avoid such, we access only the session that is active when this loop starts.
session = self.current_session
session_id = self.build_session_id(session)
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"A new receive_messages() execution loop with {session_id} started"
)
try:
consecutive_error_count = 0
logging_interval = 100
counter_for_logging = 0
while not self.closed:
if session != self.current_session:
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"The running receive_messages task for {session_id} is now cancelled"
)
break
try:
message: WSMessage = await session.receive()
# just in case, checking if the value is not None
if message is not None:
if self.logger.level <= logging.DEBUG:
# The following logging prints every single received message
# except empty message data ones.
type = WSMsgType(message.type)
message_type = (
type.name if type is not None else message.type
)
message_data = message.data
if isinstance(message_data, bytes):
message_data = message_data.decode("utf-8")
if len(message_data) > 0:
# To skip the empty message that Slack server-side often sends
self.logger.debug(
f"Received message "
f"(type: {message_type}, "
f"data: {message_data}, "
f"extra: {message.extra}, "
f"session: {session_id})"
)
if self.trace_enabled:
                            # The logging here is for detailed troubleshooting of potential issues in this client.
# If you don't see this log for a while, it can mean that
# this receive_messages execution is no longer working for some reason.
counter_for_logging += 1
if counter_for_logging >= logging_interval:
counter_for_logging = 0
log_message = (
"#receive_messages method has been working without any issues "
f"(session: {session_id}, logging interval: {logging_interval})"
)
self.logger.debug(log_message)
if message.type == WSMsgType.TEXT:
message_data = message.data
await self.enqueue_message(message_data)
for listener in self.on_message_listeners:
await listener(message)
elif message.type == WSMsgType.CLOSE:
if self.auto_reconnect_enabled:
self.logger.info(
f"Received CLOSE event from {session_id}. Reconnecting..."
)
await self.connect_to_new_endpoint()
for listener in self.on_close_listeners:
await listener(message)
elif message.type == WSMsgType.ERROR:
for listener in self.on_error_listeners:
await listener(message)
elif message.type == WSMsgType.CLOSED:
await asyncio.sleep(self.ping_interval)
continue
elif message.type == WSMsgType.PING:
await session.pong(message.data)
continue
elif message.type == WSMsgType.PONG:
if message.data is not None:
str_message_data = message.data.decode("utf-8")
elements = str_message_data.split(":")
if (
len(elements) == 2
and elements[0] == "sdk-ping-pong"
):
try:
self.last_ping_pong_time = float(elements[1])
except Exception as e:
self.logger.warning(
f"Failed to parse the last_ping_pong_time value from {str_message_data}"
f" - error : {e}, session: {session_id}"
)
continue
consecutive_error_count = 0
except Exception as e:
consecutive_error_count += 1
self.logger.error(
f"Failed to receive or enqueue a message: {type(e).__name__}, {e} ({session_id})"
)
if isinstance(e, ClientConnectionError):
await asyncio.sleep(self.ping_interval)
else:
await asyncio.sleep(consecutive_error_count)
except asyncio.CancelledError:
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"The running receive_messages task for {session_id} is now cancelled"
)
raise
async def is_ping_pong_failing(self) -> bool:
if self.last_ping_pong_time is None:
return False
disconnected_seconds = int(time.time() - self.last_ping_pong_time)
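        # Treat the connection as stale once no pong has been observed for four ping intervals.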
return disconnected_seconds >= (self.ping_interval * 4)
async def is_connected(self) -> bool:
connected: bool = (
not self.closed
and not self.stale
and self.current_session is not None
and not self.current_session.closed
and not await self.is_ping_pong_failing()
)
if self.logger.level <= logging.DEBUG and connected is False:
# Prints more detailed information about the inactive connection
is_ping_pong_failing = await self.is_ping_pong_failing()
session_id = await self.session_id()
self.logger.debug(
"Inactive connection detected ("
f"session_id: {session_id}, "
f"closed: {self.closed}, "
f"stale: {self.stale}, "
f"current_session.closed: {self.current_session.closed}, "
f"is_ping_pong_failing: {is_ping_pong_failing}"
")"
)
return connected
async def session_id(self) -> str:
return self.build_session_id(self.current_session)
async def connect(self):
old_session: Optional[ClientWebSocketResponse] = (
None if self.current_session is None else self.current_session
)
if self.wss_uri is None:
# If the underlying WSS URL does not exist,
# acquiring a new active WSS URL from the server-side first
self.wss_uri = await self.issue_new_wss_url()
self.current_session = await self.aiohttp_client_session.ws_connect(
self.wss_uri,
autoping=False,
heartbeat=self.ping_interval,
proxy=self.proxy,
)
session_id: str = await self.session_id()
self.auto_reconnect_enabled = self.default_auto_reconnect_enabled
self.stale = False
self.logger.info(f"A new session ({session_id}) has been established")
# The first ping from the new connection
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"Sending a ping message with the newly established connection ({session_id})..."
)
t = time.time()
await self.current_session.ping(f"sdk-ping-pong:{t}")
if self.current_session_monitor is not None:
self.current_session_monitor.cancel()
self.current_session_monitor = asyncio.ensure_future(
self.monitor_current_session()
)
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"A new monitor_current_session() executor has been recreated for {session_id}"
)
if self.message_receiver is not None:
self.message_receiver.cancel()
self.message_receiver = asyncio.ensure_future(self.receive_messages())
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"A new receive_messages() executor has been recreated for {session_id}"
)
if old_session is not None:
await old_session.close()
old_session_id = self.build_session_id(old_session)
self.logger.info(f"The old session ({old_session_id}) has been abandoned")
async def disconnect(self):
if self.current_session is not None:
await self.current_session.close()
session_id = await self.session_id()
self.logger.info(
f"The current session ({session_id}) has been abandoned by disconnect() method call"
)
async def send_message(self, message: str):
session_id = await self.session_id()
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"Sending a message: {message} from session: {session_id}"
)
try:
await self.current_session.send_str(message)
except ConnectionError as e:
# We rarely get this exception while replacing the underlying WebSocket connections.
# We can do one more try here as the self.current_session should be ready now.
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"Failed to send a message (error: {e}, message: {message}, session: {session_id})"
" as the underlying connection was replaced. Retrying the same request only one time..."
)
# Although acquiring self.connect_operation_lock also for the first method call is the safest way,
# we avoid synchronizing a lot for better performance. That's why we are doing a retry here.
try:
await self.connect_operation_lock.acquire()
if await self.is_connected():
await self.current_session.send_str(message)
else:
self.logger.warning(
f"The current session ({session_id}) is no longer active. "
"Failed to send a message"
)
raise e
finally:
if self.connect_operation_lock.locked() is True:
self.connect_operation_lock.release()
async def close(self):
self.closed = True
self.auto_reconnect_enabled = False
await self.disconnect()
if self.message_processor is not None:
self.message_processor.cancel()
if self.current_session_monitor is not None:
self.current_session_monitor.cancel()
if self.message_receiver is not None:
self.message_receiver.cancel()
if self.aiohttp_client_session is not None:
await self.aiohttp_client_session.close()
@classmethod
def build_session_id(cls, session: ClientWebSocketResponse) -> str:
if session is None:
return ""
return "s_" + str(hash(session))
|
xirl/configs/xmagical/pretraining/tcn.py | xxdreck/google-research | 23,901 | 11133056 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TCN config."""
from base_configs.pretrain import get_config as _get_config
def get_config():
"""TCN config."""
config = _get_config()
config.algorithm = "tcn"
config.optim.train_max_iters = 4_000
config.frame_sampler.strategy = "window"
config.frame_sampler.num_frames_per_sequence = 40
config.model.model_type = "resnet18_linear"
config.model.normalize_embeddigs = False
config.model.learnable_temp = False
config.loss.tcn.pos_radius = 1
config.loss.tcn.neg_radius = 4
config.loss.tcn.num_pairs = 2
config.loss.tcn.margin = 1.0
config.loss.tcn.temperature = 0.1
return config
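# Usage sketch (assuming the repo's standard pretraining entry point consumes this config):
#   config = get_config()
#   assert config.algorithm == "tcn"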
|
tools/generator/misp-galaxy.py | wagner-certat/misp-taxonomies | 209 | 11133066
import json
import requests
debug = False
galaxy_url = 'https://raw.githubusercontent.com/MISP/misp-galaxy/main/clusters/'
elements = ['tools.json', 'threat-actors.json']
# elements = ['threat-actor-tools.json']
taxonomy = {}
taxonomy['namespace'] = 'misp-galaxy'
taxonomy['description'] = 'Elements from the misp-galaxy as taxonomy (temporary measure)'
taxonomy['version'] = 1 # FIXME - this should be incremented manually
taxonomy['predicates'] = []
taxonomy['values'] = []
for element in elements:
g_element = requests.get(galaxy_url + element).json()
p_description = g_element['description']
if element.endswith('s.json'):
p_value = element[:-6]
elif element.endswith('-vocabulary.json'):
p_value = element[:-16]
else:
p_value = element
taxonomy['predicates'].append({'value': p_value, 'expanded': p_description})
t_value = {}
t_value['predicate'] = p_value
t_value['entry'] = []
for g_value in g_element['values']:
item = {}
item['value'] = g_value['value']
item['expanded'] = g_value['value']
if 'description' in g_value:
item['description'] = g_value['description']
t_value['entry'].append(item)
# if 'synonyms' in g_value:
# for g_value_synonym in g_value['synonyms']:
# item_s = dict(item)
# item_s['value'] = g_value_synonym
# item_s['expanded'] = g_value_synonym
# t_value['entry'].append(item_s)
taxonomy['values'].append(t_value)
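# Shape of one appended value block (illustrative; the actual values come from the galaxy JSON):
#   {"predicate": "tool", "entry": [{"value": "SomeTool", "expanded": "SomeTool", "description": "..."}]}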
file_out = '../../misp-galaxy/machinetag.json'
with open(file_out, 'w') as f:
f.write(json.dumps(taxonomy, sort_keys=True, indent=4, separators=(',', ': ')))
print("JSON saved to " + file_out)
# t = Taxonomy(taxonomy)
# with open('out-t.json', 'w') as f:
# f.write(json.dumps(t._json(), sort_keys=True, indent=4, separators=(',', ': ')))
|
tensorboard/plugin_util.py | Digitaltransform/tensorboard | 6,139 | 11133070
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities that may be especially useful to plugins."""
import threading
from bleach.sanitizer import Cleaner
# pylint: disable=g-bad-import-order
# Google-only: import markdown_freewisdom
import markdown
from tensorboard import context as _context
from tensorboard.backend import experiment_id as _experiment_id
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
_ALLOWED_ATTRIBUTES = {
"a": ["href", "title"],
"img": ["src", "title", "alt"],
}
_ALLOWED_TAGS = [
"ul",
"ol",
"li",
"p",
"pre",
"code",
"blockquote",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"hr",
"br",
"strong",
"em",
"a",
"img",
"table",
"thead",
"tbody",
"td",
"tr",
"th",
]
# Cache Markdown converter to avoid expensive initialization at each
# call to `markdown_to_safe_html`. Cache a different instance per thread.
class _MarkdownStore(threading.local):
def __init__(self):
self.markdown = markdown.Markdown(
extensions=[
"markdown.extensions.tables",
"markdown.extensions.fenced_code",
]
)
_MARKDOWN_STORE = _MarkdownStore()
# Cache Cleaner to avoid expensive initialization at each call to `clean`.
# Cache a different instance per thread.
class _CleanerStore(threading.local):
def __init__(self):
self.cleaner = Cleaner(
tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES
)
_CLEANER_STORE = _CleanerStore()
def safe_html(unsafe_string):
"""Return the input as a str, sanitized for insertion into the DOM.
Arguments:
unsafe_string: A Unicode string or UTF-8--encoded bytestring
possibly containing unsafe HTML markup.
Returns:
A string containing safe HTML.
"""
total_null_bytes = 0
if isinstance(unsafe_string, bytes):
unsafe_string = unsafe_string.decode("utf-8")
return _CLEANER_STORE.cleaner.clean(unsafe_string)
def markdown_to_safe_html(markdown_string):
"""Convert Markdown to HTML that's safe to splice into the DOM.
Arguments:
markdown_string: A Unicode string or UTF-8--encoded bytestring
containing Markdown source. Markdown tables are supported.
Returns:
A string containing safe HTML.
"""
return markdowns_to_safe_html([markdown_string], lambda xs: xs[0])
def markdowns_to_safe_html(markdown_strings, combine):
"""Convert multiple Markdown documents to one safe HTML document.
One could also achieve this by calling `markdown_to_safe_html`
multiple times and combining the results. Compared to that approach,
this function may be faster, because HTML sanitization (which can be
expensive) is performed only once rather than once per input. It may
also be less precise: if one of the input documents has unsafe HTML
that is sanitized away, that sanitization might affect other
documents, even if those documents are safe.
Args:
markdown_strings: List of Markdown source strings to convert, as
Unicode strings or UTF-8--encoded bytestrings. Markdown tables
are supported.
combine: Callback function that takes a list of unsafe HTML
strings of the same shape as `markdown_strings` and combines
them into a single unsafe HTML string, which will be sanitized
and returned.
Returns:
A string containing safe HTML.
"""
unsafe_htmls = []
total_null_bytes = 0
for source in markdown_strings:
# Convert to utf-8 whenever we have a binary input.
if isinstance(source, bytes):
source_decoded = source.decode("utf-8")
# Remove null bytes and warn if there were any, since it probably means
# we were given a bad encoding.
source = source_decoded.replace("\x00", "")
total_null_bytes += len(source_decoded) - len(source)
unsafe_html = _MARKDOWN_STORE.markdown.convert(source)
unsafe_htmls.append(unsafe_html)
unsafe_combined = combine(unsafe_htmls)
sanitized_combined = _CLEANER_STORE.cleaner.clean(unsafe_combined)
warning = ""
if total_null_bytes:
warning = (
"<!-- WARNING: discarded %d null bytes in markdown string "
"after UTF-8 decoding -->\n"
) % total_null_bytes
return warning + sanitized_combined
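# Illustrative use of the combine callback (the joining markup shown is an assumption):
#   html = markdowns_to_safe_html(["# One", "# Two"], lambda parts: "<hr/>".join(parts))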
def context(environ):
"""Get a TensorBoard `RequestContext` from a WSGI environment.
Returns:
A `RequestContext` value.
"""
return _context.from_environ(environ)
def experiment_id(environ):
"""Determine the experiment ID associated with a WSGI request.
Each request to TensorBoard has an associated experiment ID, which is
always a string and may be empty. This experiment ID should be passed
to data providers.
Args:
environ: A WSGI environment `dict`. For a Werkzeug request, this is
`request.environ`.
Returns:
A experiment ID, as a possibly-empty `str`.
"""
return environ.get(_experiment_id.WSGI_ENVIRON_KEY, "")
class _MetadataVersionChecker:
"""TensorBoard-internal utility for warning when data is too new.
Specify a maximum known `version` number as stored in summary
metadata, and automatically reject and warn on data from newer
versions. This keeps a (single) bit of internal state to handle
logging a warning to the user at most once.
This should only be used by plugins bundled with TensorBoard, since
it may instruct users to upgrade their copy of TensorBoard.
"""
def __init__(self, data_kind, latest_known_version):
"""Initialize a `_MetadataVersionChecker`.
Args:
data_kind: A human-readable description of the kind of data
being read, like "scalar" or "histogram" or "PR curve".
latest_known_version: Highest tolerated value of `version`,
like `0`.
"""
self._data_kind = data_kind
self._latest_known_version = latest_known_version
self._warned = False
def ok(self, version, run, tag):
"""Test whether `version` is permitted, else complain."""
if 0 <= version <= self._latest_known_version:
return True
self._maybe_warn(version, run, tag)
return False
def _maybe_warn(self, version, run, tag):
if self._warned:
return
self._warned = True
logger.warning(
"Some %s data is too new to be read by this version of TensorBoard. "
"Upgrading TensorBoard may fix this. "
"(sample: run %r, tag %r, data version %r)",
self._data_kind,
run,
tag,
version,
)
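# Illustrative plugin-side usage (the surrounding names are assumptions, not part of this module):
#   _VERSION_CHECKER = _MetadataVersionChecker("scalar", latest_known_version=0)
#   if not _VERSION_CHECKER.ok(metadata.version, run, tag):
#       continue  # skip data written by a newer TensorBoard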
|
stix/utils/__init__.py | saegel/python-stix | 194 | 11133080
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import contextlib
import datetime
import functools
import keyword
import warnings
import lxml.etree
from mixbox.entities import Entity, EntityList
import mixbox.xml
from mixbox.vendor.six import iteritems, string_types
import stix
# relative
from . import dates
CDATA_START = "<![CDATA["
CDATA_END = "]]>"
CONFLICTING_NAMES = keyword.kwlist + ['id', 'type', 'range']
@contextlib.contextmanager
def ignored(*exceptions):
"""Allows you to ignore exceptions cleanly using context managers. This
exists in Python 3.
"""
try:
yield
except exceptions:
pass
def raise_warnings(func):
"""Function decorator that causes all Python warnings to be raised as
exceptions in the wrapped function.
Example:
>>> @raise_warnings
>>> def foo():
>>> warnings.warn("this will raise an exception")
"""
@functools.wraps(func)
def inner(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('error')
return func(*args, **kwargs)
return inner
def silence_warnings(func):
"""Function decorator that silences/ignores all Python warnings in the
wrapped function.
Example:
>>> @silence_warnings
>>> def foo():
>>> warnings.warn("this will not appear")
"""
@functools.wraps(func)
def inner(*args, **kwargs):
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
return func(*args, **kwargs)
return inner
def is_cdata(text):
"""Returns ``True`` if `text` contains a CDATA block.
Example:
>>> is_cdata("<![CDATA[Foo]]>")
True
>>> is_cdata("NOPE")
False
"""
if not text:
return False
return CDATA_START in text
def strip_cdata(text):
"""Removes all CDATA blocks from `text` if it contains them.
Note:
If the function contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed.
"""
if not is_cdata(text):
return text
xml = "<e>{0}</e>".format(text)
node = lxml.etree.fromstring(xml)
return node.text
def cdata(text):
"""Wraps the input `text` in a ``<![CDATA[ ]]>`` block.
If the text contains CDATA sections already, they are stripped and replaced
by the application of an outer-most CDATA block.
Args:
text: A string to wrap in a CDATA block.
Returns:
The `text` value wrapped in ``<![CDATA[]]>``
"""
if not text:
return text
if is_cdata(text):
text = strip_cdata(text)
escaped = "{0}{1}{2}".format(CDATA_START, text, CDATA_END)
return escaped
def is_stix(entity):
"""Returns true if `entity` is an instance of :class:`.Entity`."""
return isinstance(entity, stix.Entity)
def is_cybox(entity):
"""Returns true if `entity` is a Cybox object"""
try:
return entity.__module__.startswith("cybox.")
except AttributeError:
return False
def is_entity(entity):
"""Returns true if `entity` is an instance of :class:`.Entity` or
:class:`mixbox.Entity`.
"""
return isinstance(entity, (Entity, stix.Entity))
def is_entitylist(entity):
"""Returns true if `entity` is an instance of :class:`.EntityList`
or :class:`mixbox.entities.EntityList`.
"""
return isinstance(entity, (EntityList, stix.EntityList))
def is_typedlist(entity):
"""Returns true if `entity` is an instance of :class:`.TypedList`
"""
return isinstance(entity, stix.TypedList)
def private_name(name):
"""Returns the internal, private name used when setting Entity property
values. Basically, it appends a "_" to `name` if there isn't already
one there.
"""
if name.startswith("_"):
return name
return "_" + name
def attr_name(name):
"""Converts `name` into the form expected for python-stix and
python-cybox properties.
This is used when attempting to access the property getter/setter via
the __dict__ instance var entries.
Example:
>>> attr_name("id")
'id_'
>>> attr_name("Title")
        'title'
"""
name = name.lower()
if name.startswith("_"):
name = name[1:]
if name in CONFLICTING_NAMES:
name += "_"
return name
def key_name(name):
"""Converts the input attribute name `name` into a key to be
used in `to_dict()` return dictionaries.
"""
name = attr_name(name)
if name.endswith("_"):
return name[:-1]
return name
def is_sequence(item):
"""Returns ``True`` if `value` is a sequence type (e.g., ``list``, or
``tuple``). String types will return ``False``.
"""
return hasattr(item, "__iter__") and not isinstance(item, string_types)
def check_version(expected, found):
"""Raises ValueError if `found` is not equal to or found within
`expected`.
"""
if is_sequence(expected):
is_good = found in expected
else:
is_good = (found == expected)
if not is_good:
error = "Version '{0}' is invalid. Expected {1}."
error = error.format(found, expected)
raise ValueError(error)
def iter_vars(obj):
"""Returns a generator which yields a ``(property name, property value)``
tuple with each iteration.
Note:
This will not yield vars that are attached during parse, such as
``__input_schemalocations__`` and ``__input_namespaces__``.
"""
def check(name):
return name not in ('__input_namespaces__', '__input_schemalocations__')
instance_vars = iteritems(vars(obj))
return ((attr_name(name), val) for name, val in instance_vars if check(name))
def is_dictable(obj):
"""Returns ``True`` if `obj` has a ``to_dict()`` method."""
return hasattr(obj, "to_dict")
def is_timestamp(obj):
"""Returns ``True`` if `obj` is an instance of ``datetime.datetime``."""
return isinstance(obj, datetime.datetime)
def is_date(obj):
"""Returns ``True`` if `obj` is an instance of ``datetime.date``."""
return isinstance(obj, datetime.date)
def is_bool(obj):
"""Returns ``True`` if `obj` is a ``bool``."""
return isinstance(obj, bool)
def has_value(var):
"""Returns ``True`` if `var` is not ``None`` and not empty."""
if var is None:
return
return bool(var) or (var in (False, 0))
@silence_warnings
def to_dict(entity, skip=()):
"""Returns a dictionary representation of `entity`. This will iterate over
the instance vars of `entity` and construct keys and values from those
variable names and values.
Args:
entity: A ``Entity`` object.
skip: An iterable containing keys to exclude from the dictionary. These
should be the dictionary key names, and not the instance variable
name (e.g., 'id' and NOT 'id_').
Returns:
A dictionary representation of the input `entity`.
"""
def dict_iter(items):
return [x.to_dict() if is_dictable(x) else x for x in items]
d = {}
for name, field in iter_vars(entity):
key = key_name(name)
if key in skip or not has_value(field):
continue
if is_dictable(field):
d[key] = field.to_dict()
elif is_timestamp(field):
d[key] = dates.serialize_value(field)
elif is_date(field):
d[key] = dates.serialize_date(field)
elif mixbox.xml.is_element(field) or mixbox.xml.is_etree(field):
d[key] = lxml.etree.tostring(field)
elif is_sequence(field):
d[key] = dict_iter(field)
else:
d[key] = field
return d
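# Illustrative call (the Indicator fields shown are assumptions):
#   to_dict(indicator, skip=("timestamp",))  # -> {"id": "indicator-1", "title": "..."}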
def xml_bool(value):
"""Returns ``True`` if `value` is an acceptable xs:boolean ``True`` value.
Returns ``False`` if `value` is an acceptable xs:boolean ``False`` value.
If `value` is ``None``, this function will return ``None``.
"""
if value is None:
return None
if value in mixbox.xml.FALSE:
return False
if value in mixbox.xml.TRUE:
return True
error = "Unable to determine the xml boolean value of '{0}'".format(value)
raise ValueError(error)
def cast_var(item, klass, arg=None):
"""Attempt to cast `item` to an instance of `klass`.
Args:
item: The object to cast.
klass: The class to cast to.
arg: The kwarg name to use for the `klass` ``__init__()`` parameter. If
``None``, a positional argument will be used.
"""
if not arg:
return klass(item)
kwarg = {arg: item} # kwarg dict
return klass(**kwarg) # klass(value='foobar')
def remove_entries(d, keys):
"""Removes all the `keys` from the dictionary `d`.
Args:
d: A dictionary.
keys: An iterable collection of dictionary keys to remove.
"""
for key in keys:
d.pop(key, None)
# Namespace flattening
from .nsparser import * # noqa
from .dates import * # noqa
from .parser import * # noqa
from .walk import * # noqa
|
Lib/test/test_zipapp.py | tai271828/RustPython | 11,058 | 11133084
"""Test harness for the zipapp module."""
import io
import pathlib
import stat
import sys
import tempfile
import unittest
import zipapp
import zipfile
from test.support import requires_zlib
from unittest.mock import patch
class ZipAppTest(unittest.TestCase):
"""Test zipapp module functionality."""
def setUp(self):
tmpdir = tempfile.TemporaryDirectory()
self.addCleanup(tmpdir.cleanup)
self.tmpdir = pathlib.Path(tmpdir.name)
def test_create_archive(self):
# Test packing a directory.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target))
self.assertTrue(target.is_file())
def test_create_archive_with_pathlib(self):
# Test packing a directory using Path objects for source and target.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target)
self.assertTrue(target.is_file())
def test_create_archive_with_subdirs(self):
# Test packing a directory includes entries for subdirectories.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
(source / 'foo').mkdir()
(source / 'bar').mkdir()
(source / 'foo' / '__init__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target)
target.seek(0)
with zipfile.ZipFile(target, 'r') as z:
self.assertIn('foo/', z.namelist())
self.assertIn('bar/', z.namelist())
def test_create_archive_with_filter(self):
# Test packing a directory and using filter to specify
# which files to include.
def skip_pyc_files(path):
return path.suffix != '.pyc'
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
(source / 'test.py').touch()
(source / 'test.pyc').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target, filter=skip_pyc_files)
with zipfile.ZipFile(target, 'r') as z:
self.assertIn('__main__.py', z.namelist())
self.assertIn('test.py', z.namelist())
self.assertNotIn('test.pyc', z.namelist())
def test_create_archive_filter_exclude_dir(self):
# Test packing a directory and using a filter to exclude a
# subdirectory (ensures that the path supplied to include
# is relative to the source location, as expected).
def skip_dummy_dir(path):
return path.parts[0] != 'dummy'
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
(source / 'test.py').touch()
(source / 'dummy').mkdir()
(source / 'dummy' / 'test2.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target, filter=skip_dummy_dir)
with zipfile.ZipFile(target, 'r') as z:
self.assertEqual(len(z.namelist()), 2)
self.assertIn('__main__.py', z.namelist())
self.assertIn('test.py', z.namelist())
def test_create_archive_default_target(self):
# Test packing a directory to the default name.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
zipapp.create_archive(str(source))
expected_target = self.tmpdir / 'source.pyz'
self.assertTrue(expected_target.is_file())
@requires_zlib
def test_create_archive_with_compression(self):
# Test packing a directory into a compressed archive.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
(source / 'test.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target, compressed=True)
with zipfile.ZipFile(target, 'r') as z:
for name in ('__main__.py', 'test.py'):
self.assertEqual(z.getinfo(name).compress_type,
zipfile.ZIP_DEFLATED)
def test_no_main(self):
# Test that packing a directory with no __main__.py fails.
source = self.tmpdir / 'source'
source.mkdir()
(source / 'foo.py').touch()
target = self.tmpdir / 'source.pyz'
with self.assertRaises(zipapp.ZipAppError):
zipapp.create_archive(str(source), str(target))
def test_main_and_main_py(self):
# Test that supplying a main argument with __main__.py fails.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
with self.assertRaises(zipapp.ZipAppError):
zipapp.create_archive(str(source), str(target), main='pkg.mod:fn')
def test_main_written(self):
# Test that the __main__.py is written correctly.
source = self.tmpdir / 'source'
source.mkdir()
(source / 'foo.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), main='pkg.mod:fn')
with zipfile.ZipFile(str(target), 'r') as z:
self.assertIn('__main__.py', z.namelist())
self.assertIn(b'pkg.mod.fn()', z.read('__main__.py'))
def test_main_only_written_once(self):
# Test that we don't write multiple __main__.py files.
# The initial implementation had this bug; zip files allow
# multiple entries with the same name
source = self.tmpdir / 'source'
source.mkdir()
# Write 2 files, as the original bug wrote __main__.py
# once for each file written :-(
# See http://bugs.python.org/review/23491/diff/13982/Lib/zipapp.py#newcode67Lib/zipapp.py:67
# (line 67)
(source / 'foo.py').touch()
(source / 'bar.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), main='pkg.mod:fn')
with zipfile.ZipFile(str(target), 'r') as z:
self.assertEqual(1, z.namelist().count('__main__.py'))
def test_main_validation(self):
# Test that invalid values for main are rejected.
source = self.tmpdir / 'source'
source.mkdir()
target = self.tmpdir / 'source.pyz'
problems = [
'', 'foo', 'foo:', ':bar', '12:bar', 'a.b.c.:d',
'.a:b', 'a:b.', 'a:.b', 'a:silly name'
]
for main in problems:
with self.subTest(main=main):
with self.assertRaises(zipapp.ZipAppError):
zipapp.create_archive(str(source), str(target), main=main)
def test_default_no_shebang(self):
# Test that no shebang line is written to the target by default.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target))
with target.open('rb') as f:
self.assertNotEqual(f.read(2), b'#!')
def test_custom_interpreter(self):
# Test that a shebang line with a custom interpreter is written
# correctly.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
with target.open('rb') as f:
self.assertEqual(f.read(2), b'#!')
self.assertEqual(b'python\n', f.readline())
def test_pack_to_fileobj(self):
# Test that we can pack to a file object.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target, interpreter='python')
self.assertTrue(target.getvalue().startswith(b'#!python\n'))
def test_read_shebang(self):
# Test that we can read the shebang line correctly.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
self.assertEqual(zipapp.get_interpreter(str(target)), 'python')
def test_read_missing_shebang(self):
# Test that reading the shebang line of a file without one returns None.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target))
self.assertEqual(zipapp.get_interpreter(str(target)), None)
def test_modify_shebang(self):
# Test that we can change the shebang of a file.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
new_target = self.tmpdir / 'changed.pyz'
zipapp.create_archive(str(target), str(new_target), interpreter='python2.7')
self.assertEqual(zipapp.get_interpreter(str(new_target)), 'python2.7')
def test_write_shebang_to_fileobj(self):
# Test that we can change the shebang of a file, writing the result to a
# file object.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
new_target = io.BytesIO()
zipapp.create_archive(str(target), new_target, interpreter='python2.7')
self.assertTrue(new_target.getvalue().startswith(b'#!python2.7\n'))
def test_read_from_pathobj(self):
# Test that we can copy an archive using a pathlib.Path object
# for the source.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target1 = self.tmpdir / 'target1.pyz'
target2 = self.tmpdir / 'target2.pyz'
zipapp.create_archive(source, target1, interpreter='python')
zipapp.create_archive(target1, target2, interpreter='python2.7')
self.assertEqual(zipapp.get_interpreter(target2), 'python2.7')
def test_read_from_fileobj(self):
# Test that we can copy an archive using an open file object.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
temp_archive = io.BytesIO()
zipapp.create_archive(str(source), temp_archive, interpreter='python')
new_target = io.BytesIO()
temp_archive.seek(0)
zipapp.create_archive(temp_archive, new_target, interpreter='python2.7')
self.assertTrue(new_target.getvalue().startswith(b'#!python2.7\n'))
def test_remove_shebang(self):
# Test that we can remove the shebang from a file.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
new_target = self.tmpdir / 'changed.pyz'
zipapp.create_archive(str(target), str(new_target), interpreter=None)
self.assertEqual(zipapp.get_interpreter(str(new_target)), None)
def test_content_of_copied_archive(self):
# Test that copying an archive doesn't corrupt it.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target, interpreter='python')
new_target = io.BytesIO()
target.seek(0)
zipapp.create_archive(target, new_target, interpreter=None)
new_target.seek(0)
with zipfile.ZipFile(new_target, 'r') as z:
self.assertEqual(set(z.namelist()), {'__main__.py'})
# (Unix only) tests that archives with shebang lines are made executable
@unittest.skipIf(sys.platform == 'win32',
'Windows does not support an executable bit')
def test_shebang_is_executable(self):
# Test that an archive with a shebang line is made executable.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
self.assertTrue(target.stat().st_mode & stat.S_IEXEC)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(sys.platform == 'win32',
'Windows does not support an executable bit')
def test_no_shebang_is_not_executable(self):
# Test that an archive with no shebang line is not made executable.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter=None)
self.assertFalse(target.stat().st_mode & stat.S_IEXEC)
class ZipAppCmdlineTest(unittest.TestCase):
"""Test zipapp module command line API."""
def setUp(self):
tmpdir = tempfile.TemporaryDirectory()
self.addCleanup(tmpdir.cleanup)
self.tmpdir = pathlib.Path(tmpdir.name)
def make_archive(self):
# Build a simple archive for the command-line tests to operate on.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target)
return target
def test_cmdline_create(self):
# Test the basic command line API.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
args = [str(source)]
zipapp.main(args)
target = source.with_suffix('.pyz')
self.assertTrue(target.is_file())
def test_cmdline_copy(self):
# Test copying an archive.
original = self.make_archive()
target = self.tmpdir / 'target.pyz'
args = [str(original), '-o', str(target)]
zipapp.main(args)
self.assertTrue(target.is_file())
def test_cmdline_copy_inplace(self):
# Test copying an archive in place fails.
original = self.make_archive()
target = self.tmpdir / 'target.pyz'
args = [str(original), '-o', str(original)]
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
# Program should exit with a non-zero return code.
self.assertTrue(cm.exception.code)
def test_cmdline_copy_change_main(self):
# Test copying an archive doesn't allow changing __main__.py.
original = self.make_archive()
target = self.tmpdir / 'target.pyz'
args = [str(original), '-o', str(target), '-m', 'foo:bar']
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
# Program should exit with a non-zero return code.
self.assertTrue(cm.exception.code)
@patch('sys.stdout', new_callable=io.StringIO)
def test_info_command(self, mock_stdout):
# Test the output of the info command.
target = self.make_archive()
args = [str(target), '--info']
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
# Program should exit with a zero return code.
self.assertEqual(cm.exception.code, 0)
self.assertEqual(mock_stdout.getvalue(), "Interpreter: <none>\n")
def test_info_error(self):
# Test the info command fails when the archive does not exist.
target = self.tmpdir / 'dummy.pyz'
args = [str(target), '--info']
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
# Program should exit with a non-zero return code.
self.assertTrue(cm.exception.code)
if __name__ == "__main__":
unittest.main()
|
src/tests/ftest/server/daos_server_config.py | grom72/daos | 429 | 11133114 | #!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from apricot import TestWithServers
from server_utils import ServerFailed
class DaosServerConfigTest(TestWithServers):
"""Daos server configuration tests.
Test Class Description:
Simple test to verify that the daos_server starts/stops properly given
positive and negative values to its configuration file.
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a DaosServerConfigTest object."""
super().__init__(*args, **kwargs)
self.start_agents_once = False
self.start_servers_once = False
self.setup_start_agents = False
self.setup_start_servers = False
def test_daos_server_config_basic(self):
"""JIRA ID: DAOS-1525.
Test Description: Test that daos_server starts/stops properly
on the system.
:avocado: tags=all,small,control,daily_regression,server_start,basic
"""
# Setup the servers
self.add_server_manager()
self.configure_manager(
"server", self.server_managers[0], self.hostlist_servers,
self.hostfile_servers_slots)
# Get the input to verify
c_val = self.params.get("config_val", "/run/server_config_val/*/")
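# c_val is expected to be a three-element list from the test yaml:
# [config_parameter_name, test_value, expected_result], where the expected
# result is the string "PASS" or "FAIL"; the concrete parameter names and
# values live in the yaml and are not assumed here.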
# Identify the attribute and modify its value to test value
self.assertTrue(
self.server_managers[0].set_config_value(c_val[0], c_val[1]),
"Error setting the '{}' config file parameter to '{}'".format(
c_val[0], c_val[1]))
self.log.info(
"Starting server with %s = %s, expected to %s",
c_val[0], c_val[1], c_val[2])
try:
self.server_managers[0].start()
exception = None
except ServerFailed as err:
exception = err
# Verify
fail_message = ""
if c_val[2] == "FAIL" and exception is None:
self.log.error("Server was expected to fail")
fail_message = (
"Server start completed successfully when it was expected to "
"fail with {} = {}".format(c_val[0], c_val[1]))
elif c_val[2] == "PASS" and exception is not None:
self.log.error("Server was expected to start")
fail_message = (
"Server start failed when it was expected to complete "
"successfully with {} = {}: {}".format(
c_val[0], c_val[1], exception))
if fail_message:
self.fail(fail_message)
self.log.info("Test passed!")
|
homeassistant/components/joaoapps_join/__init__.py | basicpail/core | 22,481 | 11133125 |
"""Support for Joaoapps Join services."""
import logging
from pyjoin import (
get_devices,
ring_device,
send_file,
send_notification,
send_sms,
send_url,
set_wallpaper,
)
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_DEVICE_ID, CONF_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "joaoapps_join"
CONF_DEVICE_IDS = "device_ids"
CONF_DEVICE_NAMES = "device_names"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_DEVICE_ID): cv.string,
vol.Optional(CONF_DEVICE_IDS): cv.string,
vol.Optional(CONF_DEVICE_NAMES): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
],
)
},
extra=vol.ALLOW_EXTRA,
)
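# Illustrative configuration.yaml entry matching the schema above; the API key
# and device names are placeholders, not real values:
#
# joaoapps_join:
#   - api_key: YOUR_JOIN_API_KEY
#     device_names: "Pixel, Tablet"
#     name: phone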
def register_device(hass, api_key, name, device_id, device_ids, device_names):
"""Register services for each join device listed."""
def ring_service(service):
"""Service to ring devices."""
ring_device(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
)
def set_wallpaper_service(service):
"""Service to set wallpaper on devices."""
set_wallpaper(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
url=service.data.get("url"),
)
def send_file_service(service):
"""Service to send files to devices."""
send_file(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
url=service.data.get("url"),
)
def send_url_service(service):
"""Service to open url on devices."""
send_url(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
url=service.data.get("url"),
)
def send_tasker_service(service):
"""Service to open url on devices."""
send_notification(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
text=service.data.get("command"),
)
def send_sms_service(service):
"""Service to send sms from devices."""
send_sms(
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
sms_number=service.data.get("number"),
sms_text=service.data.get("message"),
api_key=api_key,
)
hass.services.register(DOMAIN, f"{name}ring", ring_service)
hass.services.register(DOMAIN, f"{name}set_wallpaper", set_wallpaper_service)
hass.services.register(DOMAIN, f"{name}send_sms", send_sms_service)
hass.services.register(DOMAIN, f"{name}send_file", send_file_service)
hass.services.register(DOMAIN, f"{name}send_url", send_url_service)
hass.services.register(DOMAIN, f"{name}send_tasker", send_tasker_service)
def setup(hass, config):
"""Set up the Join services."""
for device in config[DOMAIN]:
api_key = device.get(CONF_API_KEY)
device_id = device.get(CONF_DEVICE_ID)
device_ids = device.get(CONF_DEVICE_IDS)
device_names = device.get(CONF_DEVICE_NAMES)
name = device.get(CONF_NAME)
name = f"{name.lower().replace(' ', '_')}_" if name else ""
if api_key and not get_devices(api_key):
_LOGGER.error("Error connecting to Join, check API key")
return False
if device_id is None and device_ids is None and device_names is None:
_LOGGER.error(
"No device was provided. Please specify device_id"
", device_ids, or device_names"
)
return False
register_device(hass, api_key, name, device_id, device_ids, device_names)
return True
|
isserviceup/services/hashicorp.py | EvgeshaGars/is-service-up | 182 | 11133127 |
from isserviceup.services.models.statuspage import StatusPagePlugin
class HashiCorp(StatusPagePlugin):
name = 'HashiCorp'
status_url = 'https://status.hashicorp.com/'
icon_url = '/images/icons/hashicorp.png'
|
mvpa2/testing/__init__.py | nno/PyMVPA | 227 | 11133134 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Helpers to unify/facilitate unittesting within PyMVPA
"""
__docformat__ = 'restructuredtext'
import numpy as np # we barely can step somewhere without it
from mvpa2.base import externals
from mvpa2 import pymvpa_dataroot
if __debug__:
from mvpa2.base import debug
debug('INIT', 'mvpa2.testing')
from mvpa2.testing.tools import *
if __debug__:
from mvpa2.base import debug
_ENFORCE_CA_ENABLED = 'ENFORCE_CA_ENABLED' in debug.active
else:
_ENFORCE_CA_ENABLED = False
from mvpa2.testing.sweep import sweepargs
if __debug__:
debug('INIT', 'mvpa2.testing end')
|
utils/metrics.py | jeromerony/dml_cross_entropy | 139 | 11133193 | from typing import Dict, List, Optional
import faiss
import torch
import torch.nn.functional as F
class AverageMeter:
"""Computes and stores the average and current value on device"""
def __init__(self, device, length):
self.device = device
self.length = length
self.reset()
def reset(self):
self.values = torch.zeros(self.length, device=self.device, dtype=torch.float)
self.counter = 0
self.last_counter = 0
def append(self, val):
self.values[self.counter] = val.detach()
self.counter += 1
self.last_counter += 1
@property
def val(self):
return self.values[self.counter - 1]
@property
def avg(self):
return self.values[:self.counter].mean()
@property
def values_list(self):
return self.values[:self.counter].cpu().tolist()
@property
def last_avg(self):
if self.last_counter == 0:
return self.latest_avg
else:
self.latest_avg = self.values[self.counter - self.last_counter:self.counter].mean()
self.last_counter = 0
return self.latest_avg
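# Minimal usage sketch for AverageMeter (illustrative only; the meter length
# and loss values are made up):
#
#   meter = AverageMeter(device='cpu', length=1000)
#   for _ in range(10):
#       meter.append(torch.rand(()))
#   print(meter.val, meter.avg, meter.last_avg)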
@torch.no_grad()
def recall_at_ks(query_features: torch.Tensor,
query_labels: torch.LongTensor,
ks: List[int],
gallery_features: Optional[torch.Tensor] = None,
gallery_labels: Optional[torch.Tensor] = None,
cosine: bool = False) -> Dict[int, float]:
"""
Compute the recall between samples at each k. This function uses about 8GB of memory.
Parameters
----------
query_features : torch.Tensor
Features for each query sample. shape: (num_queries, num_features)
query_labels : torch.LongTensor
Labels corresponding to the query features. shape: (num_queries,)
ks : List[int]
Values at which to compute the recall.
gallery_features : torch.Tensor
Features for each gallery sample. shape: (num_gallery, num_features)
gallery_labels : torch.LongTensor
Labels corresponding to the gallery features. shape: (num_gallery,)
cosine : bool
Use cosine distance between samples instead of euclidean distance.
Returns
-------
recalls : Dict[int, float]
Values of the recall at each k.
"""
offset = 0
if gallery_features is None and gallery_labels is None:
offset = 1
gallery_features = query_features
gallery_labels = query_labels
elif gallery_features is None or gallery_labels is None:
raise ValueError('gallery_features and gallery_labels needs to be both None or both Tensors.')
if cosine:
query_features = F.normalize(query_features, p=2, dim=1)
gallery_features = F.normalize(gallery_features, p=2, dim=1)
to_cpu_numpy = lambda x: x.cpu().numpy()
q_f, q_l, g_f, g_l = map(to_cpu_numpy, [query_features, query_labels, gallery_features, gallery_labels])
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
max_k = max(ks)
index_function = faiss.GpuIndexFlatIP if cosine else faiss.GpuIndexFlatL2
index = index_function(res, g_f.shape[1], flat_config)
index.add(g_f)
closest_indices = index.search(q_f, max_k + offset)[1]
recalls = {}
for k in ks:
indices = closest_indices[:, offset:k + offset]
recalls[k] = (q_l[:, None] == g_l[indices]).any(1).mean()
return {k: round(v * 100, 2) for k, v in recalls.items()}
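# Illustrative call (kept commented out): builds random features and labels and
# reports recall at each k as a percentage; the shapes, label count and ks are
# placeholders, and a CUDA device plus faiss-gpu are assumed by the function.
#
#   feats = torch.randn(1000, 128, device='cuda')
#   labels = torch.randint(0, 50, (1000,), device='cuda')
#   print(recall_at_ks(feats, labels, ks=[1, 2, 4, 8], cosine=True))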
|
Chapter15/gensim/word2vec_wiki.py | kksonge/Hands-On-Machine-Learning-for-Algorithmic-Trading | 944 | 11133215 |
# coding: utf-8
from pathlib import Path
from argparse import ArgumentParser
from time import time
import pandas as pd
import numpy as np
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
np.random.seed(42)
LANGUAGES = ['en', 'es']
def combine_files():
for language in LANGUAGES:
source_dir = DATA_DIR / language / 'sentences'
target_file = Path('wiki', language, 'wiki.txt')
with target_file.open('a') as target:
for source in source_dir.glob('*.txt'):
for line in source.open('r'):
target.write(line)
def get_accuracy(acc, detail=False):
results = [[c['section'], len(c['correct']), len(c['incorrect'])] for c in acc]
results = pd.DataFrame(results, columns=['category', 'correct', 'incorrect'])
results['average'] = results.correct.div(results[['correct', 'incorrect']].sum(1))
results.sort_values('average', ascending=False)
if detail:
print(results)
return results.iloc[-1, 1:].tolist()
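# The last row of `results` corresponds to gensim's aggregate 'total' section,
# so the returned list is [correct, incorrect, average] over all analogy categories.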
language = 'es'
PROJECT_DIR = Path('/home/stefan/projects/odsc_2018/word2vec-translation')
ANALOGIES_PATH = PROJECT_DIR / 'data' / 'analogies' / 'analogies-{}.txt'.format(language)
gensim_path = Path('wiki', language)
if not gensim_path.exists():
gensim_path.mkdir(parents=True, exist_ok=True)
sentence_path = gensim_path / 'wiki.txt'
sentences = LineSentence(str(sentence_path))
start = time()
model = Word2Vec(sentences,
sg=1,
size=300,
window=5,
min_count=5,
negative=10,
workers=8,
iter=1,
alpha=0.05)
print('Duration: {:,.1f}s'.format(time() - start))
model.wv.save(str(gensim_path / 'word_vectors.bin'))
acc = get_accuracy(model.wv.accuracy(str(ANALOGIES_PATH), case_insensitive=True))
print('Base Accuracy: Correct {:,d} | Wrong {:,d} | Avg {:,.2%}\n'.format(*acc))
accuracies = [acc]
for i in range(1, 11):
start = time()
model.train(sentences, epochs=1, total_examples=model.corpus_count)
accuracies.append(get_accuracy(model.wv.accuracy(str(ANALOGIES_PATH))))
print('{} | Duration: {:,.1f} | Accuracy: {:.2%} '.format(i, time() - start, accuracies[-1][-1]))
pd.DataFrame(accuracies, columns=['correct', 'wrong', 'average']).to_csv(gensim_path / 'accuracies.csv', index=False)
model.wv.save(str(gensim_path / 'word_vectors_final.bin'))
|
tensorflow/contrib/timeseries/python/timeseries/state_management.py | PaulWang1905/tensorflow | 848 | 11133236 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for wrapping a model to operate on different data shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.model import ModelOutputs
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
class PassthroughStateManager(object):
"""A minimal wrapper for models which do not need state management."""
def __init__(self):
self._input_statistics = None
self._graph_initialized = False
def initialize_graph(self, model, input_statistics=None):
"""Adds required operations to the graph."""
del model # unused
self._graph_initialized = True
self._input_statistics = input_statistics
def define_loss(self, model, features, mode):
"""Wrap "model" with StateManager-specific operations.
Args:
model: The model (inheriting from TimeSeriesModel) to manage state for.
features: A dictionary with the following key/value pairs:
feature_keys.TrainEvalFeatures.TIMES: A [batch size x window size]
Tensor with times for each observation.
feature_keys.TrainEvalFeatures.VALUES: A [batch size x window size x num
features] Tensor with values for each observation.
mode: The tf.estimator.ModeKeys mode to use (TRAIN or EVAL).
Returns:
A ModelOutputs object.
Raises:
ValueError: If start state was specified.
"""
if feature_keys.State.STATE_TUPLE in features:
raise ValueError(
"Overriding start state is not supported for this model.")
return model.define_loss(features, mode)
class _OverridableStateManager(PassthroughStateManager):
"""Base class for state managers which support overriding model state."""
@abc.abstractmethod
def _define_loss_with_saved_state(self, model, features, mode):
pass
def define_loss(self, model, features, mode):
"""Switches between explicit start state and managed state."""
if feature_keys.FilteringFeatures.STATE_TUPLE in features:
# Explicit start state has been provided, so we should use that.
if mode == estimator_lib.ModeKeys.TRAIN:
raise ValueError(
"Overriding saved state for training is not supported (but a value "
"for feature {} was specified).".format(
feature_keys.FilteringFeatures.STATE_TUPLE))
start_state = features[feature_keys.FilteringFeatures.STATE_TUPLE]
del features[feature_keys.FilteringFeatures.STATE_TUPLE]
return model.get_batch_loss(
features=features, mode=mode, state=start_state)
else:
# No explicit start state; use managed state.
return self._define_loss_with_saved_state(
model=model, features=features, mode=mode)
class FilteringOnlyStateManager(_OverridableStateManager):
"""State manager for models which use state only for filtering.
Window-based models (ARModel) do not require state to be fed during training
(instead requiring a specific window size). Rather than requiring a minimum
window size for filtering, these models maintain this window in their state,
and so need state to be fed.
"""
def _define_loss_with_saved_state(self, model, features, mode):
return model.define_loss(features, mode)
class ChainingStateManager(_OverridableStateManager):
"""Maintains state across a batch for SequentialTimeSeriesModel subclasses.
The batch dimension is treated as indexing sequential chunks of the same
timeseries. End state from each chunk is fed as start state to the next chunk
during the next timestep. This is an approximation to full-batch training for
sequential models, but is typically much faster while still accurately
recovering parameters. The speedup comes from reduced scheduling overhead of
TensorFlow ops, since each operation can do much more work.
"""
def __init__(self, state_saving_interval=20, checkpoint_state=False):
"""Initialize the state manager.
Args:
state_saving_interval: This state manager saves intermediate model state
every `state_saving_interval` times. Larger values save memory, and
checkpoint size if `checkpoint_state` is enabled, but models
will need to impute across artificial gaps of up to this size
(i.e. gaps not appearing in the original data). This imputation may
affect training. Set state_saving_interval to 1 to avoid any
artificial imputation.
checkpoint_state: If True, saved intermediate model state will be
written to checkpoints. Checkpoints will then scale with dataset
size. If False, state will be freshly imputed from the beginning of a
series each time the model is restored, which means it may take a few
iterations for state to warm up.
"""
super(ChainingStateManager, self).__init__()
self._checkpoint_state = checkpoint_state
self._state_saving_interval = state_saving_interval
self._start_state = None
self._cached_states = None
def initialize_graph(self, model, input_statistics=None):
"""Adds required operations to the graph."""
super(ChainingStateManager, self).initialize_graph(
model=model, input_statistics=input_statistics)
self._start_state = model.get_start_state()
self._cached_states = math_utils.TupleOfTensorsLookup(
key_dtype=dtypes.int64,
default_values=self._start_state,
empty_key=-1,
deleted_key=-2,
name="cached_states",
checkpoint=self._checkpoint_state)
def _define_loss_with_saved_state(self, model, features, mode):
"""Feeds end state from one training iteration into the next.
Args:
model: The model to wrap. Compatible with children of TimeSeriesModel.
features: Dictionary with Tensor values defining the data to be
processed. The expected key/value pairs are at minimum:
feature_keys.TrainEvalFeatures.TIMES: A [number of chunks x window
size] Tensor with times for each observation, the result of chunking
a single longer time series.
feature_keys.TrainEvalFeatures.VALUES: A [number of chunks x window
size x num features] Tensor with values for each observation,
corresponding to times.
mode: The tf.estimator.ModeKeys mode to use. For EVAL and INFER, no
batching is performed, which may be slow. This is to avoid giving
cached and almost certainly stale values.
Returns:
A ModelOutputs object.
Raises:
ValueError: If initialize_graph has not been called.
"""
if not self._graph_initialized:
raise ValueError("ChainingStateManager requires initialize_graph() to be "
"called before use.")
(loss_op, end_state, batch_predictions) = self._update_cached_states(
model=model,
features=features,
mode=mode)
# Add a batch dimension so state can be used directly (e.g. for predictions)
# without the user manually reshaping it.
last_end_state_flat = [end_state_value[-1][None]
for end_state_value in nest.flatten(end_state)]
batch_predictions["observed"] = features[
feature_keys.TrainEvalFeatures.VALUES]
return ModelOutputs(
loss=loss_op,
end_state=nest.pack_sequence_as(end_state, last_end_state_flat),
predictions=batch_predictions,
prediction_times=features[feature_keys.TrainEvalFeatures.TIMES])
def _get_chunk_number(self, time):
return time // self._state_saving_interval
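# Example of the bucketing above (illustrative): with the default
# state_saving_interval of 20, times 0-19 map to chunk 0, times 20-39 to
# chunk 1, and so on, so state is cached once per chunk rather than per step.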
def _get_cached_states(self, times):
"""Retrieve cached states for a batch of times."""
read_chunk_numbers = self._get_chunk_number(times)
looked_up_state = list(self._cached_states.lookup(
math_ops.cast(read_chunk_numbers, dtypes.int64)))
looked_up_state = tuple(looked_up_state)
# We need to special-case the first chunk in a series to explicitly rely on
# the model's starting state so that gradients flow back to it. Otherwise it
# would affect only initialization, and would not be read from or updated
# during training. Not doing this also isolates that part of the graph,
# leading to errors on model reload if there are trainable variables
# affecting a model's start state.
if self._input_statistics is not None:
start_time = self._input_statistics.start_time
else:
start_time = 0
set_to_start_state = math_ops.equal(read_chunk_numbers,
self._get_chunk_number(start_time))
new_states = []
for start_state_value, cache_variable in zip(
nest.flatten(
math_utils.replicate_state(self._start_state,
array_ops.shape(times)[0])),
nest.flatten(looked_up_state)):
new_states.append(
array_ops.where(set_to_start_state, start_state_value,
cache_variable))
looked_up_state = nest.pack_sequence_as(looked_up_state, new_states)
return looked_up_state
def _update_cached_states(self, model, features, mode):
"""Read, process, and write chunks to the cache."""
times = features[feature_keys.TrainEvalFeatures.TIMES]
looked_up_state = self._get_cached_states(times[:, 0])
(model_loss, intermediate_states,
batch_predictions) = model.per_step_batch_loss(
features=features,
mode=mode,
state=looked_up_state)
# We need to at least write to the bucket after the one we read from.
min_chunk_numbers = self._get_chunk_number(times) + 1
# We write to the bucket that would have been read had the window started at
# the next sample (except for the last sample in the window, which gets
# written to the next bucket). This assumes fixed missing times (i.e. if we
# were presented with times [10, 50] we will never see times [30, 50]).
#
# TODO(allenl): Retrieve the highest time less than the current time rather
# than relying on fixed bucketing.
write_chunk_numbers = math_ops.maximum(
self._get_chunk_number(array_ops.concat(
[times[:, 1:], times[:, -1:] + 1], axis=1)),
min_chunk_numbers)
# Write once for every computed state; this may mean that we write multiple
# times to the same cell, but later writes will take precedence.
save_ops = [
self._cached_states.insert(
keys=write_chunk_numbers,
values=intermediate_states)]
end_state = nest.pack_sequence_as(
intermediate_states,
[state_element[:, -1]
for state_element in nest.flatten(intermediate_states)])
with ops.control_dependencies(save_ops):
# Make sure end states get saved at each iteration
loss_op = array_ops.identity(model_loss)
return loss_op, end_state, batch_predictions
|
dd_1/Part 2/Section 09 - Project 4/project_4_goal_3/playground.py | Rebell-Leader/bg | 3,266 | 11133298 | import itertools
from datetime import datetime
import constants
import parse_utils
# for fname, class_name, parser in zip(constants.fnames, constants.class_names, constants.parsers):
# file_iter = parse_utils.iter_file(fname, class_name, parser)
# print(fname)
# for _ in range(3):
# print(next(file_iter))
# print()
# gen = parse_utils.iter_combined_plain_tuple(constants.fnames, constants.class_names,
# constants.parsers, constants.compress_fields)
#
# print(list(next(gen)))
# print(list(next(gen)))
# nt = parse_utils.create_combo_named_tuple_class(constants.fnames, constants.compress_fields)
# print(nt._fields)
data_iter = parse_utils.iter_combined(constants.fnames, constants.class_names,
constants.parsers, constants.compress_fields)
for row in itertools.islice(data_iter, 5):
print(row)
print('-------------------------------')
cutoff_date = datetime(2018, 3, 1)
filtered_iter = parse_utils.filtered_iter_combined(constants.fnames, constants.class_names,
constants.parsers, constants.compress_fields,
key=lambda row: row.last_updated >= cutoff_date)
for row in filtered_iter:
print(row)
|
dist/ba_data/python/bastd/game/ninjafight.py | Bartixxx32/Bombsquad-Ballistica-Modded-Server | 317 | 11133304 | # Released under the MIT License. See LICENSE for details.
#
"""Provides Ninja Fight mini-game."""
# ba_meta require api 6
# (see https://ballistica.net/wiki/meta-tag-system)
from __future__ import annotations
import random
from typing import TYPE_CHECKING
import ba
from bastd.actor.spazbot import SpazBotSet, ChargerBot, SpazBotDiedMessage
from bastd.actor.onscreentimer import OnScreenTimer
if TYPE_CHECKING:
from typing import Any, Optional
class Player(ba.Player['Team']):
"""Our player type for this game."""
class Team(ba.Team[Player]):
"""Our team type for this game."""
# ba_meta export game
class NinjaFightGame(ba.TeamGameActivity[Player, Team]):
"""
A co-op game where you try to defeat a group
of Ninjas as fast as possible
"""
name = 'Ninja Fight'
description = 'How fast can you defeat the ninjas?'
scoreconfig = ba.ScoreConfig(label='Time',
scoretype=ba.ScoreType.MILLISECONDS,
lower_is_better=True)
default_music = ba.MusicType.TO_THE_DEATH
@classmethod
def get_supported_maps(cls, sessiontype: type[ba.Session]) -> list[str]:
# For now we're hard-coding spawn positions and whatnot
# so we need to be sure to specify that we only support
# a specific map.
return ['Courtyard']
@classmethod
def supports_session_type(cls, sessiontype: type[ba.Session]) -> bool:
# We currently support Co-Op only.
return issubclass(sessiontype, ba.CoopSession)
# In the constructor we should load any media we need/etc.
# ...but not actually create anything yet.
def __init__(self, settings: dict):
super().__init__(settings)
self._winsound = ba.getsound('score')
self._won = False
self._timer: Optional[OnScreenTimer] = None
self._bots = SpazBotSet()
self._preset = str(settings['preset'])
# Called when our game actually begins.
def on_begin(self) -> None:
super().on_begin()
is_pro = self._preset == 'pro'
# In pro mode there's no powerups.
if not is_pro:
self.setup_standard_powerup_drops()
# Make our on-screen timer and start it roughly when our bots appear.
self._timer = OnScreenTimer()
ba.timer(4.0, self._timer.start)
# Spawn some baddies.
ba.timer(
1.0, lambda: self._bots.spawn_bot(
ChargerBot, pos=(3, 3, -2), spawn_time=3.0))
ba.timer(
2.0, lambda: self._bots.spawn_bot(
ChargerBot, pos=(-3, 3, -2), spawn_time=3.0))
ba.timer(
3.0, lambda: self._bots.spawn_bot(
ChargerBot, pos=(5, 3, -2), spawn_time=3.0))
ba.timer(
4.0, lambda: self._bots.spawn_bot(
ChargerBot, pos=(-5, 3, -2), spawn_time=3.0))
# Add some extras for multiplayer or pro mode.
assert self.initialplayerinfos is not None
if len(self.initialplayerinfos) > 2 or is_pro:
ba.timer(
5.0, lambda: self._bots.spawn_bot(
ChargerBot, pos=(0, 3, -5), spawn_time=3.0))
if len(self.initialplayerinfos) > 3 or is_pro:
ba.timer(
6.0, lambda: self._bots.spawn_bot(
ChargerBot, pos=(0, 3, 1), spawn_time=3.0))
# Called for each spawning player.
def spawn_player(self, player: Player) -> ba.Actor:
# Let's spawn close to the center.
spawn_center = (0, 3, -2)
pos = (spawn_center[0] + random.uniform(-1.5, 1.5), spawn_center[1],
spawn_center[2] + random.uniform(-1.5, 1.5))
return self.spawn_player_spaz(player, position=pos)
def _check_if_won(self) -> None:
# Simply end the game if there's no living bots.
# FIXME: Should also make sure all bots have been spawned;
# if spawning is spread out enough that we're able to kill
# all living bots before the next spawns, it would incorrectly
# count as a win.
if not self._bots.have_living_bots():
self._won = True
self.end_game()
# Called for miscellaneous messages.
def handlemessage(self, msg: Any) -> Any:
# A player has died.
if isinstance(msg, ba.PlayerDiedMessage):
super().handlemessage(msg) # Augment standard behavior.
self.respawn_player(msg.getplayer(Player))
# A spaz-bot has died.
elif isinstance(msg, SpazBotDiedMessage):
# Unfortunately the bot-set will always tell us there are living
# bots if we ask here (the currently-dying bot isn't officially
# marked dead yet) ..so lets push a call into the event loop to
# check once this guy has finished dying.
ba.pushcall(self._check_if_won)
# Let the base class handle anything we don't.
else:
return super().handlemessage(msg)
return None
# When this is called, we should fill out results and end the game
# *regardless* of whether it has been won (this may be called due
# to a tournament ending or other external reason).
def end_game(self) -> None:
# Stop our on-screen timer so players can see what they got.
assert self._timer is not None
self._timer.stop()
results = ba.GameResults()
# If we won, set our score to the elapsed time in milliseconds.
# (there should just be 1 team here since this is co-op).
# ..if we didn't win, leave scores as default (None) which means
# we lost.
if self._won:
elapsed_time_ms = int((ba.time() - self._timer.starttime) * 1000.0)
ba.cameraflash()
ba.playsound(self._winsound)
for team in self.teams:
for player in team.players:
if player.actor:
player.actor.handlemessage(ba.CelebrateMessage())
results.set_team_score(team, elapsed_time_ms)
# Ends the activity.
self.end(results)
|
venv/Lib/site-packages/nipype/interfaces/afni/svm.py | richung99/digitizePlots | 585 | 11133321 |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""AFNI's svm interfaces."""
from ..base import TraitedSpec, traits, File
from .base import AFNICommand, AFNICommandInputSpec, AFNICommandOutputSpec
class SVMTrainInputSpec(AFNICommandInputSpec):
# training options
ttype = traits.Str(
desc="tname: classification or regression", argstr="-type %s", mandatory=True
)
in_file = File(
desc="A 3D+t AFNI brik dataset to be used for training.",
argstr="-trainvol %s",
mandatory=True,
exists=True,
copyfile=False,
)
out_file = File(
name_template="%s_vectors",
desc="output sum of weighted linear support vectors file name",
argstr="-bucket %s",
suffix="_bucket",
name_source="in_file",
)
model = File(
name_template="%s_model",
desc="basename for the brik containing the SVM model",
argstr="-model %s",
suffix="_model",
name_source="in_file",
)
alphas = File(
name_template="%s_alphas",
desc="output alphas file name",
argstr="-alpha %s",
suffix="_alphas",
name_source="in_file",
)
mask = File(
desc="byte-format brik file used to mask voxels in the analysis",
argstr="-mask %s",
position=-1,
exists=True,
copyfile=False,
)
nomodelmask = traits.Bool(
desc="Flag to enable the omission of a mask file", argstr="-nomodelmask"
)
trainlabels = File(
desc=".1D labels corresponding to the stimulus paradigm for the training data.",
argstr="-trainlabels %s",
exists=True,
)
censor = File(
desc=".1D censor file that allows the user to ignore certain samples in the training data.",
argstr="-censor %s",
exists=True,
)
kernel = traits.Str(
desc="string specifying type of kernel function:linear, polynomial, rbf, sigmoid",
argstr="-kernel %s",
)
max_iterations = traits.Int(
desc="Specify the maximum number of iterations for the optimization.",
argstr="-max_iterations %d",
)
w_out = traits.Bool(
desc="output sum of weighted linear support vectors", argstr="-wout"
)
options = traits.Str(desc="additional options for SVM-light", argstr="%s")
class SVMTrainOutputSpec(TraitedSpec):
out_file = File(desc="sum of weighted linear support vectors file name")
model = File(desc="brik containing the SVM model file name")
alphas = File(desc="output alphas file name")
class SVMTrain(AFNICommand):
"""Temporally predictive modeling with the support vector machine
SVM Train Only
For complete details, see the `3dsvm Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dsvm.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> svmTrain = afni.SVMTrain()
>>> svmTrain.inputs.in_file = 'run1+orig'
>>> svmTrain.inputs.trainlabels = 'run1_categories.1D'
>>> svmTrain.inputs.ttype = 'regression'
>>> svmTrain.inputs.mask = 'mask.nii'
>>> svmTrain.inputs.model = 'model_run1'
>>> svmTrain.inputs.alphas = 'alphas_run1'
>>> res = svmTrain.run() # doctest: +SKIP
"""
_cmd = "3dsvm"
input_spec = SVMTrainInputSpec
output_spec = SVMTrainOutputSpec
_additional_metadata = ["suffix"]
def _format_arg(self, name, trait_spec, value):
return super(SVMTrain, self)._format_arg(name, trait_spec, value)
class SVMTestInputSpec(AFNICommandInputSpec):
# testing options
model = traits.Str(
desc="modname is the basename for the brik containing the SVM model",
argstr="-model %s",
mandatory=True,
)
in_file = File(
desc="A 3D or 3D+t AFNI brik dataset to be used for testing.",
argstr="-testvol %s",
exists=True,
mandatory=True,
)
out_file = File(
name_template="%s_predictions",
desc="filename for .1D prediction file(s).",
argstr="-predictions %s",
)
testlabels = File(
desc="*true* class category .1D labels for the test dataset. It is used to calculate the prediction accuracy performance",
exists=True,
argstr="-testlabels %s",
)
classout = traits.Bool(
desc="Flag to specify that pname files should be integer-valued, corresponding to class category decisions.",
argstr="-classout",
)
nopredcensord = traits.Bool(
desc="Flag to prevent writing predicted values for censored time-points",
argstr="-nopredcensord",
)
nodetrend = traits.Bool(
desc="Flag to specify that pname files should not be linearly detrended",
argstr="-nodetrend",
)
multiclass = traits.Bool(
desc="Specifies multiclass algorithm for classification",
argstr="-multiclass %s",
)
options = traits.Str(desc="additional options for SVM-light", argstr="%s")
class SVMTest(AFNICommand):
"""Temporally predictive modeling with the support vector machine
SVM Test Only
For complete details, see the `3dsvm Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dsvm.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> svmTest = afni.SVMTest()
>>> svmTest.inputs.in_file= 'run2+orig'
>>> svmTest.inputs.model= 'run1+orig_model'
>>> svmTest.inputs.testlabels= 'run2_categories.1D'
>>> svmTest.inputs.out_file= 'pred2_model1'
>>> res = svmTest.run() # doctest: +SKIP
"""
_cmd = "3dsvm"
input_spec = SVMTestInputSpec
output_spec = AFNICommandOutputSpec
|
source/msoAutoShapeTypes.py | XLTechie/nvdaTests | 1,592 | 11133331 |
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2017 NV Access Limited
#This file is covered by the GNU General Public License.
import controlTypes
msoShape10pointStar=149
msoShape12pointStar=150
msoShape16pointStar=94
msoShape24pointStar=95
msoShape32pointStar=96
msoShape4pointStar=91
msoShape5pointStar=92
msoShape6pointStar=147
msoShape7pointStar=148
msoShape8pointStar=93
msoShapeActionButtonBackorPrevious=129
msoShapeActionButtonBeginning=131
msoShapeActionButtonCustom=125
msoShapeActionButtonDocument=134
msoShapeActionButtonEnd=132
msoShapeActionButtonForwardorNext=130
msoShapeActionButtonHelp=127
msoShapeActionButtonHome=126
msoShapeActionButtonInformation=128
msoShapeActionButtonMovie=136
msoShapeActionButtonReturn=133
msoShapeActionButtonSound=135
msoShapeArc=25
msoShapeBalloon=137
msoShapeBentArrow=41
msoShapeBentUpArrow=44
msoShapeBevel=15
msoShapeBlockArc=20
msoShapeCan=13
msoShapeChartPlus=182
msoShapeChartStar=181
msoShapeChartX=180
msoShapeChevron=52
msoShapeChord=161
msoShapeCircularArrow=60
msoShapeCloud=179
msoShapeCloudCallout=108
msoShapeCorner=162
msoShapeCornerTabs=169
msoShapeCross=11
msoShapeCube=14
msoShapeCurvedDownArrow=48
msoShapeCurvedDownRibbon=100
msoShapeCurvedLeftArrow=46
msoShapeCurvedRightArrow=45
msoShapeCurvedUpArrow=47
msoShapeCurvedUpRibbon=99
msoShapeDecagon=144
msoShapeDiagonalStripe=141
msoShapeDiamond=4
msoShapeDodecagon=146
msoShapeDonut=18
msoShapeDoubleBrace=27
msoShapeDoubleBracket=26
msoShapeDoubleWave=104
msoShapeDownArrow=36
msoShapeDownArrowCallout=56
msoShapeDownRibbon=98
msoShapeExplosion1=89
msoShapeExplosion2=90
msoShapeFlowchartAlternateProcess=62
msoShapeFlowchartCard=75
msoShapeFlowchartCollate=79
msoShapeFlowchartConnector=73
msoShapeFlowchartData=64
msoShapeFlowchartDecision=63
msoShapeFlowchartDelay=84
msoShapeFlowchartDirectAccessStorage=87
msoShapeFlowchartDisplay=88
msoShapeFlowchartDocument=67
msoShapeFlowchartExtract=81
msoShapeFlowchartInternalStorage=66
msoShapeFlowchartMagneticDisk=86
msoShapeFlowchartManualInput=71
msoShapeFlowchartManualOperation=72
msoShapeFlowchartMerge=82
msoShapeFlowchartMultidocument=68
msoShapeFlowchartOfflineStorage=139
msoShapeFlowchartOffpageConnector=74
msoShapeFlowchartOr=78
msoShapeFlowchartPredefinedProcess=65
msoShapeFlowchartPreparation=70
msoShapeFlowchartProcess=61
msoShapeFlowchartPunchedTape=76
msoShapeFlowchartSequentialAccessStorage=85
msoShapeFlowchartSort=80
msoShapeFlowchartStoredData=83
msoShapeFlowchartSummingJunction=77
msoShapeFlowchartTerminator=69
msoShapeFoldedCorner=16
msoShapeFrame=158
msoShapeFunnel=174
msoShapeGear6=172
msoShapeGear9=173
msoShapeHalfFrame=159
msoShapeHeart=21
msoShapeHeptagon=145
msoShapeHexagon=10
msoShapeHorizontalScroll=102
msoShapeIsoscelesTriangle=7
msoShapeLeftArrow=34
msoShapeLeftArrowCallout=54
msoShapeLeftBrace=31
msoShapeLeftBracket=29
msoShapeLeftCircularArrow=176
msoShapeLeftRightArrow=37
msoShapeLeftRightArrowCallout=57
msoShapeLeftRightCircularArrow=177
msoShapeLeftRightRibbon=140
msoShapeLeftRightUpArrow=40
msoShapeLeftUpArrow=43
msoShapeLightningBolt=22
msoShapeLineCallout1=109
msoShapeLineCallout1AccentBar=113
msoShapeLineCallout1BorderandAccentBar=121
msoShapeLineCallout1NoBorder=117
msoShapeLineCallout2=110
msoShapeLineCallout2AccentBar=114
msoShapeLineCallout2BorderandAccentBar=122
msoShapeLineCallout2NoBorder=118
msoShapeLineCallout3=111
msoShapeLineCallout3AccentBar=115
msoShapeLineCallout3BorderandAccentBar=123
msoShapeLineCallout3NoBorder=119
msoShapeLineCallout4=112
msoShapeLineCallout4AccentBar=116
msoShapeLineCallout4BorderandAccentBar=124
msoShapeLineCallout4NoBorder=120
msoShapeLineInverse=183
msoShapeMathDivide=166
msoShapeMathEqual=167
msoShapeMathMinus=164
msoShapeMathMultiply=165
msoShapeMathNotEqual=168
msoShapeMathPlus=163
msoShapeMixed=-2
msoShapeMoon=24
msoShapeNonIsoscelesTrapezoid=143
msoShapeNoSymbol=19
msoShapeNotchedRightArrow=50
msoShapeNotPrimitive=138
msoShapeOctagon=6
msoShapeOval=9
msoShapeOvalCallout=107
msoShapeParallelogram=2
msoShapePentagon=51
msoShapePie=142
msoShapePieWedge=175
msoShapePlaque=28
msoShapePlaqueTabs=171
msoShapeQuadArrow=39
msoShapeQuadArrowCallout=59
msoShapeRectangle=1
msoShapeRectangularCallout=105
msoShapeRegularPentagon=12
msoShapeRightArrow=33
msoShapeRightArrowCallout=53
msoShapeRightBrace=32
msoShapeRightBracket=30
msoShapeRightTriangle=8
msoShapeRound1Rectangle=151
msoShapeRound2DiagRectangle=153
msoShapeRound2SameRectangle=152
msoShapeRoundedRectangle=5
msoShapeRoundedRectangularCallout=106
msoShapeSmileyFace=17
msoShapeSnip1Rectangle=155
msoShapeSnip2DiagRectangle=157
msoShapeSnip2SameRectangle=156
msoShapeSnipRoundRectangle=154
msoShapeSquareTabs=170
msoShapeStripedRightArrow=49
msoShapeSun=23
msoShapeSwooshArrow=178
msoShapeTear=160
msoShapeTrapezoid=3
msoShapeUpArrow=35
msoShapeUpArrowCallout=55
msoShapeUpDownArrow=38
msoShapeUpDownArrowCallout=58
msoShapeUpRibbon=97
msoShapeUTurnArrow=42
msoShapeVerticalScroll=101
msoShapeWave=103
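# The dictionary below maps the MSOAutoShapeType values above to translatable
# role descriptions. A hypothetical lookup (the `shape` object is illustrative,
# not part of this module):
#
#   roleText = msoAutoShapeTypeToRoleText.get(shape.AutoShapeType)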
msoAutoShapeTypeToRoleText={
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape10pointStar:pgettext("shape","10-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape12pointStar:pgettext("shape","12-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape16pointStar:pgettext("shape","16-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape24pointStar:pgettext("shape","24-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape32pointStar:pgettext("shape","32-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape4pointStar:pgettext("shape","4-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape5pointStar:pgettext("shape","5-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape6pointStar:pgettext("shape","6-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape7pointStar:pgettext("shape","7-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShape8pointStar:pgettext("shape","8-point star"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeArc:pgettext("shape","Arc"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeBalloon:pgettext("shape","Balloon"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeBentArrow:pgettext("shape","Bent arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeBentUpArrow:pgettext("shape","Bent Up Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeBevel:pgettext("shape","Bevel"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeBlockArc:pgettext("shape","Block arc"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCan:pgettext("shape","Can"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeChartPlus:pgettext("shape","Chart Plus symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeChartStar:pgettext("shape","Chart Star Symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeChartX:pgettext("shape","Chart X Symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeChevron:pgettext("shape","Chevron"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeChord:pgettext("shape","Circle with line through center"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCircularArrow:pgettext("shape","Circular Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCloud:pgettext("shape","Cloud shape"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCloudCallout:pgettext("shape","Cloud callout"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCorner:pgettext("shape","Corner"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCornerTabs:pgettext("shape","four snipped corners"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCross:pgettext("shape","Cross"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCube:pgettext("shape","Cube"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCurvedDownArrow:pgettext("shape","Curved Down Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCurvedDownRibbon:pgettext("shape","Ribbon banner"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCurvedLeftArrow:pgettext("shape","Curved Left Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCurvedRightArrow:pgettext("shape","Curved Right Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCurvedUpArrow:pgettext("shape","Curved Up Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeCurvedUpRibbon:pgettext("shape","Curved Up Ribbon banner"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDecagon:pgettext("shape","Decagon"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDiagonalStripe:pgettext("shape","Diagonal stripe"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDiamond:pgettext("shape","Diamond"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDodecagon:pgettext("shape","Dodecagon"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDonut:pgettext("shape","Donut"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDoubleBrace:pgettext("shape","Double brace"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDoubleBracket:pgettext("shape","Double bracket"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDoubleWave:pgettext("shape","Double wave"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDownArrow:pgettext("shape","Down Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDownArrowCallout:pgettext("shape","Callout with Down Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeDownRibbon:pgettext("shape","Ribbon banner"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeExplosion1:pgettext("shape","Explosion"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeExplosion2:pgettext("shape","Explosion"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartAlternateProcess:pgettext("shape","Alternate process flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartCard:pgettext("shape","Card flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartCollate:pgettext("shape","Collate flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartConnector:pgettext("shape","Connector flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartData:pgettext("shape","Data flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartDecision:pgettext("shape","Decision flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartDelay:pgettext("shape","Delay flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartDirectAccessStorage:pgettext("shape","Direct access storage flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartDisplay:pgettext("shape","Display flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartDocument:pgettext("shape","Document flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartExtract:pgettext("shape","Extract flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartInternalStorage:pgettext("shape","Internal storage flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartMagneticDisk:pgettext("shape","Magnetic disk flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartManualInput:pgettext("shape","Manual input flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartManualOperation:pgettext("shape","Manual operation flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartMerge:pgettext("shape","Merge flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartMultidocument:pgettext("shape","Multi-document flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartOfflineStorage:pgettext("shape","Offline storage flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartOffpageConnector:pgettext("shape","Off-page connector flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartOr:pgettext("shape","'Or' flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartPredefinedProcess:pgettext("shape","Predefined process flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartPreparation:pgettext("shape","Preparation flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartProcess:pgettext("shape","Process flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartPunchedTape:pgettext("shape","Punched tape flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartSequentialAccessStorage:pgettext("shape","Sequential access storage flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartSort:pgettext("shape","Sort flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartStoredData:pgettext("shape","Stored data flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartSummingJunction:pgettext("shape","Summing junction flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFlowchartTerminator:pgettext("shape","Terminator flowchart symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFoldedCorner:pgettext("shape","Folded corner"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFrame:pgettext("shape","Rectangular picture frame"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeFunnel:pgettext("shape","Funnel"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeGear6:pgettext("shape","Gear with six teeth"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeGear9:pgettext("shape","Gear with nine teeth"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeHalfFrame:pgettext("shape","Half of rectangular picture frame"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeHeart:pgettext("shape","Heart"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeHeptagon:pgettext("shape","Heptagon"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeHexagon:pgettext("shape","Hexagon"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeHorizontalScroll:pgettext("shape","Horizontal scroll"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeIsoscelesTriangle:pgettext("shape","Isosceles triangle"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftArrow:pgettext("shape","Left Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftArrowCallout:pgettext("shape","Callout with Left Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftBrace:pgettext("shape","Left brace"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftBracket:pgettext("shape","Left bracket"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftCircularArrow:pgettext("shape","Counter-clockwise Circular arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftRightArrow:pgettext("shape","Double-ended horizontal Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftRightArrowCallout:pgettext("shape","Callout with Double-ended horizontal Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftRightCircularArrow:pgettext("shape","Double-ended Circular arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftRightRibbon:pgettext("shape","Ribbon with left and right arrows"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftRightUpArrow:pgettext("shape","Left right and up Arrows"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLeftUpArrow:pgettext("shape","Left and up Arrows"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLightningBolt:pgettext("shape","Lightning bolt"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout1:pgettext("shape","Line Callout"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout1AccentBar:pgettext("shape","Callout with horizontal accent bar"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout1BorderandAccentBar:pgettext("shape","Callout with border and horizontal accent bar"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout1NoBorder:pgettext("shape","Callout with horizontal line"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout2:pgettext("shape","Callout with diagonal straight line"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout2AccentBar:pgettext("shape","Callout with diagonal callout line and accent bar"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout2BorderandAccentBar:pgettext("shape","Callout with border, diagonal straight line and accent bar"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout2NoBorder:pgettext("shape","Callout with no border and diagonal callout line"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout3:pgettext("shape","Callout with angled line"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout3AccentBar:pgettext("shape","Callout with angled callout line and accent bar"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout3BorderandAccentBar:pgettext("shape","Callout with border, angled callout line, and accent bar"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout3NoBorder:pgettext("shape","Callout with no border and angled callout line"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout4:pgettext("shape","Callout with callout line segments forming a U-shape"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout4AccentBar:pgettext("shape","Callout with accent bar and callout line segments forming a U-shape"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout4BorderandAccentBar:pgettext("shape","Callout with border, accent bar, and callout line segments forming a U-shape"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineCallout4NoBorder:pgettext("shape","Callout with no border and callout line segments forming a U-shape"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeLineInverse:pgettext("shape","Line inverse"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeMathDivide:pgettext("shape","Math Division symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeMathEqual:pgettext("shape","Math Equivalence symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeMathMinus:pgettext("shape","Math Subtraction symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeMathMultiply:pgettext("shape","Math Multiplication symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeMathNotEqual:pgettext("shape","Math Non-equivalence symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeMathPlus:pgettext("shape","Math Addition symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeMoon:pgettext("shape","Moon"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeNonIsoscelesTrapezoid:pgettext("shape","Trapezoid with asymmetrical non-parallel sides"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeNoSymbol:pgettext("shape","'No' symbol"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
	msoShapeNotchedRightArrow:pgettext("shape","Notched Right Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeOctagon:pgettext("shape","Octagon"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeOval:pgettext("shape","Oval"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeOvalCallout:pgettext("shape","Oval-shaped callout"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeParallelogram:pgettext("shape","Parallelogram"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapePentagon:pgettext("shape","Pentagon"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapePie:pgettext("shape","Incomplete Pie with wedge missing"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapePieWedge:pgettext("shape","Quarter Pie Wedge"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapePlaque:pgettext("shape","Plaque"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapePlaqueTabs:pgettext("shape","Plaque Tabs"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeQuadArrow:pgettext("shape","Arrows pointing left right up and down"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeQuadArrowCallout:pgettext("shape","Callout with Arrows pointing left right up and down"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRectangle:pgettext("shape","Rectangle"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRectangularCallout:pgettext("shape","Rectangular callout"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRegularPentagon:pgettext("shape","Pentagon"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRightArrow:pgettext("shape","Right Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRightArrowCallout:pgettext("shape","Callout with Right Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRightBrace:pgettext("shape","Right brace"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRightBracket:pgettext("shape","Right bracket"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRightTriangle:pgettext("shape","Right triangle"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRound1Rectangle:pgettext("shape","Rectangle with one rounded corner"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRound2DiagRectangle:pgettext("shape","Rectangle with two rounded corners diagonally-opposed"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
	msoShapeRound2SameRectangle:pgettext("shape","Rectangle with two rounded corners that share a side"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRoundedRectangle:pgettext("shape","Rounded rectangle"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeRoundedRectangularCallout:pgettext("shape","Rounded rectangle-shaped callout"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeSmileyFace:pgettext("shape","Smiley face"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeSnip1Rectangle:pgettext("shape","Rectangle with one snipped corner"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeSnip2DiagRectangle:pgettext("shape","Rectangle with two snipped corners diagonally-opposed"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeSnip2SameRectangle:pgettext("shape","Rectangle with two snipped corners that share a side"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeSnipRoundRectangle:pgettext("shape","Rectangle with one snipped corner and one rounded corner"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeSquareTabs:pgettext("shape","Four small squares that define a rectangular shape"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeStripedRightArrow:pgettext("shape","Right Arrow with Stripes"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeSun:pgettext("shape","Sun"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeSwooshArrow:pgettext("shape","Curved arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeTear:pgettext("shape","Water droplet"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeTrapezoid:pgettext("shape","Trapezoid"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeUpArrow:pgettext("shape","Up Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
	msoShapeUpArrowCallout:pgettext("shape","Callout with Up Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeUpDownArrow:pgettext("shape","Double-ended Arrow pointing up and down"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeUpDownArrowCallout:pgettext("shape","Callout with arrows that point up and down"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeUpRibbon:pgettext("shape","Ribbon banner"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeUTurnArrow:pgettext("shape","U-shaped Arrow"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeVerticalScroll:pgettext("shape","Vertical scroll"),
# Translators: a shape name from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeWave:pgettext("shape","Wave"),
}
msoAutoShapeTypeToActionLabel={
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonBackorPrevious:pgettext("action","Back"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonBeginning:pgettext("action","Beginning"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonDocument:pgettext("action","Document"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonEnd:pgettext("action","End"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonForwardorNext:pgettext("action","Next"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonHelp:pgettext("action","Help"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonHome:pgettext("action","Home"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonInformation:pgettext("action","Information"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonMovie:pgettext("action","Movie"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
	msoShapeActionButtonReturn:pgettext("action","Return"),
# Translators: an action button label from Microsoft Office.
# See MSOAutoShapeType enumeration from https://msdn.microsoft.com/en-us/library/office/ff862770.aspx?f=255&MSPPError=-2147217396
msoShapeActionButtonSound:pgettext("action","Sound"),
}
msoAutoShapeTypeToRole={
msoShapeActionButtonBackorPrevious:controlTypes.Role.BUTTON,
msoShapeActionButtonBeginning:controlTypes.Role.BUTTON,
msoShapeActionButtonCustom:controlTypes.Role.BUTTON,
msoShapeActionButtonDocument:controlTypes.Role.BUTTON,
msoShapeActionButtonEnd:controlTypes.Role.BUTTON,
msoShapeActionButtonForwardorNext:controlTypes.Role.BUTTON,
msoShapeActionButtonHelp:controlTypes.Role.BUTTON,
msoShapeActionButtonHome:controlTypes.Role.BUTTON,
msoShapeActionButtonInformation:controlTypes.Role.BUTTON,
msoShapeActionButtonMovie:controlTypes.Role.BUTTON,
msoShapeActionButtonReturn:controlTypes.Role.BUTTON,
msoShapeActionButtonSound:controlTypes.Role.BUTTON,
}
|
psutil_example/process_management/process_pid_list.py | DazEB2/SimplePyScripts | 117 | 11133333 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install psutil
import psutil
process_pid_list = psutil.pids()
print('Process pid list ({}): {}'.format(len(process_pid_list), process_pid_list))
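

# Further illustration (a sketch, not part of the original script): resolve the
# first few pids to process names. Processes can vanish between the pids() call
# and the lookup, so NoSuchProcess is handled explicitly.
for pid in process_pid_list[:5]:
    try:
        print(pid, psutil.Process(pid).name())
    except psutil.NoSuchProcess:
        pass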
|
lib/django-1.3/django/core/management/commands/startapp.py | MiCHiLU/google_appengine_sdk | 790 | 11133335 | import os
from django.core.management.base import copy_helper, CommandError, LabelCommand
from django.utils.importlib import import_module
class Command(LabelCommand):
help = "Creates a Django app directory structure for the given app name in the current directory."
args = "[appname]"
label = 'application name'
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
def handle_label(self, app_name, directory=None, **options):
if directory is None:
directory = os.getcwd()
# Determine the project_name by using the basename of directory,
# which should be the full path of the project directory (or the
# current directory if no directory was passed).
project_name = os.path.basename(directory)
if app_name == project_name:
raise CommandError("You cannot create an app with the same name"
" (%r) as your project." % app_name)
# Check that the app_name cannot be imported.
try:
import_module(app_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as an app name. Please try another name." % app_name)
copy_helper(self.style, 'app', app_name, directory, project_name)
class ProjectCommand(Command):
help = ("Creates a Django app directory structure for the given app name"
" in this project's directory.")
def __init__(self, project_directory):
super(ProjectCommand, self).__init__()
self.project_directory = project_directory
def handle_label(self, app_name, **options):
super(ProjectCommand, self).handle_label(app_name, self.project_directory, **options)
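

# Example usage (illustrative; the app name "polls" is a placeholder):
#
#   python manage.py startapp polls
#
# This creates an application package named "polls" inside the current project
# directory, provided no existing Python module already uses that name.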
|
snorkel/classification/task.py | melonwater211/snorkel | 2,906 | 11133340 |
import logging
from functools import partial
from typing import Callable, List, Mapping, Optional, Sequence, Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
from snorkel.analysis import Scorer
Outputs = Mapping[str, List[torch.FloatTensor]]
class Operation:
"""A single operation (forward pass of a module) to execute in a Task.
See ``Task`` for more detail on the usage and semantics of an Operation.
Parameters
----------
name
The name of this operation (defaults to module_name since for most workflows,
each module is only used once per forward pass)
module_name
The name of the module in the module pool that this operation uses
inputs
The inputs that the specified module expects, given as a list of names of
previous operations (or optionally a tuple of the operation name and a key
if the output of that module is a dict instead of a Tensor).
Note that the original input to the model can be referred to as "_input_".
Example
-------
>>> op1 = Operation(module_name="linear1", inputs=[("_input_", "features")])
>>> op2 = Operation(module_name="linear2", inputs=["linear1"])
>>> op_sequence = [op1, op2]
Attributes
----------
name
See above
module_name
See above
inputs
See above
"""
def __init__(
self,
module_name: str,
inputs: Sequence[Union[str, Tuple[str, str]]],
name: Optional[str] = None,
) -> None:
self.name = name or module_name
self.module_name = module_name
self.inputs = inputs
def __repr__(self) -> str:
return (
f"Operation(name={self.name}, "
f"module_name={self.module_name}, "
f"inputs={self.inputs})"
)
class Task:
r"""A single task (a collection of modules and specified path through them).
Parameters
----------
name
The name of the task
module_pool
A ModuleDict mapping module names to the modules themselves
op_sequence
A list of ``Operation``\s to execute in order, defining the flow of information
through the network for this task
scorer
A ``Scorer`` with the desired metrics to calculate for this task
loss_func
A function that converts final logits into loss values.
Defaults to F.cross_entropy() if none is provided.
To use probalistic labels for training, use the Snorkel-defined method
cross_entropy_with_probs() instead.
output_func
A function that converts final logits into 'outputs' (e.g. probabilities)
Defaults to F.softmax(..., dim=1).
Attributes
----------
name
See above
module_pool
See above
op_sequence
See above
scorer
See above
loss_func
See above
output_func
See above
"""
def __init__(
self,
name: str,
module_pool: nn.ModuleDict,
op_sequence: Sequence[Operation],
scorer: Scorer = Scorer(metrics=["accuracy"]),
loss_func: Optional[Callable[..., torch.Tensor]] = None,
output_func: Optional[Callable[..., torch.Tensor]] = None,
) -> None:
self.name = name
self.module_pool = module_pool
self.op_sequence = op_sequence
self.loss_func = loss_func or F.cross_entropy
self.output_func = output_func or partial(F.softmax, dim=1)
self.scorer = scorer
logging.info(f"Created task: {self.name}")
def __repr__(self) -> str:
cls_name = type(self).__name__
return f"{cls_name}(name={self.name})"
|
tests/components/datadog/test_init.py | MrDelik/core | 30,023 | 11133342 | """The tests for the Datadog component."""
from unittest import mock
from unittest.mock import MagicMock, patch
import homeassistant.components.datadog as datadog
from homeassistant.const import (
EVENT_LOGBOOK_ENTRY,
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component
async def test_invalid_config(hass):
"""Test invalid configuration."""
with assert_setup_component(0):
assert not await async_setup_component(
hass, datadog.DOMAIN, {datadog.DOMAIN: {"host1": "host1"}}
)
async def test_datadog_setup_full(hass):
"""Test setup with all data."""
config = {datadog.DOMAIN: {"host": "host", "port": 123, "rate": 1, "prefix": "foo"}}
hass.bus.listen = MagicMock()
with patch("homeassistant.components.datadog.initialize") as mock_init, patch(
"homeassistant.components.datadog.statsd"
):
assert await async_setup_component(hass, datadog.DOMAIN, config)
assert mock_init.call_count == 1
assert mock_init.call_args == mock.call(statsd_host="host", statsd_port=123)
assert hass.bus.listen.called
assert hass.bus.listen.call_args_list[0][0][0] == EVENT_LOGBOOK_ENTRY
assert hass.bus.listen.call_args_list[1][0][0] == EVENT_STATE_CHANGED
async def test_datadog_setup_defaults(hass):
"""Test setup with defaults."""
hass.bus.listen = mock.MagicMock()
with patch("homeassistant.components.datadog.initialize") as mock_init, patch(
"homeassistant.components.datadog.statsd"
):
assert await async_setup_component(
hass,
datadog.DOMAIN,
{
datadog.DOMAIN: {
"host": "host",
"port": datadog.DEFAULT_PORT,
"prefix": datadog.DEFAULT_PREFIX,
}
},
)
assert mock_init.call_count == 1
assert mock_init.call_args == mock.call(statsd_host="host", statsd_port=8125)
assert hass.bus.listen.called
async def test_logbook_entry(hass):
"""Test event listener."""
hass.bus.listen = mock.MagicMock()
with patch("homeassistant.components.datadog.initialize"), patch(
"homeassistant.components.datadog.statsd"
) as mock_statsd:
assert await async_setup_component(
hass,
datadog.DOMAIN,
{datadog.DOMAIN: {"host": "host", "rate": datadog.DEFAULT_RATE}},
)
assert hass.bus.listen.called
handler_method = hass.bus.listen.call_args_list[0][0][1]
event = {
"domain": "automation",
"entity_id": "sensor.foo.bar",
"message": "foo bar biz",
"name": "triggered something",
}
handler_method(mock.MagicMock(data=event))
assert mock_statsd.event.call_count == 1
assert mock_statsd.event.call_args == mock.call(
title="Home Assistant",
text="%%% \n **{}** {} \n %%%".format(event["name"], event["message"]),
tags=["entity:sensor.foo.bar", "domain:automation"],
)
mock_statsd.event.reset_mock()
async def test_state_changed(hass):
"""Test event listener."""
hass.bus.listen = mock.MagicMock()
with patch("homeassistant.components.datadog.initialize"), patch(
"homeassistant.components.datadog.statsd"
) as mock_statsd:
assert await async_setup_component(
hass,
datadog.DOMAIN,
{
datadog.DOMAIN: {
"host": "host",
"prefix": "ha",
"rate": datadog.DEFAULT_RATE,
}
},
)
assert hass.bus.listen.called
handler_method = hass.bus.listen.call_args_list[1][0][1]
valid = {"1": 1, "1.0": 1.0, STATE_ON: 1, STATE_OFF: 0}
attributes = {"elevation": 3.2, "temperature": 5.0, "up": True, "down": False}
for in_, out in valid.items():
state = mock.MagicMock(
domain="sensor",
entity_id="sensor.foo.bar",
state=in_,
attributes=attributes,
)
handler_method(mock.MagicMock(data={"new_state": state}))
assert mock_statsd.gauge.call_count == 5
for attribute, value in attributes.items():
value = int(value) if isinstance(value, bool) else value
mock_statsd.gauge.assert_has_calls(
[
mock.call(
f"ha.sensor.{attribute}",
value,
sample_rate=1,
tags=[f"entity:{state.entity_id}"],
)
]
)
assert mock_statsd.gauge.call_args == mock.call(
"ha.sensor",
out,
sample_rate=1,
tags=[f"entity:{state.entity_id}"],
)
mock_statsd.gauge.reset_mock()
for invalid in ("foo", "", object):
handler_method(
mock.MagicMock(data={"new_state": ha.State("domain.test", invalid, {})})
)
assert not mock_statsd.gauge.called
|
tests/misc/test_startup_time.py | wenliangzhao2018/d2go | 687 | 11133362 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from d2go.initializer import (
REGISTER_D2_DATASETS_TIME,
REGISTER_TIME,
SETUP_ENV_TIME,
)
class TestStartupTime(unittest.TestCase):
@unittest.skipIf(True, "Will exceed threshold")
def test_setup_env_time(self):
self.assertLess(sum(SETUP_ENV_TIME), 5.0)
def test_register_d2_datasets_time(self):
self.assertLess(sum(REGISTER_D2_DATASETS_TIME), 3.0)
@unittest.skipIf(True, "Will exceed threshold")
def test_register_time(self):
        # NOTE: _register should be done quickly, currently about 0.2s
self.assertLess(sum(REGISTER_TIME), 1.0)
|
social_core/backends/gitlab.py | sg4e/social-core | 745 | 11133380 | """
GitLab OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/gitlab.html
Thanks to [@saily](https://github.com/saily) who published an
implementation for GitLab support on his blog post [Weblate with
GitLab as OAuth provider](http://widerin.net/blog/weblate-gitlab-oauth-login/).
His code was a great reference when working on this implementation.
"""
from .oauth import BaseOAuth2
class GitLabOAuth2(BaseOAuth2):
"""GitLab OAuth authentication backend"""
name = 'gitlab'
API_URL = 'https://gitlab.com'
AUTHORIZATION_URL = 'https://gitlab.com/oauth/authorize'
ACCESS_TOKEN_URL = 'https://gitlab.com/oauth/token'
ACCESS_TOKEN_METHOD = 'POST'
REDIRECT_STATE = False
DEFAULT_SCOPE = ['read_user']
EXTRA_DATA = [
('id', 'id'),
('expires_in', 'expires'),
('refresh_token', 'refresh_token')
]
def api_url(self, path):
api_url = self.setting('API_URL') or self.API_URL
return '{0}{1}'.format(api_url.rstrip('/'), path)
def authorization_url(self):
return self.api_url('/oauth/authorize')
def access_token_url(self):
return self.api_url('/oauth/token')
def get_user_details(self, response):
"""Return user details from GitLab account"""
fullname, first_name, last_name = self.get_user_names(
response.get('name')
)
return {'username': response.get('username'),
'email': response.get('email') or '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json(self.api_url('/api/v4/user'), params={
'access_token': access_token
})
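

# Usage sketch (an assumption, not part of the original backend): a self-hosted
# GitLab instance can be targeted by subclassing and overriding API_URL, which
# api_url() already consults via the API_URL setting. The hostname below is a
# placeholder.
class ExampleSelfHostedGitLabOAuth2(GitLabOAuth2):
    """Illustrative subclass pointing at a self-hosted GitLab deployment."""
    name = 'gitlab-selfhosted'
    API_URL = 'https://gitlab.example.com'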
|
src/datasets/dataset_factory.py | xiaoyuliu/AttentionalPoolingAction | 270 | 11133383 | """A factory-pattern class which returns classification image/label pairs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets import mpii
from datasets import hmdb51
from datasets import charades
from datasets import hico
from datasets import jhmdb21
datasets_map = {
'mpii': mpii,
'hmdb51': hmdb51,
'charades': charades,
'hico': hico,
'jhmdb21': jhmdb21
}
def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader=None,
**kwargs): # added by rgirdhar: allow other options
"""Given a dataset name and a split_name returns a Dataset.
Args:
name: String, the name of the dataset.
split_name: A train/test split name.
dataset_dir: The directory where the dataset files are stored.
file_pattern: The file pattern to use for matching the dataset source files.
reader: The subclass of tf.ReaderBase. If left as `None`, then the default
reader defined by each dataset is used.
Returns:
A `Dataset` class.
Raises:
ValueError: If the dataset `name` is unknown.
"""
if name not in datasets_map:
raise ValueError('Name of dataset unknown %s' % name)
return datasets_map[name].get_split(
split_name,
dataset_dir,
file_pattern,
reader, **kwargs)
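

if __name__ == '__main__':
    # Illustrative sketch only: the dataset name, split, and directory below are
    # placeholders; get_split() still needs the matching TFRecord files on disk.
    demo_dataset = get_dataset('mpii', 'train', '/path/to/mpii_tfrecords')
    print(demo_dataset)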
|
var/spack/repos/builtin/packages/ncdu/package.py | LiamBindle/spack | 2,360 | 11133384 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ncdu(Package):
"""Ncdu is a disk usage analyzer with an ncurses interface. It is designed
to find space hogs on a remote server where you don't have an entire
gaphical setup available, but it is a useful tool even on regular desktop
systems. Ncdu aims to be fast, simple and easy to use, and should be able
to run in any minimal POSIX-like environment with ncurses installed.
"""
homepage = "https://dev.yorhel.nl/ncdu"
url = "https://dev.yorhel.nl/download/ncdu-1.11.tar.gz"
version('1.15.1', sha256='b02ddc4dbf1db139cc6fbbe2f54a282770380f0ca5c17089855eab52a9ea3fb0')
version('1.14.2', sha256='947a7f5c1d0cd4e338e72b4f5bc5e2873651442cec3cb012e04ad2c37152c6b1')
version('1.13', sha256='f4d9285c38292c2de05e444d0ba271cbfe1a705eee37c2b23ea7c448ab37255a')
version('1.12', sha256='820e4e4747a2a2ec7a2e9f06d2f5a353516362c22496a10a9834f871b877499a')
version('1.11', sha256='d0aea772e47463c281007f279a9041252155a2b2349b18adb9055075e141bb7b')
version('1.10', sha256='f5994a4848dbbca480d39729b021f057700f14ef72c0d739bbd82d862f2f0c67')
version('1.9', sha256='ea7349544a9da77764293d84e52862110ab49ee29b949158bc4bab908d3dd3a5')
version('1.8', sha256='42aaf0418c05e725b39b220166a9c604a9c54c0fbf7692c9c119b36d0ed5d099')
version('1.7', sha256='70dfe10b4c0843050ee17ab27b7ad4d65714682f117079b85d779f83431fb333')
depends_on("ncurses")
depends_on('pkgconfig', type='build')
def install(self, spec, prefix):
configure('--prefix=%s' % prefix,
'--with-ncurses=%s' % spec['ncurses'])
make()
make("install")
|
data-science-onramp/vertex-ai/modules/trainer/tfkeras_model/task.py | InstantDomain/python-docs-samples | 5,938 | 11133389 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_tfkeras_task]
"""Trains a Keras model to predict number of trips
started and ended at Citibike stations. """
# [START aiplatform_tfkeras_task_imports]
import argparse
import os
import tensorflow as tf
from trainer import utils
from trainer.tfkeras_model import model
# [END aiplatform_tfkeras_task_imports]
# [START aiplatform_tfkeras_task_args]
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-path",
type=str,
required=True,
help="path to input data"
)
parser.add_argument(
"--num-epochs",
type=int,
help="number of times to go through the data, default=20",
)
parser.add_argument(
"--batch-size",
type=int,
help="number of records to read during each training step, default=128",
)
parser.add_argument(
"--learning-rate",
type=float,
help="learning rate for gradient descent, default=.01",
)
parser.add_argument(
"--verbosity",
choices=["DEBUG", "ERROR", "FATAL", "INFO", "WARN"],
default="INFO",
)
parser.add_argument(
"--model-dir",
type=str,
help="Output directory for the model.",
default=os.getenv("AIP_MODEL_DIR"),
)
return parser.parse_args()
# [END aiplatform_tfkeras_task_args]
# [START aiplatform_tfkeras_task_train_and_evaluate]
# [START aiplatform_tfkeras_task_train_and_evaluate_load]
def train_and_evaluate(
input_path: str,
model_dir: str,
num_epochs: int = 5,
batch_size: int = 128,
learning_rate: float = 0.01
) -> None:
"""Trains and evaluates the Keras model.
Uses the Keras model defined in model.py. Saves the trained model in TensorFlow SavedModel
    format to the path given by the --model-dir argument."""
# Split datasets into training and testing
train_feature, eval_feature, train_target, eval_target = utils.load_data(input_path)
# [END aiplatform_tfkeras_task_train_and_evaluate_load]
# [START aiplatform_tfkeras_task_train_and_evaluate_dimensions]
# Extract dimensions of the data
num_train_examples, input_dim = train_feature.shape
    num_eval_examples = eval_feature.shape[0]  # number of evaluation rows
output_dim = train_target.shape[1]
# [END aiplatform_tfkeras_task_train_and_evaluate_dimensions]
# [START aiplatform_tfkeras_task_train_and_evaluate_model]
# Create the Keras Model
keras_model = model.create_keras_model(
input_dim=input_dim,
output_dim=output_dim,
learning_rate=learning_rate,
)
# [END aiplatform_tfkeras_task_train_and_evaluate_model]
# [START aiplatform_tfkeras_task_train_and_evaluate_training_data]
# Pass a numpy array by passing DataFrame.values
training_dataset = model.input_fn(
features=train_feature.values,
labels=train_target.values,
shuffle=True,
num_epochs=num_epochs,
batch_size=batch_size,
)
# [END aiplatform_tfkeras_task_train_and_evaluate_training_data]
# [START aiplatform_tfkeras_task_train_and_evaluate_validation_data]
# Pass a numpy array by passing DataFrame.values
validation_dataset = model.input_fn(
features=eval_feature.values,
labels=eval_target.values,
shuffle=False,
num_epochs=num_epochs,
batch_size=num_eval_examples,
)
# [END aiplatform_tfkeras_task_train_and_evaluate_validation_data]
# [START aiplatform_tfkeras_task_train_and_evaluate_fit_export]
# Train model
keras_model.fit(
training_dataset,
steps_per_epoch=int(num_train_examples / batch_size),
epochs=num_epochs,
validation_data=validation_dataset,
validation_steps=1,
verbose=1,
)
# Export model
keras_model.save(model_dir)
print(f"Model exported to: {model_dir}")
# [END aiplatform_tfkeras_task_train_and_evaluate_fit_export]
# [END aiplatform_tfkeras_task_train_and_evaluate]
if __name__ == "__main__":
args = get_args()
kwargs = {}
    if args.num_epochs:
        kwargs["num_epochs"] = args.num_epochs
    if args.batch_size:
        kwargs["batch_size"] = args.batch_size
    if args.learning_rate:
        kwargs["learning_rate"] = args.learning_rate
tf.compat.v1.logging.set_verbosity(args.verbosity)
train_and_evaluate(args.input_path, args.model_dir, **kwargs)
# [END aiplatform_tfkeras_task]
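
# Example invocation (illustrative; the module path, bucket, and values below
# are placeholders, not taken from the original sample):
#
#   python -m trainer.tfkeras_model.task \
#       --input-path gs://your-bucket/citibike.csv \
#       --num-epochs 5 \
#       --batch-size 128 \
#       --learning-rate 0.01 \
#       --model-dir gs://your-bucket/model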
|
examples/showcase/src/uiHelpers.py | takipsizad/pyjs | 739 | 11133393 |
""" uiHelpers.py
This module contains various helper classes and functions to make it easier
to build a Pyjamas application.
"""
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.Widget import Widget
from pyjamas import DOM
from pyjamas import Window
from __pyjamas__ import wnd
#############################################################################
def indent(contents, all=None, left=None, right=None, top=None, bottom=None,
hIndent=None, vIndent=None):
""" Add a wrapper around the given contents to indent it.
The parameters are as follows:
'contents'
The contents to indent. This should be a widget or a panel.
'all'
The indent to use for all four sides. This is the first
argument, allowing you to call indent(c, 20) to indent the
contents on all sides by the same amount.
'left'
The left indent to use.
'right'
The right indent to use.
'top'
The top indent to use.
'bottom'
The bottom indent to use.
'hIndent'
The indent to use for the left and right sides.
'vIndent'
The indent to use for the top and bottom.
        The contents will be wrapped in a panel which includes whitespace on
        each side of the panel as specified.
        Upon completion, we return a Panel object containing the wrapped-up
        contents.
"""
if all is not None:
left = all
right = all
top = all
bottom = all
if hIndent is not None:
left = hIndent
right = hIndent
if vIndent is not None:
top = vIndent
bottom = vIndent
wrapper = DockPanel()
wrapper.setSpacing(0)
wrapper.add(contents, DockPanel.CENTER)
if left > 0:
padding = Whitespace(width=left)
wrapper.add(padding, DockPanel.WEST)
if top > 0:
padding = Whitespace(height=top)
wrapper.add(padding, DockPanel.NORTH)
if right > 0:
padding = Whitespace(width=right)
wrapper.add(padding, DockPanel.EAST)
if bottom > 0:
padding = Whitespace(height=bottom)
wrapper.add(padding, DockPanel.SOUTH)
return wrapper
#############################################################################
def border(contents):
""" Draw a border around the given contents.
We return a Panel which wraps up the given contents and draws a border
around it.
"""
wrapper = VerticalPanel()
wrapper.add(contents)
wrapper.setBorderWidth(1)
return wrapper
#############################################################################
def colour(contents, colour):
""" Add colour to the given contents.
'contents' is a widget or panel to colour, and 'colour' is the HTML
colour code (eg, "#808080", etc) to use for the background colour.
We returned the given contents wrapped in a Panel which has the given
background colour attached.
"""
wrapper = VerticalPanel()
wrapper.add(contents)
DOM.setStyleAttribute(wrapper.getElement(), "background-color", colour)
return wrapper
#############################################################################
def prompt(msg, defaultReply=""):
""" Prompt the user to enter some text.
We return the entered text, or None if the user cancelled.
"""
    return wnd().prompt(msg, defaultReply)
#############################################################################
class Whitespace(Widget):
""" A custom widget which has a fixed size and no contents.
This can be used to add arbitrary whitespace to your user interface.
"""
def __init__(self, width=0, height=0):
""" Standard initialiser.
'width' and 'height' are the dimensions to use for this whitespace,
in pixels.
"""
Widget.__init__(self)
self.setElement(DOM.createElement('div'))
self.setPixelSize(width, height)
#############################################################################
class PanelWithLabel(SimplePanel):
""" A generic panel with a label at the top.
"""
def __init__(self, label, contents):
""" Standard initialiser.
'label' is the string to show at the top, while 'contents' is a
panel or widget to show in the main body of the panel.
"""
SimplePanel.__init__(self)
label = HTML('<b>' + label + '</b>')
vPanel = VerticalPanel()
vPanel.add(indent(label, left=5))
vPanel.add(border(indent(contents, 10)))
self.add(vPanel)
#############################################################################
class PanelApp:
""" A generic multiple-panel web application.
This class makes it easy to handle multiple panels within a web
application. Panels are shown as they are required.
"""
def onModuleLoad(self):
""" Dynamically build our user interface when the web page is loaded.
"""
self._curPanelID = None # ID of currently-shown panel.
self._root = RootPanel()
self._panels = self.createPanels()
self.showPanel(self.getDefaultPanel())
def showPanel(self, panelID):
""" Show the panel with the given ID.
"""
if panelID == self._curPanelID: return
if self._curPanelID is not None:
self._root.remove(self._panels[self._curPanelID])
self._root.add(self._panels[panelID])
self._curPanelID = panelID
# ==============================
# == METHODS TO BE OVERRIDDEN ==
# ==============================
def createPanels(self):
""" Create the various panels to be used by this application.
This should be overridden by the subclass to create the various
panels the application will use. Upon completion, the subclass
should return a dictionary mapping the ID to use for each panel to
the panel to be displayed.
"""
Window.alert("Must be overridden.")
def getDefaultPanel(self):
""" Return the ID of the panel to show on system startup.
"""
Window.alert("Must be overridden.")
#############################################################################
class CommandWrapper:
""" A wrapper which lets you use a method as a deferred command handler.
The DeferredCommand module assumes that the command object it is given
will have an execute() method. This makes having multiple commands
within a single class difficult; the command wrapper lets you simply
pass an object and method name, and that method will be called when the
deferred command is executed.
"""
def __init__(self, object, handler):
""" Standard initialiser.
'object' is the object the command will be associated with, and
'handler' is the name of the method within that object to call to
execute the command.
"""
self._object = object
self._handler = handler
def execute(self):
""" Respond to the command being executed.
We call object.handler().
"""
handler = getattr(self._object, self._handler)
handler()
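# Illustrative usage sketch (not part of the original module): a wrapper
# instance can be handed straight to the deferred-command scheduler because it
# exposes the execute() method the scheduler expects. DeferredCommand.add() is
# assumed here to be the scheduling entry point referred to in the docstring
# above; adjust the call to match the toolkit actually in use.
class _ExampleDeferredUser:
    def start(self):
        # Schedule self.onStartupFinished() to run as a deferred command.
        DeferredCommand.add(CommandWrapper(self, "onStartupFinished"))
    def onStartupFinished(self):
        Window.alert("Deferred startup work done.")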
|
src/OFS/tests/test_userfolder.py | rbanffy/Zope | 289 | 11133406 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Unit tests for OFS.userfolder
"""
import unittest
from ZPublisher.utils import basic_auth_encode
# TODO class Test_readUserAccessFile(unittest.TestCase)
class BasicUserFolderTests(unittest.TestCase):
def _getTargetClass(self):
from OFS.userfolder import BasicUserFolder
return BasicUserFolder
def test_manage_users_security_initialized(self):
uf = self._getTargetClass()()
self.assertTrue(hasattr(uf, 'manage_users__roles__'))
class UserFolderTests(unittest.TestCase):
def setUp(self):
import transaction
transaction.begin()
def tearDown(self):
import transaction
from AccessControl.SecurityManagement import noSecurityManager
noSecurityManager()
transaction.abort()
def _getTargetClass(self):
from OFS.userfolder import UserFolder
return UserFolder
def _makeOne(self, app=None):
if app is None:
app = self._makeApp()
uf = self._getTargetClass()().__of__(app)
uf._doAddUser('user1', 'secret', ['role1'], [])
return uf
def _makeApp(self):
from Testing.makerequest import makerequest
from Testing.ZopeTestCase import ZopeLite
app = makerequest(ZopeLite.app())
# Set up a user and role
app._addRole('role1')
app.manage_role('role1', ['View'])
# Set up a published object accessible to user
app.addDTMLMethod('doc', file='')
app.doc.manage_permission('View', ['role1'], acquire=0)
# Rig the REQUEST so it looks like we traversed to doc
app.REQUEST.set('PUBLISHED', app.doc)
app.REQUEST.set('PARENTS', [app])
app.REQUEST.steps = ['doc']
return app
def _makeBasicAuthToken(self, creds='<PASSWORD>'):
return basic_auth_encode(creds)
def _login(self, uf, name):
from AccessControl.SecurityManagement import newSecurityManager
user = uf.getUserById(name)
user = user.__of__(uf)
newSecurityManager(None, user)
def test_class_conforms_to_IStandardUserFolder(self):
from AccessControl.interfaces import IStandardUserFolder
from zope.interface.verify import verifyClass
verifyClass(IStandardUserFolder, self._getTargetClass())
def testGetRolesInContext(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.getUser('user1')
app.manage_addLocalRoles('user1', ['Owner'])
roles = user.getRolesInContext(app)
self.assertTrue('role1' in roles)
self.assertTrue('Owner' in roles)
def testHasRole(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.getUser('user1')
self.assertTrue(user.has_role('role1', app))
def testHasLocalRole(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.getUser('user1')
app.manage_addLocalRoles('user1', ['Owner'])
self.assertTrue(user.has_role('Owner', app))
def testHasPermission(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.getUser('user1')
self.assertTrue(user.has_permission('View', app))
app.manage_role('role1', ['Add Folders'])
self.assertTrue(user.has_permission('Add Folders', app))
def testHasLocalRolePermission(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.getUser('user1')
app.manage_role('Owner', ['Add Folders'])
app.manage_addLocalRoles('user1', ['Owner'])
self.assertTrue(user.has_permission('Add Folders', app))
def testAuthenticate(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.getUser('user1')
self.assertTrue(user.authenticate('secret', app.REQUEST))
def testValidate(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.validate(app.REQUEST, self._makeBasicAuthToken(),
['role1'])
self.assertNotEqual(user, None)
self.assertEqual(user.getUserName(), 'user1')
def testNotValidateWithoutAuth(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.validate(app.REQUEST, '', ['role1'])
self.assertEqual(user, None)
def testValidateWithoutRoles(self):
# Note - calling uf.validate without specifying roles will cause
# the security machinery to determine the needed roles by looking
# at the object itself (or its container). I'm putting this note
# in to clarify because the original test expected failure but it
# really should have expected success, since the user and the
# object being checked both have the role 'role1', even though no
# roles are passed explicitly to the userfolder validate method.
app = self._makeApp()
uf = self._makeOne(app)
user = uf.validate(app.REQUEST, self._makeBasicAuthToken())
self.assertEqual(user.getUserName(), 'user1')
def testNotValidateWithEmptyRoles(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.validate(app.REQUEST, self._makeBasicAuthToken(), [])
self.assertEqual(user, None)
def testNotValidateWithWrongRoles(self):
app = self._makeApp()
uf = self._makeOne(app)
user = uf.validate(app.REQUEST, self._makeBasicAuthToken(),
['Manager'])
self.assertEqual(user, None)
def testAllowAccessToUser(self):
app = self._makeApp()
uf = self._makeOne(app)
self._login(uf, 'user1')
app.restrictedTraverse('doc')
def testDenyAccessToAnonymous(self):
from AccessControl import Unauthorized
app = self._makeApp()
self.assertRaises(Unauthorized, app.restrictedTraverse, 'doc')
|
typic/constraints/factory.py | wyfo/typical | 157 | 11133424 | from __future__ import annotations
import collections
import dataclasses
import enum
import inspect
import datetime
import ipaddress
import pathlib
import re
import uuid
from collections import deque, abc
from decimal import Decimal
from typing import (
Mapping,
Type,
Union,
Optional,
List,
Any,
Dict,
Hashable,
cast,
Set,
ClassVar,
Deque,
Tuple,
)
from typic.checks import (
isuniontype,
isoptionaltype,
isbuiltintype,
isconstrained,
isforwardref,
istypeddict,
isbuiltinsubtype,
isnamedtuple,
should_unwrap,
isclassvartype,
isenumtype,
isabstract,
)
from typic.compat import Literal, lru_cache, UnionType
from typic.types import dsn, email, frozendict, path, secret, url
from typic.util import (
origin,
get_args,
get_tag_for_types,
cached_signature,
cached_type_hints,
get_name,
TypeMap,
empty,
)
from .array import (
Array,
FrozenSetConstraints,
ListConstraints,
SetContraints,
TupleConstraints,
DequeConstraints,
)
from .common import (
MultiConstraints,
TypeConstraints,
EnumConstraints,
VT,
ConstraintsProtocolT,
DelayedConstraints,
ForwardDelayedConstraints,
LiteralConstraints,
)
from .mapping import (
MappingConstraints,
DictConstraints,
ObjectConstraints,
TypedDictConstraints,
)
from .number import (
IntContraints,
FloatContraints,
DecimalContraints,
NumberT,
)
from .text import BytesConstraints, StrConstraints
@lru_cache(maxsize=None)
def get_constraints(
t: Type[VT],
*,
nullable: bool = False,
name: str = None,
cls: Optional[Type] = ..., # type: ignore
) -> ConstraintsProtocolT[VT]:
while should_unwrap(t):
nullable = nullable or isoptionaltype(t)
t = get_args(t)[0]
if t is cls or t in __stack:
dc = DelayedConstraints(
t, nullable=nullable, name=name, factory=get_constraints
)
return cast(ConstraintsProtocolT, dc)
if isforwardref(t):
if cls is ...: # pragma: nocover
raise TypeError(
f"Cannot build constraints for {t} without an enclosing class."
)
fdc = ForwardDelayedConstraints(
t, # type: ignore
cls.__module__,
localns=getattr(cls, "__dict__", {}).copy(),
nullable=nullable,
name=name,
factory=get_constraints,
)
return cast(ConstraintsProtocolT, fdc)
if isconstrained(t):
c: ConstraintsProtocolT = t.__constraints__ # type: ignore
if (c.name, c.nullable) != (name, nullable):
return dataclasses.replace(c, name=name, nullable=nullable)
return c
if isenumtype(t):
ec = _from_enum_type(t, nullable=nullable, name=name) # type: ignore
return cast(ConstraintsProtocolT, ec)
if isabstract(t):
return cast(
ConstraintsProtocolT, _from_strict_type(t, nullable=nullable, name=name)
)
if isnamedtuple(t) or istypeddict(t):
handler = _from_class
else:
ot = origin(t)
if ot in {type, abc.Callable}:
handler = _from_strict_type # type: ignore
t = ot
else:
handler = _CONSTRAINT_BUILDER_HANDLERS.get_by_parent(ot, _from_class) # type: ignore
__stack.add(t)
c = handler(t, nullable=nullable, name=name, cls=cls)
__stack.clear()
return c
__stack: Set[Type] = set()
ConstraintsT = Union[
BytesConstraints,
DecimalContraints,
DelayedConstraints,
DequeConstraints,
DictConstraints,
EnumConstraints,
FloatContraints,
ForwardDelayedConstraints,
FrozenSetConstraints,
IntContraints,
ListConstraints,
MappingConstraints,
MultiConstraints,
ObjectConstraints,
SetContraints,
StrConstraints,
TupleConstraints,
TypeConstraints,
]
_ARRAY_CONSTRAINTS_BY_TYPE = TypeMap(
{
set: SetContraints,
list: ListConstraints,
tuple: TupleConstraints,
frozenset: FrozenSetConstraints,
collections.deque: DequeConstraints,
}
)
ArrayConstraintsT = Union[
SetContraints,
ListConstraints,
TupleConstraints,
FrozenSetConstraints,
DequeConstraints,
]
def _resolve_args(
*args, cls: Type = None, nullable: bool = False, multi: bool = True
) -> Optional[Union[ConstraintsProtocolT, Tuple[ConstraintsProtocolT, ...]]]:
largs: Deque = deque(args)
items: List[ConstraintsProtocolT] = []
while largs:
arg = largs.popleft()
if arg in {Any, Ellipsis}:
continue
if isuniontype(arg):
c = _from_union(arg, cls=cls, nullable=nullable)
# just extend the outer multi constraints if that's what we're building
if isinstance(c, MultiConstraints) and multi:
items.extend(c.constraints)
else:
items.append(c)
continue
items.append(get_constraints(arg, cls=cls, nullable=nullable))
if len(items) == 1:
return items[0]
if multi:
return cast(ConstraintsProtocolT, MultiConstraints((*items,)))
return (*items,)
def _from_array_type(
t: Type[Array], *, nullable: bool = False, name: str = None, cls: Type = None
) -> ArrayConstraintsT:
args = get_args(t)
constr_class = cast(
Type[ArrayConstraintsT], _ARRAY_CONSTRAINTS_BY_TYPE.get_by_parent(origin(t))
)
# If we don't have args, then return a naive constraint
if not args:
return constr_class(nullable=nullable, name=name)
if constr_class is TupleConstraints and ... not in args:
items = _resolve_args(*args, cls=cls, nullable=nullable, multi=False)
return constr_class(nullable=nullable, values=items, name=name) # type: ignore
items = _resolve_args(*args, cls=cls, nullable=nullable, multi=True)
return constr_class(nullable=nullable, values=items, name=name) # type: ignore
def _from_mapping_type(
t: Type[Mapping], *, nullable: bool = False, name: str = None, cls: Type = None
) -> Union[MappingConstraints, DictConstraints]:
if isbuiltintype(t):
return DictConstraints(nullable=nullable, name=name)
base = getattr(t, "__origin__", t)
constr_class: Union[Type[MappingConstraints], Type[DictConstraints]]
constr_class = MappingConstraints
if base is dict:
constr_class = DictConstraints
args = get_args(t)
if not args:
return constr_class(nullable=nullable, name=name)
key_arg, value_arg = args
key_items, value_items = (
_resolve_args(key_arg, cls=cls),
_resolve_args(value_arg, cls=cls),
)
return constr_class(
keys=key_items, values=value_items, nullable=nullable, name=name # type: ignore
)
SimpleT = Union[NumberT, str, bytes]
SimpleConstraintsT = Union[
IntContraints, FloatContraints, DecimalContraints, StrConstraints, BytesConstraints
]
_SIMPLE_CONSTRAINTS = TypeMap(
{
IntContraints.type: IntContraints,
FloatContraints.type: FloatContraints,
DecimalContraints.type: DecimalContraints,
StrConstraints.type: StrConstraints,
BytesConstraints.type: BytesConstraints,
}
)
def _from_simple_type(
t: Type[SimpleT], *, nullable: bool = False, name: str = None, cls: Type = None
) -> SimpleConstraintsT:
constr_class = cast(
Type[SimpleConstraintsT], _SIMPLE_CONSTRAINTS.get_by_parent(origin(t))
)
return constr_class(nullable=nullable, name=name)
def _resolve_params(
cls: Type,
**param: inspect.Parameter,
) -> Mapping[str, ConstraintsProtocolT]:
items: Dict[str, ConstraintsProtocolT] = {}
while param:
name, p = param.popitem()
anno = p.annotation
nullable = p.default in (None, Ellipsis) or isoptionaltype(anno)
if anno in {Any, Ellipsis, p.empty}:
continue
if isuniontype(anno) and not isforwardref(anno):
items[name] = _from_union(anno, nullable=nullable, name=name, cls=cls)
continue
else:
items[name] = get_constraints(anno, nullable=nullable, name=name, cls=cls)
return items
def _from_strict_type(
t: Type[VT], *, nullable: bool = False, name: str = None, cls: Type = None
) -> TypeConstraints:
return TypeConstraints(t, nullable=nullable, name=name)
def _from_enum_type(
t: Type[enum.Enum], *, nullable: bool = False, name: str = None, cls: Type = None
) -> EnumConstraints:
return EnumConstraints(t, nullable=nullable, name=name)
def _from_literal(
t: Type[VT], *, nullable: bool = False, name: str = None, cls: Type = None
) -> LiteralConstraints:
return LiteralConstraints(t, nullable=nullable, name=name)
def _from_union(
t: Type[VT], *, nullable: bool = False, name: str = None, cls: Type = None
) -> ConstraintsProtocolT:
_nullable: bool = isoptionaltype(t)
nullable = nullable or _nullable
_args = get_args(t)[:-1] if _nullable else get_args(t)
if len(_args) == 1:
return get_constraints(_args[0], nullable=nullable, name=name, cls=cls)
c = MultiConstraints(
(*(get_constraints(a, nullable=nullable, cls=cls) for a in _args),),
name=name,
tag=get_tag_for_types(_args),
)
return cast(ConstraintsProtocolT, c)
def _from_class(
t: Type[VT], *, nullable: bool = False, name: str = None, cls: Type = None
) -> ConstraintsProtocolT[VT]:
if not istypeddict(t) and not isnamedtuple(t) and isbuiltinsubtype(t):
return cast(
ConstraintsProtocolT, _from_strict_type(t, nullable=nullable, name=name)
)
try:
params: Dict[str, inspect.Parameter] = {**cached_signature(t).parameters}
hints = cached_type_hints(t)
for x in hints.keys() & params.keys():
p = params[x]
params[x] = inspect.Parameter(
p.name, p.kind, default=p.default, annotation=hints[x]
)
for x in hints.keys() - params.keys():
hint = hints[x]
if not isclassvartype(hint):
continue
# Hack in the classvars as "parameters" to allow for validation.
default = getattr(t, x, empty)
args = get_args(hint)
if not args:
hint = ClassVar[default.__class__] # type: ignore
params[x] = inspect.Parameter(
x, inspect.Parameter.KEYWORD_ONLY, default=default, annotation=hint
)
except (ValueError, TypeError):
return cast(
ConstraintsProtocolT, _from_strict_type(t, nullable=nullable, name=name)
)
name = name or get_name(t)
items: Optional[frozendict.FrozenDict[Hashable, ConstraintsT]] = (
frozendict.FrozenDict(_resolve_params(t, **params)) or None
)
required = frozenset(
(
pname
for pname, p in params.items()
if (
p.kind not in {p.VAR_POSITIONAL, p.VAR_KEYWORD} and p.default is p.empty
)
)
)
has_varargs = any(
p.kind in {p.VAR_KEYWORD, p.VAR_POSITIONAL} for p in params.values()
)
kwargs = {
"type": t,
"nullable": nullable,
"name": name,
"required_keys": required,
"items": items,
"total": not has_varargs,
}
cls = ObjectConstraints
if istypeddict(t):
cls = TypedDictConstraints
kwargs.update(type=dict, ttype=t, total=getattr(t, "__total__", bool(required)))
c = cls(**kwargs) # type: ignore
return cast(ConstraintsProtocolT, c)
_CONSTRAINT_BUILDER_HANDLERS = TypeMap(
{
set: _from_array_type,
frozenset: _from_array_type,
list: _from_array_type,
tuple: _from_array_type,
collections.deque: _from_array_type,
dict: _from_mapping_type, # type: ignore
int: _from_simple_type,
float: _from_simple_type,
Decimal: _from_simple_type,
str: _from_simple_type,
bytes: _from_simple_type,
bool: _from_strict_type,
datetime.datetime: _from_strict_type,
datetime.date: _from_strict_type,
datetime.time: _from_strict_type,
url.NetworkAddress: _from_strict_type,
url.URL: _from_strict_type,
url.AbsoluteURL: _from_strict_type,
url.RelativeURL: _from_strict_type,
dsn.DSN: _from_strict_type,
pathlib.Path: _from_strict_type,
path.FilePath: _from_strict_type,
path.DirectoryPath: _from_strict_type,
path.PathType: _from_strict_type,
url.HostName: _from_strict_type,
email.Email: _from_strict_type,
secret.SecretStr: _from_strict_type,
secret.SecretBytes: _from_strict_type,
uuid.UUID: _from_strict_type,
re.Pattern: _from_strict_type, # type: ignore
ipaddress.IPv4Address: _from_strict_type,
ipaddress.IPv6Address: _from_strict_type,
Union: _from_union, # type: ignore
UnionType: _from_union, # type: ignore
Literal: _from_literal, # type: ignore
}
)
|
kitsune/products/tests/test_templates.py | erdal-pb/kitsune | 929 | 11133426 | from django.conf import settings
from django.core.cache import cache
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.products.models import HOT_TOPIC_SLUG
from kitsune.products.tests import ProductFactory, TopicFactory
from kitsune.questions.models import QuestionLocale
from kitsune.search.tests import Elastic7TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.wiki.tests import ApprovedRevisionFactory, DocumentFactory, HelpfulVoteFactory
class ProductViewsTestCase(Elastic7TestCase):
search_tests = True
def test_products(self):
"""Verify that /products page renders products."""
# Create some products.
for i in range(3):
p = ProductFactory(visible=True)
locale = QuestionLocale.objects.get(locale=settings.LANGUAGE_CODE)
p.questions_locales.add(locale)
# GET the products page and verify the content.
r = self.client.get(reverse("products"), follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(3, len(doc("#products-and-services li")))
def test_product_landing(self):
"""Verify that /products/<slug> page renders topics."""
# Create a product.
p = ProductFactory()
locale = QuestionLocale.objects.get(locale=settings.LANGUAGE_CODE)
p.questions_locales.add(locale)
# Create some topics.
TopicFactory(slug=HOT_TOPIC_SLUG, product=p, visible=True)
topics = TopicFactory.create_batch(11, product=p, visible=True)
# Create a document and assign the product and 10 topics.
d = DocumentFactory(products=[p], topics=topics[:10])
ApprovedRevisionFactory(document=d)
self.refresh()
# GET the product landing page and verify the content.
url = reverse("products.product", args=[p.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(11, len(doc("#help-topics li")))
eq_(p.slug, doc("#support-search input[name=product]").attr["value"])
def test_firefox_product_landing(self):
"""Verify that there are no firefox button at header in the firefox landing page"""
p = ProductFactory(slug="firefox")
url = reverse("products.product", args=[p.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(False, doc(".firefox-download-button").length)
def test_document_listing(self):
"""Verify /products/<product slug>/<topic slug> renders articles."""
# Create a topic and product.
p = ProductFactory()
t1 = TopicFactory(product=p)
# Create 3 documents with the topic and product and one without.
ApprovedRevisionFactory.create_batch(3, document__products=[p], document__topics=[t1])
ApprovedRevisionFactory()
self.refresh()
# GET the page and verify the content.
url = reverse("products.documents", args=[p.slug, t1.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(3, len(doc("#document-list > ul > li")))
eq_(p.slug, doc("#support-search input[name=product]").attr["value"])
def test_document_listing_order(self):
"""Verify documents are sorted by display_order and number of helpful votes."""
# Create topic, product and documents.
p = ProductFactory()
t = TopicFactory(product=p)
docs = []
# FIXME: Can't we do this with create_batch and build the document
# in the approvedrevisionfactory
for i in range(3):
doc = DocumentFactory(products=[p], topics=[t])
ApprovedRevisionFactory(document=doc)
docs.append(doc)
# Add a lower display order to the second document. It should be first now.
docs[1].display_order = 0
docs[1].save()
self.refresh()
url = reverse("products.documents", args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(doc("#document-list > ul > li:first-child > a").text(), docs[1].title)
# Add a helpful vote to the third document. It should be second now.
rev = docs[2].current_revision
HelpfulVoteFactory(revision=rev, helpful=True)
docs[2].save() # Votes don't trigger a reindex.
self.refresh()
cache.clear() # documents_for() is cached
url = reverse("products.documents", args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(doc("#document-list > ul > li:nth-child(2) > a").text(), docs[2].title)
# Add 2 helpful votes to the first document. It should be second now.
rev = docs[0].current_revision
HelpfulVoteFactory(revision=rev, helpful=True)
HelpfulVoteFactory(revision=rev, helpful=True)
docs[0].save() # Votes don't trigger a reindex.
self.refresh()
cache.clear() # documents_for() is cached
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(doc("#document-list > ul > li:nth-child(2) > a").text(), docs[0].title)
def test_subtopics(self):
"""Verifies subtopics appear on document listing page."""
# Create a topic and product.
p = ProductFactory()
t = TopicFactory(product=p, visible=True)
# Create a documents with the topic and product
doc = DocumentFactory(products=[p], topics=[t])
ApprovedRevisionFactory(document=doc)
self.refresh()
# GET the page and verify no subtopics yet.
url = reverse("products.documents", args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
pqdoc = pq(r.content)
eq_(0, len(pqdoc("li.subtopic")))
# Create a subtopic, it still shouldn't show up because no
# articles are assigned.
subtopic = TopicFactory(parent=t, product=p, visible=True)
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
pqdoc = pq(r.content)
eq_(0, len(pqdoc("li.subtopic")))
# Add a document to the subtopic, now it should appear.
doc.topics.add(subtopic)
self.refresh()
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
pqdoc = pq(r.content)
eq_(1, len(pqdoc("li.subtopic")))
|
leo/modes/shtml.py | ATikhonov2/leo-editor | 1,550 | 11133430 | # Leo colorizer control file for shtml mode.
# This file is in the public domain.
# Properties for shtml mode.
properties = {
"commentEnd": "-->",
"commentStart": "<!--",
}
# Attributes dict for shtml_main ruleset.
shtml_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for shtml_tags ruleset.
shtml_tags_attributes_dict = {
"default": "MARKUP",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for shtml_ssi ruleset.
shtml_ssi_attributes_dict = {
"default": "KEYWORD3",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for shtml_ssi_expression ruleset.
shtml_ssi_expression_attributes_dict = {
"default": "LITERAL1",
"digit_re": "",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for shtml mode.
attributesDictDict = {
"shtml_main": shtml_main_attributes_dict,
"shtml_ssi": shtml_ssi_attributes_dict,
"shtml_ssi_expression": shtml_ssi_expression_attributes_dict,
"shtml_tags": shtml_tags_attributes_dict,
}
# Keywords dict for shtml_main ruleset.
shtml_main_keywords_dict = {}
# Keywords dict for shtml_tags ruleset.
shtml_tags_keywords_dict = {}
# Keywords dict for shtml_ssi ruleset.
shtml_ssi_keywords_dict = {
"cgi": "keyword2",
"cmd": "keyword2",
"config": "keyword1",
"echo": "keyword1",
"errmsg": "keyword2",
"exec": "keyword1",
"file": "keyword2",
"flastmod": "keyword1",
"fsize": "keyword1",
"include": "keyword1",
"sizefmt": "keyword2",
"timefmt": "keyword2",
"var": "keyword2",
}
# Keywords dict for shtml_ssi_expression ruleset.
shtml_ssi_expression_keywords_dict = {}
# Dictionary of keywords dictionaries for shtml mode.
keywordsDictDict = {
"shtml_main": shtml_main_keywords_dict,
"shtml_ssi": shtml_ssi_keywords_dict,
"shtml_ssi_expression": shtml_ssi_expression_keywords_dict,
"shtml_tags": shtml_tags_keywords_dict,
}
# Rules for shtml_main ruleset.
def shtml_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="keyword3", begin="<!--#", end="-->",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="shtml::ssi",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def shtml_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="<!--", end="-->",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def shtml_rule2(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<SCRIPT", end="</SCRIPT>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="html::javascript",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def shtml_rule3(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<STYLE", end="</STYLE>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="html::css",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def shtml_rule4(colorer, s, i):
return colorer.match_span(s, i, kind="keyword2", begin="<!", end=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="xml::dtd-tags",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def shtml_rule5(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<", end=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="shtml::tags",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def shtml_rule6(colorer, s, i):
return colorer.match_span(s, i, kind="literal2", begin="&", end=";",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=True)
# Rules dict for shtml_main ruleset.
rulesDict1 = {
"&": [shtml_rule6,],
"<": [shtml_rule0,shtml_rule1,shtml_rule2,shtml_rule3,shtml_rule4,shtml_rule5,],
}
# Rules for shtml_tags ruleset.
def shtml_rule7(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def shtml_rule8(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def shtml_rule9(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
# Rules dict for shtml_tags ruleset.
rulesDict2 = {
"\"": [shtml_rule7,],
"'": [shtml_rule8,],
"=": [shtml_rule9,],
}
# Rules for shtml_ssi ruleset.
def shtml_rule10(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="shtml::ssi-expression",exclude_match=True,
no_escape=False, no_line_break=False, no_word_break=False)
def shtml_rule11(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def shtml_rule12(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for shtml_ssi ruleset.
rulesDict3 = {
"\"": [shtml_rule10,],
"0": [shtml_rule12,],
"1": [shtml_rule12,],
"2": [shtml_rule12,],
"3": [shtml_rule12,],
"4": [shtml_rule12,],
"5": [shtml_rule12,],
"6": [shtml_rule12,],
"7": [shtml_rule12,],
"8": [shtml_rule12,],
"9": [shtml_rule12,],
"=": [shtml_rule11,],
"@": [shtml_rule12,],
"A": [shtml_rule12,],
"B": [shtml_rule12,],
"C": [shtml_rule12,],
"D": [shtml_rule12,],
"E": [shtml_rule12,],
"F": [shtml_rule12,],
"G": [shtml_rule12,],
"H": [shtml_rule12,],
"I": [shtml_rule12,],
"J": [shtml_rule12,],
"K": [shtml_rule12,],
"L": [shtml_rule12,],
"M": [shtml_rule12,],
"N": [shtml_rule12,],
"O": [shtml_rule12,],
"P": [shtml_rule12,],
"Q": [shtml_rule12,],
"R": [shtml_rule12,],
"S": [shtml_rule12,],
"T": [shtml_rule12,],
"U": [shtml_rule12,],
"V": [shtml_rule12,],
"W": [shtml_rule12,],
"X": [shtml_rule12,],
"Y": [shtml_rule12,],
"Z": [shtml_rule12,],
"a": [shtml_rule12,],
"b": [shtml_rule12,],
"c": [shtml_rule12,],
"d": [shtml_rule12,],
"e": [shtml_rule12,],
"f": [shtml_rule12,],
"g": [shtml_rule12,],
"h": [shtml_rule12,],
"i": [shtml_rule12,],
"j": [shtml_rule12,],
"k": [shtml_rule12,],
"l": [shtml_rule12,],
"m": [shtml_rule12,],
"n": [shtml_rule12,],
"o": [shtml_rule12,],
"p": [shtml_rule12,],
"q": [shtml_rule12,],
"r": [shtml_rule12,],
"s": [shtml_rule12,],
"t": [shtml_rule12,],
"u": [shtml_rule12,],
"v": [shtml_rule12,],
"w": [shtml_rule12,],
"x": [shtml_rule12,],
"y": [shtml_rule12,],
"z": [shtml_rule12,],
}
# Rules for shtml_ssi_expression ruleset.
def shtml_rule13(colorer, s, i):
return colorer.match_mark_following(s, i, kind="keyword2", pattern="$",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=False)
def shtml_rule14(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def shtml_rule15(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="!=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def shtml_rule16(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def shtml_rule17(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def shtml_rule18(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def shtml_rule19(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def shtml_rule20(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="&&",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def shtml_rule21(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="||",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
# Rules dict for shtml_ssi_expression ruleset.
rulesDict4 = {
"!": [shtml_rule15,],
"$": [shtml_rule13,],
"&": [shtml_rule20,],
"<": [shtml_rule16,shtml_rule17,],
"=": [shtml_rule14,],
">": [shtml_rule18,shtml_rule19,],
"|": [shtml_rule21,],
}
# x.rulesDictDict for shtml mode.
rulesDictDict = {
"shtml_main": rulesDict1,
"shtml_ssi": rulesDict3,
"shtml_ssi_expression": rulesDict4,
"shtml_tags": rulesDict2,
}
# Import dict for shtml mode.
importDict = {}
|
botenv/lib/python3.9/site-packages/redis/commands/json/decoders.py | 0xtuytuy/unit-crypto-ski-week-poap-bot | 483 | 11133445 | <filename>botenv/lib/python3.9/site-packages/redis/commands/json/decoders.py
import copy
import re
from ..helpers import nativestr
def bulk_of_jsons(d):
"""Replace serialized JSON values with objects in a
bulk array response (list).
"""
def _f(b):
for index, item in enumerate(b):
if item is not None:
b[index] = d(item)
return b
return _f
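# Illustrative usage (added for clarity, not part of the original module):
# `d` can be any JSON decoder callable, e.g. json.loads. Items that are None
# are left untouched.
#
#   import json
#   decode = bulk_of_jsons(json.loads)
#   decode(['{"a": 1}', None, '[2, 3]'])   # -> [{"a": 1}, None, [2, 3]]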
def decode_dict_keys(obj):
"""Decode the keys of the given dictionary with utf-8."""
newobj = copy.copy(obj)
for k in obj.keys():
if isinstance(k, bytes):
newobj[k.decode("utf-8")] = newobj[k]
newobj.pop(k)
return newobj
def unstring(obj):
"""
Attempt to parse string to native integer formats.
One can't simply call int/float in a try/catch because there is a
semantic difference between (for example) 15.0 and 15.
"""
floatreg = "^\\d+.\\d+$"
match = re.findall(floatreg, obj)
if match != []:
return float(match[0])
intreg = "^\\d+$"
match = re.findall(intreg, obj)
if match != []:
return int(match[0])
return obj
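# Illustrative behaviour of unstring() (examples added for clarity, not part
# of the original module):
#   unstring("15")    -> 15      (int)
#   unstring("15.0")  -> 15.0    (float; 15.0 and 15 are deliberately kept distinct)
#   unstring("abc")   -> "abc"   (returned unchanged)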
def decode_list(b):
"""
Given a non-deserializable object, make a best effort to
return a useful set of results.
"""
if isinstance(b, list):
return [nativestr(obj) for obj in b]
elif isinstance(b, bytes):
return unstring(nativestr(b))
elif isinstance(b, str):
return unstring(b)
return b
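# Illustrative behaviour of decode_list() (examples added for clarity, not
# part of the original module):
#   decode_list(b"15.5")        -> 15.5
#   decode_list("15")           -> 15
#   decode_list([b"a", b"15"])  -> ["a", "15"]   (list items are only decoded to str, not re-typed)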
|
datasets/jigsaw_unintended_bias/jigsaw_unintended_bias.py | WojciechKusa/datasets | 10,608 | 11133471 | # coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jigsaw Unintended Bias in Toxicity Classification dataset"""
import os
import pandas as pd
import datasets
_DESCRIPTION = """\
A collection of comments from the defunct Civil Comments platform that have been annotated for their toxicity.
"""
_HOMEPAGE = "https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/"
_LICENSE = "CC0 (both the dataset and underlying text)"
class JigsawUnintendedBias(datasets.GeneratorBasedBuilder):
"""A collection of comments from the defunct Civil Comments platform that have been annotated for their toxicity."""
VERSION = datasets.Version("1.1.0")
@property
def manual_download_instructions(self):
return """\
To use jigsaw_unintended_bias you have to download it manually from Kaggle: https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/data
You can manually download the data from its homepage or use the Kaggle CLI tool (follow the instructions here: https://www.kaggle.com/docs/api)
Please extract all files in one folder and then load the dataset with:
`datasets.load_dataset('jigsaw_unintended_bias', data_dir='/path/to/extracted/data/')`"""
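# Example of fetching the files with the Kaggle CLI (illustrative sketch, not
# part of the original script; the competition slug is taken from the homepage
# URL above and the target directory is whatever you later pass as `data_dir`):
#   kaggle competitions download -c jigsaw-unintended-bias-in-toxicity-classification
#   unzip jigsaw-unintended-bias-in-toxicity-classification.zip -d /path/to/extracted/data/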
def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=datasets.Features(
{
"target": datasets.Value("float32"),
"comment_text": datasets.Value("string"),
"severe_toxicity": datasets.Value("float32"),
"obscene": datasets.Value("float32"),
"identity_attack": datasets.Value("float32"),
"insult": datasets.Value("float32"),
"threat": datasets.Value("float32"),
"asian": datasets.Value("float32"),
"atheist": datasets.Value("float32"),
"bisexual": datasets.Value("float32"),
"black": datasets.Value("float32"),
"buddhist": datasets.Value("float32"),
"christian": datasets.Value("float32"),
"female": datasets.Value("float32"),
"heterosexual": datasets.Value("float32"),
"hindu": datasets.Value("float32"),
"homosexual_gay_or_lesbian": datasets.Value("float32"),
"intellectual_or_learning_disability": datasets.Value("float32"),
"jewish": datasets.Value("float32"),
"latino": datasets.Value("float32"),
"male": datasets.Value("float32"),
"muslim": datasets.Value("float32"),
"other_disability": datasets.Value("float32"),
"other_gender": datasets.Value("float32"),
"other_race_or_ethnicity": datasets.Value("float32"),
"other_religion": datasets.Value("float32"),
"other_sexual_orientation": datasets.Value("float32"),
"physical_disability": datasets.Value("float32"),
"psychiatric_or_mental_illness": datasets.Value("float32"),
"transgender": datasets.Value("float32"),
"white": datasets.Value("float32"),
"created_date": datasets.Value("string"),
"publication_id": datasets.Value("int32"),
"parent_id": datasets.Value("float"),
"article_id": datasets.Value("int32"),
"rating": datasets.ClassLabel(names=["rejected", "approved"]),
"funny": datasets.Value("int32"),
"wow": datasets.Value("int32"),
"sad": datasets.Value("int32"),
"likes": datasets.Value("int32"),
"disagree": datasets.Value("int32"),
"sexual_explicit": datasets.Value("float"),
"identity_annotator_count": datasets.Value("int32"),
"toxicity_annotator_count": datasets.Value("int32"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
if not os.path.exists(data_dir):
raise FileNotFoundError(
f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('jigsaw_unintended_bias', data_dir=...)`. Manual download instructions: {self.manual_download_instructions}"
)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"path": os.path.join(data_dir, "train.csv"), "split": "train"},
),
datasets.SplitGenerator(
name=datasets.Split("test_private_leaderboard"),
# These kwargs will be passed to _generate_examples
gen_kwargs={"path": os.path.join(data_dir, "test_private_expanded.csv"), "split": "test"},
),
datasets.SplitGenerator(
name=datasets.Split("test_public_leaderboard"),
# These kwargs will be passed to _generate_examples
gen_kwargs={"path": os.path.join(data_dir, "test_public_expanded.csv"), "split": "test"},
),
]
def _generate_examples(self, split: str = "train", path: str = None):
"""Yields examples."""
# This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
# It is in charge of opening the given file and yielding (key, example) tuples from the dataset
# The key is not important, it's more here for legacy reason (legacy from tfds)
# Avoid loading everything into memory at once
all_data = pd.read_csv(path, chunksize=50000)
for data in all_data:
if split != "train":
data = data.rename(columns={"toxicity": "target"})
for _, row in data.iterrows():
example = row.to_dict()
ex_id = example.pop("id")
yield (ex_id, example)
|
objectModel/Python/cdm/objectmodel/cdm_import.py | jocubeit/CDM | 265 | 11133523 | <gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import Optional, TYPE_CHECKING
import warnings
from cdm.utilities import logger, ResolveOptions
from cdm.enums import CdmObjectType
from cdm.enums import CdmLogCode
from cdm.utilities.string_utils import StringUtils
from .cdm_object_simple import CdmObjectSimple
if TYPE_CHECKING:
from cdm.objectmodel import CdmCorpusContext, CdmDocumentDefinition
from cdm.utilities import FriendlyFormatNode, VisitCallback
class CdmImport(CdmObjectSimple):
def __init__(self, ctx: 'CdmCorpusContext', corpus_path: str, moniker: str) -> None:
super().__init__(ctx)
self._TAG = CdmImport.__name__
self.corpus_path = corpus_path # type: str
self.moniker = moniker # type: str
# --- internal ---
self._document = None # type: Optional[CdmDocumentDefinition]
@property
def doc(self) -> Optional['CdmDocumentDefinition']:
warnings.warn('This property is deprecated and it is likely to be removed soon.', DeprecationWarning)
return self._document
@property
def object_type(self) -> 'CdmObjectType':
return CdmObjectType.IMPORT
def copy(self, res_opt: Optional['ResolveOptions'] = None, host: Optional['CdmImport'] = None) -> 'CdmImport':
if not res_opt:
res_opt = ResolveOptions(wrt_doc=self, directives=self.ctx.corpus.default_resolution_directives)
if not host:
copy = CdmImport(self.ctx, self.corpus_path, self.moniker)
else:
copy = host
copy.ctx = self.ctx
copy.corpus_path = self.corpus_path
copy.moniker = self.moniker
copy._document = self._document.copy(res_opt) if self._document else None
return copy
def fetch_object_definition_name(self) -> Optional[str]:
return None
def validate(self) -> bool:
if not bool(self.corpus_path):
missing_fields = ['corpus_path']
logger.error(self.ctx, self._TAG, 'validate', self.at_corpus_path, CdmLogCode.ERR_VALDN_INTEGRITY_CHECK_FAILURE, self.at_corpus_path, ', '.join(map(lambda s: '\'' + s + '\'', missing_fields)))
return False
return True
def visit(self, path_from: str, pre_children: 'VisitCallback', post_children: 'VisitCallback') -> bool:
# not much to do
if pre_children and pre_children(self, path_from):
return False
if post_children and post_children(self, path_from):
return True
return False
|
python/jittor/test/test_searchsorted_op.py | Exusial/jittor | 2,571 | 11133525 | # ***************************************************************
# Copyright (c) 2020 Jittor. Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>.
# All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
skip_this_test = False
try:
jt.dirty_fix_pytorch_runtime_error()
import torch
except:
skip_this_test = True
@unittest.skipIf(skip_this_test, "No Torch Found")
class TestSearchsorted(unittest.TestCase):
def test_searchsorted_cpu(self):
for i in range(1,3):
s = np.sort(np.random.rand(*((10,)*i)),-1)
v = np.random.rand(*((10,)*i))
s_jt = jt.array(s)
v_jt = jt.array(v)
s_tc = torch.from_numpy(s)
v_tc = torch.from_numpy(v)
y_tc = torch.searchsorted(s_tc, v_tc, right=True)
y_jt = jt.searchsorted(s_jt, v_jt, right=True)
assert np.allclose(y_jt.numpy(), y_tc.data)
y_jt = jt.searchsorted(s_jt, v_jt, right=False)
y_tc = torch.searchsorted(s_tc, v_tc, right=False)
assert np.allclose(y_jt.numpy(), y_tc.data)
@unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
@jt.flag_scope(use_cuda=1)
def test_searchsorted_gpu(self):
self.test_searchsorted_cpu()
if __name__ == "__main__":
unittest.main() |
dadmatools/models/flair/parser/utils/embedding.py | njzr/DadmaTools | 161 | 11133544 | <filename>dadmatools/models/flair/parser/utils/embedding.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
import torch
class Embedding(object):
def __init__(self, tokens, vectors, unk=None):
super(Embedding, self).__init__()
self.tokens = tokens
self.vectors = torch.tensor(vectors)
self.pretrained = {w: v for w, v in zip(tokens, vectors)}
self.unk = unk
def __len__(self):
return len(self.tokens)
def __contains__(self, token):
return token in self.pretrained
@property
def dim(self):
return self.vectors.size(1)
@property
def unk_index(self):
if self.unk is not None:
return self.tokens.index(self.unk)
else:
raise AttributeError
@classmethod
def load(cls, path, unk=None):
with open(path, 'r') as f:
lines = [line for line in f]
splits = [line.split() for line in lines]
tokens, vectors = zip(*[(s[0], list(map(float, s[1:])))
for s in splits])
return cls(tokens, vectors, unk=unk)
|
torchbenchmark/models/drq/__init__.py | xuzhao9/benchmark | 384 | 11133627 | <filename>torchbenchmark/models/drq/__init__.py<gh_stars>100-1000
import copy
import math
import pickle as pkl
import numpy as np
import torch
import os
import sys
import torch.nn as nn
import torch.nn.functional as F
from gym import spaces
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import REINFORCEMENT_LEARNING
from .utils import FrameStack, set_seed_everywhere, eval_mode
from .drq import DRQAgent
from .config import DRQConfig
from .replay_buffer import ReplayBuffer
class MockEnv:
def __init__(self, obs):
self._norm_action_space = spaces.Box(
low=-1.0,
high=1.0,
shape=[1],
dtype=np.float32)
self._observation_space = spaces.Box(
low=0,
high=255,
shape=[9, 84, 84],
dtype=np.uint8
)
self.obs = obs
self._max_episode_steps = 250
self.metadata = {'render.modes': []}
self.reward_range = (-float('inf'), float('inf'))
def step(self, action):
reward = 0.0
done = False
info_state = [0.016243, 3.1355, -0.0052817, -0.01073]
info = dict()
info["internal_state"] = info_state
info["discount"] = 1.0
return (self.obs, reward, done, info)
def seed(self, seed=None):
self._norm_action_space.seed(seed)
self._observation_space.seed(seed)
def reset(self):
return self.obs
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._norm_action_space
def make_env(cfg):
if cfg.env == 'ball_in_cup_catch':
domain_name = 'ball_in_cup'
task_name = 'catch'
elif cfg.env == 'point_mass_easy':
domain_name = 'point_mass'
task_name = 'easy'
else:
domain_name = cfg.env.split('_')[0]
task_name = '_'.join(cfg.env.split('_')[1:])
# per dreamer: https://github.com/danijar/dreamer/blob/02f0210f5991c7710826ca7881f19c64a012290c/wrappers.py#L26
camera_id = 2 if domain_name == 'quadruped' else 0
current_dir = os.path.dirname(os.path.realpath(__file__))
mockobs = pkl.load(open(os.path.join(current_dir, cfg.obs_path), "rb"))
low = np.amin(mockobs)
high = np.amax(mockobs)
mockobs = np.random.randint(low=11, high=228, size=mockobs.shape, dtype=np.uint8)
env = MockEnv(mockobs)
env = FrameStack(env, k=cfg.frame_stack)
env.seed(cfg.seed)
assert env.action_space.low.min() >= -1
assert env.action_space.high.max() <= 1
return env
class Model(BenchmarkModel):
task = REINFORCEMENT_LEARNING.OTHER_RL
def __init__(self, device=None, jit=False):
super(Model, self).__init__()
self.device = device
self.jit = jit
self.cfg = DRQConfig()
set_seed_everywhere(self.cfg.seed)
self.env = make_env(self.cfg)
obs_shape = self.env.observation_space.shape
action_shape = self.env.action_space.shape
action_range = [
float(self.env.action_space.low.min()),
float(self.env.action_space.high.max())
]
self.agent = DRQAgent(self.cfg, self.device, obs_shape, action_shape, action_range)
self.replay_buffer = ReplayBuffer(self.env.observation_space.shape,
self.env.action_space.shape,
self.cfg.replay_buffer_capacity,
self.cfg.image_pad, self.device)
self.step = 0
def get_module(self):
obs = self.env.reset()
obs = torch.FloatTensor(obs).to(self.device)
obs = obs.unsqueeze(0)
return self.agent.actor, (obs, )
def train(self, niter=2):
if self.jit:
raise NotImplementedError()
episode, episode_reward, episode_step, done = 0, 0, 1, True
for step in range(niter):
obs = self.env.reset()
done = False
episode_reward = 0
episode_step = 0
episode += 1
if step < self.cfg.num_seed_steps:
action = self.env.action_space.sample()
else:
with eval_mode(self.agent):
action = self.agent.act(obs, sample=True)
# run training update
if self.step >= self.cfg.num_seed_steps:
for _ in range(self.cfg.num_train_iters):
self.agent.update(self.replay_buffer, None,
self.step)
next_obs, reward, done, info = self.env.step(action)
# allow infinite bootstrap
done = float(done)
done_no_max = 0 if episode_step + 1 == self.env._max_episode_steps else done
episode_reward += reward
self.replay_buffer.add(obs, action, reward, next_obs, done,
done_no_max)
obs = next_obs
episode_step += 1
self.step += 1
def eval(self, niter=1):
if self.jit:
raise NotImplementedError()
average_episode_reward = 0
for episode in range(niter):
obs = self.env.reset()
episode_reward = 0
episode_step = 0
with eval_mode(self.agent):
action = self.agent.act(obs, sample=False)
obs, reward, done, info = self.env.step(action)
episode_reward += reward
episode_step += 1
average_episode_reward += episode_reward
average_episode_reward /= float(niter)
|
datadog_checks_base/tests/openmetrics/test_transformers/test_temporal_percent.py | vbarbaresi/integrations-core | 663 | 11133633 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev.testing import requires_py3
from ..utils import get_check
pytestmark = [
requires_py3,
pytest.mark.openmetrics,
pytest.mark.openmetrics_transformers,
pytest.mark.openmetrics_transformers_temporal_percent,
]
def test_named(aggregator, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total{foo="bar"} 5.2
"""
)
check = get_check(
{
'metrics': [
{
'process_cpu_seconds': {
'name': 'process_cpu_usage',
'type': 'temporal_percent',
'scale': 'second',
}
}
],
}
)
dd_run_check(check)
aggregator.assert_metric(
'test.process_cpu_usage', 520, metric_type=aggregator.RATE, tags=['endpoint:test', 'foo:bar']
)
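# Note added for clarity (not part of the original test): with scale 'second'
# the temporal_percent transformer is expected to turn the 5.2 seconds of CPU
# time into 5.2 / 1 * 100 = 520, submitted as a RATE so that per-second deltas
# read as percent utilization -- hence the expected value of 520 above.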
aggregator.assert_all_metrics_covered()
def test_integer(aggregator, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total{foo="bar"} 5.2
"""
)
check = get_check(
{'metrics': [{'process_cpu_seconds': {'name': 'process_cpu_usage', 'type': 'temporal_percent', 'scale': 1}}]}
)
dd_run_check(check)
aggregator.assert_metric(
'test.process_cpu_usage', 520, metric_type=aggregator.RATE, tags=['endpoint:test', 'foo:bar']
)
aggregator.assert_all_metrics_covered()
|
ikalog/scenes/result_detail.py | fetus-hina/IkaLog | 285 | 11133648 | <reponame>fetus-hina/IkaLog
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import datetime
import math
import os
import pickle
import re
import sys
import threading
import traceback
from datetime import datetime
import cv2
import numpy as np
from ikalog.api import APIClient
from ikalog.scenes.stateful_scene import StatefulScene
from ikalog.inputs.filters import OffsetFilter
from ikalog.utils import *
from ikalog.utils.player_name import *
class ResultDetail(StatefulScene):
def evaluate_image_accuracy(self, frame):
r_win = self.mask_win.match_score(frame)[1]
r_lose = self.mask_lose.match_score(frame)[1]
r_x = self.mask_x.match_score(frame)[1]
loss_win = (1.0 - r_win) ** 2
loss_lose = (1.0 - r_lose) ** 2
loss_x = (1.0 - r_x) ** 2
return 1.0 - math.sqrt((loss_win + loss_lose + loss_x) / 3)
#
# AKAZE-based offset / size adjustment
#
def result_detail_normalizer(self, img):
# Remove the regions that are not wanted as keypoints
img = copy.deepcopy(img)
cv2.rectangle(img, (0, 000), (680, 720), (0, 0, 0), -1)
# Generate the feature image
white_filter = matcher.MM_WHITE()
dark_filter = matcher.MM_DARK(visibility=(0, 16))
img_w = white_filter(img)
img_dark = 255 - dark_filter(img)
img_features = img_dark + img_w
img_features[:, 1000:1280] = \
img_dark[:, 1000:1280] - img_w[:, 1000:1280]
# cv2.imshow('features', img_features)
# cv2.waitKey(10000)
return img_features
def get_keypoints(self, img):
detector = cv2.AKAZE_create()
keypoints, descriptors = detector.detectAndCompute(
img,
None,
)
return keypoints, descriptors
def filter_matches(self, kp1, kp2, matches, ratio=0.75):
mkp1, mkp2 = [], []
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
m = m[0]
mkp1.append(kp1[m.queryIdx])
mkp2.append(kp2[m.trainIdx])
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
kp_pairs = zip(mkp1, mkp2)
return p1, p2, kp_pairs
def tuples_to_keypoints(self, tuples):
new_l = []
for point in tuples:
pt, size, angle, response, octave, class_id = point
new_l.append(cv2.KeyPoint(
pt[0], pt[1], size, angle, response, octave, class_id))
return new_l
def keypoints_to_tuples(self, points):
new_l = []
for point in points:
new_l.append((point.pt, point.size, point.angle, point.response, point.octave,
point.class_id))
return new_l
def load_model_from_file(self, filename):
f = open(filename, 'rb')
l = pickle.load(f)
f.close()
self.ref_image_geometry = l[0]
self.ref_keypoints = self.tuples_to_keypoints(l[1])
self.ref_descriptors = l[2]
def save_model_to_file(self, filename):
f = open(filename, 'wb')
pickle.dump([
self.ref_image_geometry,
self.keypoints_to_tuples(self.ref_keypoints),
self.ref_descriptors,
], f)
f.close()
def rebuild_model(self, dest_filename, src_filename=None, img=None, normalizer_func=None):
if img is None:
img = imread(src_filename, 0)
assert img is not None
if normalizer_func is not None:
img = normalizer_func(img)
assert img is not None
self.ref_keypoints, self.ref_descriptors = \
self.get_keypoints(img)
self.ref_image_geometry = img.shape[:2]
self.save_model_to_file(dest_filename)
IkaUtils.dprint('%s: Created model data %s' % (self, dest_filename))
def load_akaze_model(self):
model_filename = IkaUtils.get_path(
'data', 'result_detail_features.akaze.model')
try:
self.load_model_from_file(model_filename)
if self.ref_keypoints is None:
raise
except:
IkaUtils.dprint(
'%s: Failed to load akaze model. trying to rebuild...' % self)
self.rebuild_model(
model_filename,
img=imread('data/result_detail_features.png'),
normalizer_func=self.result_detail_normalizer
)
self.load_model_from_file(model_filename)
def auto_warp(self, context):
# Auto-detect the screen offset and return the corrected image (using AKAZE)
frame = context['engine'].get('frame', None)
if frame is None:
return None
keypoints, descs = self.get_keypoints(
self.result_detail_normalizer(frame))
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
raw_matches = matcher.knnMatch(
descs,
trainDescriptors=self.ref_descriptors,
k=2
)
p2, p1, kp_pairs = self.filter_matches(
keypoints,
self.ref_keypoints,
raw_matches,
)
if len(p1) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
else:
H, status = None, None
print('%d matches found, not enough for homography estimation' % len(p1))
raise
w = 1280
h = 720
corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
pts2 = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
pts1 = np.float32(cv2.perspectiveTransform(
corners.reshape(1, -1, 2), H).reshape(-1, 2) + (0, 0))
M = cv2.getPerspectiveTransform(pts1, pts2)
# out = cv2.drawKeypoints(img2, keypoints1, None)
new_frame = cv2.warpPerspective(frame, M, (w, h))
# Does the warped image match the mask?
matched = ImageUtils.match_with_mask(
new_frame, self.winlose_gray, 0.997, 0.22)
if matched:
return new_frame
IkaUtils.dprint('%s: auto_warp() function broke the image.' % self)
return None
def adjust_method_generic(self, context, l):
frame = context['engine']['frame']
# Measure the width of the black border above the WIN/LOSE display area
img1 = frame[:30, 30:50, :]
img2 = np.sum(img1, axis=2)
img2 = np.sum(img2, axis=1)
img3 = np.array(range(img2.shape[0]))
img3[img2 > 0] = 0
v_margin = np.amax(img3)
if v_margin > 0:
my = v_margin + 1
mx = int(my * 1280 / 720)
new_frame = cv2.resize(frame[my: -my, :], (1280, 720))
l.append({
'frame': new_frame,
'score': self.evaluate_image_accuracy(new_frame),
'desc': 'Wrong resolution & aspect'
})
new_frame = cv2.resize(frame[my: -my, mx:-mx], (1280, 720))
l.append({
'frame': new_frame,
'score': self.evaluate_image_accuracy(new_frame),
'desc': 'Wrong resolution'
})
l.append({
'frame': frame,
'score': self.evaluate_image_accuracy(frame),
'acceptable': True,
})
def adjust_method_offset(self, context, l):
# Detect slide offset
filter = OffsetFilter(self)
filter.enable()
# Needed because the filter expects these attributes...
self.out_width = 1280
self.out_height = 720
best_match = (context['engine']['frame'], 0.0, 0, 0)
offset_list = [0, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5]
gray_frame = cv2.cvtColor(context['engine']['frame'], cv2.COLOR_BGR2GRAY)
for ox in offset_list:
for oy in offset_list:
filter.offset = (ox, oy)
img = filter.execute(gray_frame)
score = self.evaluate_image_accuracy(img)
if best_match[1] < score:
best_match = (img, score, ox, oy)
if 0:
l.append({
'frame': img,
'score': score,
'desc': 'Offset (%s, %s)' % (ox, oy),
                        'offset': (ox, oy)
})
if best_match[2] != 0 or best_match[3] != 0:
filter.offset = (best_match[2], best_match[3])
new_frame = filter.execute(context['engine']['frame'])
l.append({
'frame': new_frame,
                'score': best_match[1],
'desc': 'Offset (%s, %s)' % (best_match[2], best_match[3]),
'acceptable': True,
'offset': (best_match[2], best_match[3]),
})
def adjust_image(self, context):
l = []
self.adjust_method_generic(context, l)
self.adjust_method_offset(context, l)
if len(l) > 0:
best = sorted(l, key=lambda x: x['score'], reverse=True)[0]
img = best['frame']
if best.get('desc', None):
IkaUtils.dprint(
'%s: Capture setting might be wrong. %s (recover score=%f)' %
(self, best['desc'], best['score']))
self._call_plugins_later(
'on_result_detail_log',
params={'desc': best['desc']}
)
if best.get('offset',None):
self._call_plugins('on_result_detail_calibration', best.get('offset'))
else:
# Should not reach here
IkaUtils.dprint('%s: [BUG] Failed to normalize image' % self)
img = context['engine']['frame']
if 0:
for e in l:
print(e['score'], e.get('desc', '(none)'))
return img
def async_recoginiton_worker(self, context):
        IkaUtils.dprint('%s: weapons recognition started.' % self)
weapons_list = []
for player in context['game']['players']:
weapons_list.append(player.get('img_weapon', None))
# local
try:
if self._client_local is not None:
weapon_response_list = self._client_local.recoginize_weapons(
weapons_list)
for entry_id in range(len(weapon_response_list)):
context['game']['players'][entry_id]['weapon'] = \
weapon_response_list[entry_id]
except:
            IkaUtils.dprint('Exception occurred in weapon recognition.')
IkaUtils.dprint(traceback.format_exc())
# remote
try:
if self._client_remote is not None:
weapon_response_list = self._client_remote.recoginize_weapons(
weapons_list)
for entry_id in range(len(weapon_response_list)):
context['game']['players'][entry_id]['weapon'] = \
weapon_response_list[entry_id]
except:
            IkaUtils.dprint('Exception occurred in weapon recognition.')
IkaUtils.dprint(traceback.format_exc())
        IkaUtils.dprint('%s: weapons recognition done.' % self)
if 0:
self._detect_names_per_my_kill(context)
self._analyze_kills_per_weapon(context)
self._analyze_kills_per_player(context)
self._call_plugins_later('on_result_detail')
self._call_plugins_later('on_game_individual_result')
def is_entry_me(self, img_entry):
        # Decide from the histogram whether this entry belongs to the local player.
if len(img_entry.shape) > 2 and img_entry.shape[2] != 1:
img_me = cv2.cvtColor(img_entry[:, 0:43], cv2.COLOR_BGR2GRAY)
else:
img_me = img_entry[:, 0:43]
img_me = cv2.threshold(img_me, 230, 255, cv2.THRESH_BINARY)[1]
me_score = np.sum(img_me)
me_score_normalized = 0
try:
me_score_normalized = me_score / (43 * 45 * 255 / 10)
except ZeroDivisionError as e:
me_score_normalized = 0
#print("score=%3.3f" % me_score_normalized)
return (me_score_normalized > 1)
def guess_fest_title_ja(self, img_fest_title):
img_fest_title_hsv = cv2.cvtColor(img_fest_title, cv2.COLOR_BGR2HSV)
yellow = cv2.inRange(img_fest_title_hsv[:, :, 0], 32 - 2, 32 + 2)
yellow2 = cv2.inRange(img_fest_title_hsv[:, :, 2], 240, 255)
img_fest_title_mask = yellow & yellow2
is_fes = np.sum(img_fest_title_mask) > img_fest_title_mask.shape[
0] * img_fest_title_mask.shape[1] * 16
        # Mark pixels judged to be text as 1 and sum them vertically.
        img_fest_title_hist = np.sum(
            img_fest_title_mask / 255, axis=0)  # detected dot count per column
a = np.array(range(len(img_fest_title_hist)), dtype=np.int32)
b = np.extract(img_fest_title_hist > 0, a)
x1 = np.amin(b)
x2 = np.amax(b)
if (x2 - x1) < 4:
return None, None, None
        # Crop to the minimal bounding box.
        img_fest_title_new = img_fest_title[:, x1:x2]
        # From experience, the boy/girl suffix is about 56 dots wide.
gender_x1 = x2 - 36
gender_x2 = x2
img_fest_gender = img_fest_title_mask[:, gender_x1:gender_x2]
        # Title levels: futsuu-no / makoto-no / super / charisma / eien (ordinary / true / super / charisma / eternal).
img_fest_level = img_fest_title_mask[:, 0:52]
try:
if self.fest_gender_recoginizer:
gender = self.fest_gender_recoginizer.match(
cv2.cvtColor(img_fest_gender, cv2.COLOR_GRAY2BGR))
except:
IkaUtils.dprint(traceback.format_exc())
gender = None
try:
if self.fest_level_recoginizer:
level = self.fest_level_recoginizer.match(
cv2.cvtColor(img_fest_level, cv2.COLOR_GRAY2BGR))
except:
IkaUtils.dprint(traceback.format_exc())
level = None
team = None
return gender, level, team
def guess_fest_title_en_NA(self, img_fest_title):
IkaUtils.dprint(
            '%s: Fest recognition in this language is not implemented'
% self
)
return None, None, None
def guess_fest_title_en_UK(self, img_fest_title):
IkaUtils.dprint(
            '%s: Fest recognition in this language is not implemented'
% self
)
return None, None, None
def guess_fest_title(self, img_fest_title):
guess_fest_title_funcs = {
'ja': self.guess_fest_title_ja,
'en_NA': self.guess_fest_title_en_NA,
'en_UK': self.guess_fest_title_en_UK,
}
func = None
for lang in Localization.get_game_languages():
func = guess_fest_title_funcs.get(lang, None)
if func is not None:
break
if func is None:
IkaUtils.dprint(
                '%s: Fest recognition in this language is not implemented'
% self
)
return None, None, None
return func(img_fest_title)
def analyze_team_colors(self, context, img):
        # Estimate the team colors from the screenshot.
assert 'won' in context['game']
assert img is not None
if context['game']['won']:
my_team_color_bgr = img[115:116, 1228:1229]
counter_team_color_bgr = img[452:453, 1228:1229]
else:
counter_team_color_bgr = img[115:116, 1228:1229]
my_team_color_bgr = img[452:453, 1228:1229]
my_team_color = {
'rgb': cv2.cvtColor(my_team_color_bgr, cv2.COLOR_BGR2RGB).tolist()[0][0],
'hsv': cv2.cvtColor(my_team_color_bgr, cv2.COLOR_BGR2HSV).tolist()[0][0],
}
counter_team_color = {
'rgb': cv2.cvtColor(counter_team_color_bgr, cv2.COLOR_BGR2RGB).tolist()[0][0],
'hsv': cv2.cvtColor(counter_team_color_bgr, cv2.COLOR_BGR2HSV).tolist()[0][0],
}
return (my_team_color, counter_team_color)
def _detect_names_per_my_kill(self, context):
all_players = context['game']['players']
me = IkaUtils.getMyEntryFromContext(context)
counter_team = \
list(filter(lambda x: x['team'] != me['team'], all_players))
img_name_counter_team = \
list(map(lambda e: e['img_name_normalized'], counter_team))
ct_name_classifier = PlayerNameClassifier(img_name_counter_team)
for kill_index in range(len(context['game'].get('kill_list', []))):
kill = context['game']['kill_list'][kill_index]
if kill.get('img_kill_hid', None) is None:
continue
player_index = ct_name_classifier.predict(kill['img_kill_hid'])
if player_index is None:
continue
if 1:
IkaUtils.dprint('%s: my kill %d -> player %d' %
(self, kill_index, player_index))
kill['player'] = counter_team[player_index]
def _analyze_kills_per_weapon(self, context):
r = {}
for kill in context['game'].get('kill_list', []):
if 'player' in kill:
weapon = kill['player']['weapon']
r[weapon] = r.get(weapon, 0) + 1
context['game']['kills_per_weapon'] = r
IkaUtils.dprint('%s: _analyze_kills_per_weapon result: %s' % (self, r))
return r
def _analyze_kills_per_player(self, context):
for kill in context['game'].get('kill_list', []):
if 'player' in kill:
player = kill['player']
player['my_kills'] = player.get('my_kills', 0) + 1
if 0:
IkaUtils.dprint('%s: _analyze_kills_per_player' % self)
for player in context['game']['players']:
IkaUtils.dprint(' player %d: my_kills = %d' % (
context['game']['players'].index(player),
player['my_kills']
))
def analyze_entry(self, img_entry):
        # Left start position of each player entry.
        entry_left = 610
        # Width of each player entry.
        entry_width = 610
        # Height of each player entry.
        entry_height = 46
        # Start offsets and widths of the name and other fields within each entry.
entry_xoffset_weapon = 760 - entry_left
entry_xoffset_weapon_me = 719 - entry_left
entry_width_weapon = 47
entry_xoffset_name = 809 - entry_left
entry_xoffset_name_me = 770 - entry_left
entry_width_name = 180
entry_xoffset_nawabari_score = 995 - entry_left
entry_width_nawabari_score = 115
entry_xoffset_score_p = entry_xoffset_nawabari_score + entry_width_nawabari_score
entry_width_score_p = 20
entry_xoffset_kd = 1185 - entry_left
entry_width_kd = 31
entry_height_kd = 21
me = self.is_entry_me(img_entry)
if me:
weapon_left = entry_xoffset_weapon_me
name_left = entry_xoffset_name_me
rank_left = 2
else:
weapon_left = entry_xoffset_weapon
name_left = entry_xoffset_name
rank_left = 43
img_rank = img_entry[20:45, rank_left:rank_left + 43]
img_weapon = img_entry[:, weapon_left:weapon_left + entry_width_weapon]
img_name = img_entry[:, name_left:name_left + entry_width_name]
img_score = img_entry[
:, entry_xoffset_nawabari_score:entry_xoffset_nawabari_score + entry_width_nawabari_score]
img_score_p = img_entry[
:, entry_xoffset_score_p:entry_xoffset_score_p + entry_width_score_p]
ret, img_score_p_thresh = cv2.threshold(cv2.cvtColor(
img_score_p, cv2.COLOR_BGR2GRAY), 230, 255, cv2.THRESH_BINARY)
img_kills = img_entry[0:entry_height_kd,
entry_xoffset_kd:entry_xoffset_kd + entry_width_kd]
img_deaths = img_entry[entry_height_kd:entry_height_kd *
2, entry_xoffset_kd:entry_xoffset_kd + entry_width_kd]
img_fes_title = img_name[0:(entry_height // 2), :]
img_fes_title_hsv = cv2.cvtColor(img_fes_title, cv2.COLOR_BGR2HSV)
yellow = cv2.inRange(img_fes_title_hsv[:, :, 0], 32 - 2, 32 + 2)
yellow2 = cv2.inRange(img_fes_title_hsv[:, :, 2], 240, 255)
img_fes_title_mask = yellow & yellow2
is_fes = np.sum(img_fes_title_mask) > img_fes_title_mask.shape[
0] * img_fes_title_mask.shape[1] * 16
if is_fes:
fes_gender, fes_level, fes_team = self.guess_fest_title(
img_fes_title
)
        # Outside of a Splatfest: if the "p" suffix is shown (avg = 55.0) it is Turf War, otherwise Ranked Battle.
isRankedBattle = (not is_fes) and (
np.average(img_score_p_thresh[:, :]) < 16)
isNawabariBattle = (not is_fes) and (not isRankedBattle)
entry = {
"me": me,
"img_rank": img_rank,
"img_weapon": img_weapon,
"img_name": img_name,
"img_name_normalized": normalize_player_name(img_name),
"img_score": img_score,
"img_kills": img_kills,
"img_deaths": img_deaths,
}
if is_fes:
entry['img_fes_title'] = img_fes_title
if fes_gender and ('ja' in fes_gender):
entry['gender'] = fes_gender['ja']
if fes_level and ('ja' in fes_level):
entry['prefix'] = fes_level['ja']
if fes_gender and ('en' in fes_gender):
entry['gender_en'] = fes_gender['en']
if fes_level and ('boy' in fes_level):
entry['prefix_en'] = fes_level['boy']
if self.udemae_recoginizer and isRankedBattle:
try:
entry['udemae_pre'] = self.udemae_recoginizer.match(
entry['img_score']).upper()
except:
                IkaUtils.dprint('Exception occurred in Udemae recognition.')
IkaUtils.dprint(traceback.format_exc())
if self.number_recoginizer:
try:
entry['rank'] = self.number_recoginizer.match_digits(
entry['img_rank'])
entry['kills'] = self.number_recoginizer.match_digits(
entry['img_kills'])
entry['deaths'] = self.number_recoginizer.match_digits(
entry['img_deaths'])
if isNawabariBattle:
entry['score'] = self.number_recoginizer.match_digits(
entry['img_score'])
except:
                IkaUtils.dprint('Exception occurred in K/D recognition.')
IkaUtils.dprint(traceback.format_exc())
return entry
def extract_entries(self, context, img=None):
if img is None:
img = self.adjust_image(context)
        # Left start position of each player entry.
        entry_left = 610
        # Width of each player entry.
        entry_width = 630
        # Height of each player entry.
        entry_height = 45
entry_top = [101, 166, 231, 296, 431, 496, 561, 626]
img_entries = []
for entry_id in range(len(entry_top)): # 0..7
top = entry_top[entry_id]
img_entry = img[top:top + entry_height,
entry_left:entry_left + entry_width]
img_entries.append(img_entry)
return img_entries
def is_entries_still_sliding(self, img_entries):
white_filter = matcher.MM_WHITE()
array0to14 = np.array(range(15), dtype=np.int32)
x_pos_list = []
for img_entry in img_entries:
img_XX = img_entry[:, 1173 - 610: 1173 + 13 - 610] # -> 2D
img_XX_hist = np.sum(white_filter(img_XX), axis=0) # -> 1D
img_XX_hist_x = np.extract(img_XX_hist > 0, array0to14[
0:img_XX_hist.shape[0]])
if img_XX_hist_x.shape[0] == 0:
continue
img_XX_hist_x_avg = np.average(img_XX_hist_x)
x_pos_list.append(img_XX_hist_x_avg)
x_avg_min = np.amin(x_pos_list)
x_avg_max = np.amax(x_pos_list)
x_diff = int(x_avg_max - x_avg_min)
if 0: # debug
print('is_entries_still_sliding: x_pos_list %s min %f max %f diff %d' %
(x_pos_list, x_avg_min, x_avg_max, x_diff))
return x_diff
def analyze(self, context):
context['game']['players'] = []
weapon_list = []
img = self.adjust_image(context)
img_entries = self.extract_entries(context, img)
# Adjust img_entries rect using result of
# self.is_entries_still_sliding().
# This allows more accurate weapon classification.
diff_x = self.is_entries_still_sliding(img_entries)
if diff_x > 0:
white_filter = matcher.MM_WHITE()
index = 7
# Find the last player's index.
while (0 < index) and \
(np.sum(white_filter(img_entries[index])) < 1000):
index -= 1
# adjust the player's rect 3 times.
for i in range(3):
diff_x = self.is_entries_still_sliding(img_entries)
img_entry = img_entries[index]
w = img_entry.shape[1] - diff_x
img_entries[index][:, 0: w] = img_entry[:, diff_x: w + diff_x]
if 0:
cv2.imshow('a', img_entries[0])
cv2.imshow('b', img_entries[index])
cv2.waitKey(0)
for entry_id in range(len(img_entries)):
img_entry = img_entries[entry_id]
e = self.analyze_entry(img_entry)
if e.get('rank', None) is None:
continue
# team, rank_in_team
e['team'] = 1 if entry_id < 4 else 2
e['rank_in_team'] = entry_id + \
1 if e['team'] == 1 else entry_id - 3
# won
if e['me']:
context['game']['won'] = (entry_id < 4)
context['game']['players'].append(e)
if 0:
e_ = e.copy()
for f in list(e.keys()):
if f.startswith('img_'):
del e_[f]
print(e_)
if 0:
worker = threading.Thread(
target=self.async_recoginiton_worker, args=(context,))
worker.start()
else:
self.async_recoginiton_worker(context)
        # Team colors
team_colors = self.analyze_team_colors(context, img)
context['game']['my_team_color'] = team_colors[0]
context['game']['counter_team_color'] = team_colors[1]
        # Splatfest-related
context['game']['is_fes'] = ('prefix' in context['game']['players'][0])
        # Miscellaneous
# context['game']['timestamp'] = datetime.now()
context['game']['image_scoreboard'] = \
copy.deepcopy(context['engine']['frame'])
self._call_plugins_later('on_result_detail_still')
return True
def reset(self):
super(ResultDetail, self).reset()
self._last_event_msec = - 100 * 1000
self._match_start_msec = - 100 * 1000
self._last_frame = None
self._diff_pixels = []
def _state_default(self, context):
if self.matched_in(context, 30 * 1000):
return False
if self.is_another_scene_matched(context, 'GameTimerIcon'):
return False
frame = context['engine']['frame']
if frame is None:
return False
matched = ImageUtils.match_with_mask(
context['engine']['frame'], self.winlose_gray, 0.997, 0.22)
if matched:
self._match_start_msec = context['engine']['msec']
self._switch_state(self._state_tracking)
return matched
def _state_tracking(self, context):
frame = context['engine']['frame']
if frame is None:
return False
        # Match 1: rough check against the known mask.
matched = ImageUtils.match_with_mask(
context['engine']['frame'], self.winlose_gray, 0.997, 0.22)
        # Match 2: once match 1 passes, wait until the white text stabilizes.
        # Condition 1: the white-text diff against the previous frame is 0 pixels.
        # Condition 2: the "X" marks in front of the K/D counts are vertically aligned.
diff_pixels = None
img_current_h_i16 = None
matched_diff0 = False
matched_diffX = False
if matched:
img_current_bgr = frame[626:626 + 45, 640:1280]
img_current_hsv = cv2.cvtColor(img_current_bgr, cv2.COLOR_BGR2HSV)
img_current_h_i16 = np.array(img_current_hsv[:, :, 1], np.int16)
if matched and (self._last_frame is not None):
img_diff = abs(img_current_h_i16 - self._last_frame)
img_diff_u8 = np.array(img_diff, np.uint8)
img_white = self._white_filter(img_current_bgr)
img_diff_u8[img_white < 128] = 0
img_diff_u8[img_diff_u8 < 16] = 0
img_diff_u8[img_diff_u8 > 1] = 255
# cv2.imshow('DIFF', img_diff_u8)
# cv2.imshow('white', img_white)
diff_pixels = int(np.sum(img_diff_u8) / 255)
if img_current_h_i16 is not None:
self._last_frame = img_current_h_i16
if diff_pixels is not None:
matched_diff0 = (diff_pixels == 0)
        # If the white mask did not match exactly, fall back to matching on the "X" marks:
        # - if is_entries_still_sliding() (the scatter of the "X" marks)
        #   is 0, set matched_diffX = True
        # - if the minimum of its recent return values equals the latest
        #   value, accept it as matched_diffX = True as a compromise
if (diff_pixels is not None) and (not matched_diff0):
            # FIXME: adjust_image is very expensive.
img = self.adjust_image(context)
img_entries = self.extract_entries(context, img)
diff_x = self.is_entries_still_sliding(img_entries)
matched_diffX = (diff_x == 0)
if not matched_diffX:
self._diff_pixels.append(diff_x)
if len(self._diff_pixels) > 4:
self._diff_pixels.pop(0)
matched_diffX = \
                    (np.amin(self._diff_pixels) == diff_x)
        # escaped: no match for more than 1000 ms, so leaving the scene is confirmed
        # matched2: the white text is stable (condition 1 or condition 2 holds)
        # triggered: the event has already been fired within the recent time window
escaped = not self.matched_in(context, 1000)
matched2 = matched_diff0 or matched_diffX
triggered = self.matched_in(
context, 30 * 1000, attr='_last_event_msec')
if matched2 and (not triggered):
self.analyze(context)
# self.dump(context)
# self._call_plugins('on_result_detail')
# self._call_plugins('on_game_individual_result')
self._last_event_msec = context['engine']['msec']
triggered = True
if matched:
return True
if escaped:
if (not triggered) and (len(self._diff_pixels) > 0):
IkaUtils.dprint(''.join((
                    '%s: Detected the result screen but could not capture a stable still image. Possible causes:\n' % self,
                    ' - Too much noise coming from the HDMI capture device\n',
                    ' - The video file being processed has heavy block noise\n',
                    ' - The input image is not in the expected format\n',
' min(diff_pixels): %s' % min(self._diff_pixels),
)))
self._match_start_msec = - 100 * 1000
self._last_frame = None
self._diff_pixels = []
self._switch_state(self._state_default)
return False
def dump(self, context):
matched = True
analyzed = True
won = IkaUtils.getWinLoseText(
context['game']['won'], win_text="win", lose_text="lose", unknown_text="unknown")
fes = context['game'].get('is_fes', False)
print("matched %s analyzed %s result %s fest %s" %
(matched, analyzed, won, fes))
print('--------')
for e in context['game']['players']:
udemae = e['udemae_pre'] if ('udemae_pre' in e) else None
rank = e['rank'] if ('rank' in e) else None
kills = e['kills'] if ('kills' in e) else None
deaths = e['deaths'] if ('deaths' in e) else None
weapon = e['weapon'] if ('weapon' in e) else None
score = e['score'] if ('score' in e) else None
me = '*' if e['me'] else ''
if 'prefix' in e:
prefix = e['prefix']
prefix_ = re.sub('の', '', prefix)
gender = e['gender']
else:
prefix_ = ''
gender = ''
print("team %s rank_in_team %s rank %s udemae %s %s/%s weapon %s score %s %s%s %s" % (
e.get('team', None),
e.get('rank_in_team', None),
e.get('rank', None),
e.get('udemae_pre', None),
e.get('kills', None),
e.get('deaths', None),
e.get('weapon', None),
e.get('score', None),
prefix_, gender,
me,))
print('--------')
def _analyze(self, context):
frame = context['engine']['frame']
return True
def _init_scene(self, debug=False):
self.mask_win = IkaMatcher(
651, 47, 99, 33,
img_file='result_detail.png',
threshold=0.60,
orig_threshold=0.20,
bg_method=matcher.MM_NOT_WHITE(),
fg_method=matcher.MM_WHITE(),
label='result_detail:WIN',
debug=debug,
)
self.mask_lose = IkaMatcher(
651, 378, 99, 33,
img_file='result_detail.png',
threshold=0.60,
orig_threshold=0.40,
bg_method=matcher.MM_NOT_WHITE(),
fg_method=matcher.MM_WHITE(),
label='result_detail:LOSE',
debug=debug,
)
self.mask_x = IkaMatcher(
1173, 101, 14, 40,
img_file='result_detail.png',
threshold=0.60,
orig_threshold=0.40,
bg_method=matcher.MM_NOT_WHITE(),
fg_method=matcher.MM_WHITE(),
label='result_detail:X',
debug=False,
)
languages = Localization.get_game_languages()
for lang in languages:
mask_file = IkaUtils.get_path('masks', lang, 'result_detail.png')
if os.path.exists(mask_file):
break
if not os.path.exists(mask_file):
mask_file = IkaUtils.get_path('masks', 'result_detail.png')
winlose = imread(mask_file)
self.winlose_gray = cv2.cvtColor(winlose, cv2.COLOR_BGR2GRAY)
self._white_filter = matcher.MM_WHITE()
self.udemae_recoginizer = UdemaeRecoginizer()
self.number_recoginizer = NumberRecoginizer()
# for SplatFest (ja)
self.fest_gender_recoginizer = character_recoginizer.FesGenderRecoginizer()
self.fest_level_recoginizer = character_recoginizer.FesLevelRecoginizer()
self.load_akaze_model()
self._client_local = APIClient(local_mode=True)
# self._client_remote = APIClient(local_mode=False, base_uri='http://localhost:8000')
self._client_remote = None
if __name__ == "__main__":
ResultDetail.main_func()
|
Graph Traversal/BFS/bfs.py | Subrato-oid/cs-algorithms | 239 | 11133651 | import collections
def bfs(graph, root):
visited, queue = [], collections.deque([root])
visited.append(root)
while queue:
vertex = queue.popleft()
for neighbour in graph[vertex]:
if neighbour not in visited:
visited.append(neighbour)
queue.append(neighbour)
return visited
if __name__ == '__main__':
graph = {0: [1,3], 1: [2], 2: [3], 3: [1,2]}
    print(bfs(graph, 0))
|
steamctl/commands/hlmaster/__init__.py | rossengeorgiev/steamctl | 138 | 11133654 | <filename>steamctl/commands/hlmaster/__init__.py
from steamctl.argparser import register_command
@register_command('hlmaster', help='Query master server and server information')
def setup_arg_parser(cp):
def print_help(*args, **kwargs):
cp.print_help()
cp.set_defaults(_cmd_func=print_help)
sub_cp = cp.add_subparsers(metavar='<subcommand>',
dest='subcommand',
title='List of sub-commands',
description='',
)
scp_query = sub_cp.add_parser("query", help="Query HL Master for servers")
scp_query.add_argument('filter', type=str)
scp_query.add_argument('--ip-only', action='store_true', help='Show short info about each server')
scp_query.add_argument('-n', '--num-servers', default=20, type=int, help="Number of result to return (Default: 20)")
scp_query.add_argument('-m', '--master', default=None, type=str, help="Master server (default: hl2master.steampowered.com:27011)")
scp_query.set_defaults(_cmd_func=__name__ + '.cmds:cmd_hlmaster_query')
scp_info = sub_cp.add_parser("info", help="Query info from a goldsrc or source server")
scp_info.add_argument('server', type=str)
scp_info.add_argument('-i', '--info', action='store_true', help='Show server info')
scp_info.add_argument('-r', '--rules', action='store_true', help='Show server rules')
scp_info.add_argument('-p', '--players', action='store_true', help='Show player list')
scp_info.add_argument('-s', '--short', action='store_true', help='Print server info in short form')
scp_info.set_defaults(_cmd_func=__name__ + '.cmds:cmd_hlmaster_info')
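# Example invocations (sketch only; the filter string and server address below
# are illustrative placeholders, not values taken from this module):
#   steamctl hlmaster query '\appid\440' -n 5 --ip-only
#   steamctl hlmaster info 192.0.2.10:27015 --info --players --short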
|
trove/common/db/mysql/models.py | sapcc/trove | 244 | 11133679 | <gh_stars>100-1000
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import netaddr
from trove.common import cfg
from trove.common.db import models
from trove.common.db.mysql import data as mysql_settings
from trove.common.i18n import _
CONF = cfg.CONF
class MySQLSchema(models.DatastoreSchema):
"""Represents a MySQL database and its properties."""
# Defaults
__charset__ = "utf8"
__collation__ = "utf8_general_ci"
dbname = re.compile(r"^[A-Za-z0-9_-]+[\s\?\#\@]*[A-Za-z0-9_-]+$")
# Complete list of acceptable values
collation = mysql_settings.collation
charset = mysql_settings.charset
def __init__(self, name=None, collate=None, character_set=None,
deserializing=False):
super(MySQLSchema, self).__init__(name=name,
deserializing=deserializing)
if not deserializing:
if collate:
self.collate = collate
if character_set:
self.character_set = character_set
@property
def _max_schema_name_length(self):
return 64
def _is_valid_schema_name(self, value):
# must match the dbname regex, and
# cannot contain a '\' character.
return not any([
not self.dbname.match(value),
("%r" % value).find("\\") != -1
])
@property
def collate(self):
"""Get the appropriate collate value."""
if not self._collate and not self._character_set:
return self.__collation__
elif not self._collate:
return self.charset[self._character_set][0]
else:
return self._collate
@collate.setter
def collate(self, value):
"""Validate the collation and set it."""
if not value:
pass
elif self._character_set:
if value not in self.charset[self._character_set]:
msg = (_("%(val)s not a valid collation for charset %(char)s.")
% {'val': value, 'char': self._character_set})
raise ValueError(msg)
self._collate = value
else:
if value not in self.collation:
raise ValueError(_("'%s' not a valid collation.") % value)
self._collate = value
self._character_set = self.collation[value]
@property
def character_set(self):
"""Get the appropriate character set value."""
if not self._character_set:
return self.__charset__
else:
return self._character_set
@character_set.setter
def character_set(self, value):
"""Validate the character set and set it."""
if not value:
pass
elif value not in self.charset:
raise ValueError(_("'%s' not a valid character set.") % value)
else:
self._character_set = value
def verify_dict(self):
# Also check the collate and character_set values if set, initialize
# them if not.
super(MySQLSchema, self).verify_dict()
if self.__dict__.get('_collate'):
self.collate = self._collate
else:
self._collate = None
if self.__dict__.get('_character_set'):
self.character_set = self._character_set
else:
self._character_set = None
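# Usage sketch for MySQLSchema (the schema name below is illustrative, not a
# Trove fixture):
#   schema = MySQLSchema(name="app_db", character_set="utf8")
#   schema.collate        # falls back to the first collation registered for utf8
#   schema.character_set  # -> "utf8"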
class MySQLUser(models.DatastoreUser):
"""Represents a MySQL User and its associated properties."""
not_supported_chars = re.compile(r"""^\s|\s$|'|"|;|`|,|/|\\""")
def _is_valid_string(self, value):
if (not value or
self.not_supported_chars.search(value) or
("%r" % value).find("\\") != -1):
return False
else:
return True
def _is_valid_user_name(self, value):
return self._is_valid_string(value)
def _is_valid_password(self, value):
return self._is_valid_string(value)
def _is_valid_host_name(self, value):
if value in [None, "%"]:
# % is MySQL shorthand for "everywhere". Always permitted.
# Null host defaults to % anyway.
return True
if CONF.hostname_require_valid_ip:
try:
# '%' works as a MySQL wildcard, but it is not a valid
# part of an IPNetwork
netaddr.IPNetwork(value.replace('%', '1'))
except (ValueError, netaddr.AddrFormatError):
return False
else:
return True
else:
# If it wasn't required, anything else goes.
return True
def _build_database_schema(self, name):
return MySQLSchema(name)
def deserialize_schema(self, value):
return MySQLSchema.deserialize(value)
@property
def _max_user_name_length(self):
return 16
@property
def schema_model(self):
return MySQLSchema
|
transit-network-analysis-tools/parallel_cpap.py | ArcGIS/public-transit-tools | 130 | 11133692 | ############################################################################
## Tool name: Transit Network Analysis Tools
## Created by: <NAME>, Esri
## Last updated: 6 August 2021
############################################################################
"""Do the core logic for the Create Percent Access Polygons tool in parallel
for maximum efficiency.
This version of the tool is for ArcGIS Pro only.
Copyright 2021 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=logging-fstring-interpolation
from concurrent import futures
import sys
import os
import time
import uuid
import shutil
import traceback
import argparse
import logging
import arcpy
from AnalysisHelpers import FACILITY_ID_FIELD, FROM_BREAK_FIELD, TO_BREAK_FIELD, TIME_FIELD, FIELDS_TO_PRESERVE, \
MSG_STR_SPLITTER
# Set logging for the main process.
# LOGGER logs everything from the main process to stdout using a specific format that the tool
# can parse and write to the geoprocessing message feed.
LOG_LEVEL = logging.INFO # Set to logging.DEBUG to see verbose debug messages
LOGGER = logging.getLogger(__name__) # pylint:disable=invalid-name
LOGGER.setLevel(LOG_LEVEL)
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(LOG_LEVEL)
# Used by script tool to split message text from message level to add correct message type to GP window
console_handler.setFormatter(logging.Formatter("%(levelname)s" + MSG_STR_SPLITTER + "%(message)s"))
LOGGER.addHandler(console_handler)
DELETE_INTERMEDIATE_OD_OUTPUTS = True # Set to False for debugging purposes
def parallel_counter(time_lapse_polygons, raster_template, scratch_folder, combo):
"""Calculate percent access polygons for the designated facility, from break, to break combo.
Args:
time_lapse_polygons (feature class catalog path): Time lapse polygons
raster_template (feature class catalog path): Raster-like polygons template
scratch_folder (folder): Folder location to write intermediate outputs
combo (list): facility_id, from_break, to_break
Returns:
dict: job result parameters
"""
# Create a job ID and a folder and scratch gdb for this job
job_id = uuid.uuid4().hex
job_folder = os.path.join(scratch_folder, job_id)
os.mkdir(job_folder)
scratch_gdb = os.path.join(job_folder, "scratch.gdb")
arcpy.management.CreateFileGDB(job_folder, "scratch.gdb")
# Setup the logger. Logs for each parallel process are not written to the console but instead to a
# process-specific log file.
log_file = os.path.join(job_folder, 'log.log')
logger = logging.getLogger("PercAccPoly_" + job_id)
logger.setLevel(logging.DEBUG)
if len(logger.handlers) <= 1:
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
formatter = logging.Formatter("%(process)d | %(message)s")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# Prepare a dictionary to store info about the analysis results
job_result = {
"jobId": job_id,
"jobFolder": job_folder,
"logFile": log_file,
"polygons": None
}
# Parse parameters for this process
facility_id, from_break, to_break = combo
logger.info(f"Processing FacilityID {facility_id}, FromBreak {from_break}, ToBreak {to_break}...")
# Select the subset of polygons for this FacilityID/FromBreak/ToBreak combo
selected_polys_layer = "SelectedPolys_" + job_id
if facility_id is None:
facility_query = arcpy.AddFieldDelimiters(time_lapse_polygons, FACILITY_ID_FIELD) + " IS NULL"
else:
facility_query = arcpy.AddFieldDelimiters(time_lapse_polygons, FACILITY_ID_FIELD) + " = " + str(facility_id)
query = facility_query + " AND " + \
arcpy.AddFieldDelimiters(time_lapse_polygons, FROM_BREAK_FIELD) + " = " + str(from_break) + " AND " + \
arcpy.AddFieldDelimiters(time_lapse_polygons, TO_BREAK_FIELD) + " = " + str(to_break)
arcpy.management.MakeFeatureLayer(time_lapse_polygons, selected_polys_layer, where_clause=query)
logger.info(f"{int(arcpy.management.GetCount(selected_polys_layer).getOutput(0))} time lapse polygons selected.")
# Do a spatial join in order to count the number of time lapse polygons intersect each "cell" in the raster-like
# polygon template. We are effectively applying the template to a specific set of time lapse polygons, doing the
# count, and creating the raw output. The result is a polygon feature class of raster-like cells with a field
# called Join_Count that shows the number of input time lapse polygons that intersect the cell using the specified
# match_option.
# Create a FieldMappings object for Spatial Join to preserve informational input fields
field_mappings = arcpy.FieldMappings()
for field in FIELDS_TO_PRESERVE:
fmap = arcpy.FieldMap()
fmap.addInputField(time_lapse_polygons, field)
fmap.mergeRule = "First"
field_mappings.addFieldMap(fmap)
# Do the spatial join
temp_spatial_join_fc = os.path.join(scratch_gdb, "SpatialJoin")
t0 = time.time()
arcpy.analysis.SpatialJoin(
raster_template,
selected_polys_layer,
temp_spatial_join_fc,
"JOIN_ONE_TO_ONE", # Output keeps only one copy of each "cell" when multiple time lapse polys intersect it
"KEEP_COMMON", # Delete any "cells" that don't overlap the time lapse polys being considered
field_mapping=field_mappings, # Preserve some fields from the original data
match_option="HAVE_THEIR_CENTER_IN"
)
logger.info(f"Finished spatial join in {time.time() - t0} seconds.")
# Dissolve all the little cells that were reached the same number of times to make the output more manageable
# Currently, the feature class contains a large number of little square polygons representing raster cells. The
# Join_Count field added by Spatial Join says how many of the input time lapse polygons overlapped the cell. We
# don't need all the little squares. We can dissolve them so that we have one polygon per unique value of
# Join_Count.
dissolved_polygons = os.path.join(scratch_gdb, "DissolvedPolys")
t0 = time.time()
arcpy.management.Dissolve(temp_spatial_join_fc, dissolved_polygons, FIELDS_TO_PRESERVE + ["Join_Count"])
logger.info(f"Finished dissolve in {time.time() - t0} seconds.")
job_result["polygons"] = dissolved_polygons
return job_result
def count_percent_access_polygons(time_lapse_polygons, raster_template, output_fc, max_processes):
"""Add counts to percent access polygons using parallel processing.
Args:
time_lapse_polygons (feature class catalog path): Time lapse polygons
raster_template (feature class catalog path): Raster template
output_fc (catalog path): Path to final output feature class
max_processes (int): Number of allowed parallel processes.
"""
# Scratch folder to store intermediate outputs from the parallel processes
scratch_folder = os.path.join(
arcpy.env.scratchFolder, "PercAccPoly_" + uuid.uuid4().hex) # pylint: disable=no-member
LOGGER.info(f"Intermediate outputs for parallel processes will be written to {scratch_folder}.")
os.mkdir(scratch_folder)
# Figure out the unique combinations of FacilityID, FromBreak, and ToBreak in the input data. Each of these
# will be processed separately and get a separate output. Also count the number of unique times of day that
# were used in the original analysis so we can calculate % later.
unique_output_combos = []
unique_times = []
fields = [
FACILITY_ID_FIELD,
FROM_BREAK_FIELD,
TO_BREAK_FIELD,
TIME_FIELD
]
for row in arcpy.da.SearchCursor(time_lapse_polygons, fields): # pylint: disable=no-member
unique_output_combos.append((row[0], row[1], row[2]))
unique_times.append(row[3])
unique_output_combos = sorted(set(unique_output_combos))
total_jobs = len(unique_output_combos)
num_time_steps = len(set(unique_times))
# For each set of time lapse polygons, generate the cell-like counts. Do this in parallel for maximum efficiency.
    LOGGER.info("Counting polygons overlapping each cell in parallel...")
completed_jobs = 0 # Track the number of jobs completed so far to use in logging
all_polygons = []
# Use the concurrent.futures ProcessPoolExecutor to spin up parallel processes
with futures.ProcessPoolExecutor(max_workers=max_processes) as executor:
        # Each parallel process calls the parallel_counter() function with the time lapse polygons, raster
        # template, scratch folder, and one FacilityID/FromBreak/ToBreak combo.
jobs = {executor.submit(
parallel_counter, time_lapse_polygons, raster_template, scratch_folder, combo
): combo for combo in unique_output_combos}
# As each job is completed, add some logging information and store the results to post-process later
for future in futures.as_completed(jobs):
completed_jobs += 1
LOGGER.info(f"Finished polygon cell calculation chunk {completed_jobs} of {total_jobs}.")
try:
                # The polygon counting job returns a results dictionary. Retrieve it.
result = future.result()
except Exception:
# If we couldn't retrieve the result, some terrible error happened. Log it.
LOGGER.error("Failed to get result from parallel processing.")
errs = traceback.format_exc().splitlines()
for err in errs:
LOGGER.error(err)
raise
# Log failed analysis
if not result["polygons"]:
LOGGER.warning(f"No output polygons generated for job id {result['jobId']}")
else:
all_polygons.append(result["polygons"])
LOGGER.info("Parallel processing complete. Merging results to output feature class...")
# Merge all individual output feature classes into one feature class.
arcpy.management.Merge(all_polygons, output_fc)
# Calculate a field showing the Percent of times each polygon was reached.
percent_field = "Percent"
arcpy.management.AddField(output_fc, percent_field, "DOUBLE")
expression = f"float(!Join_Count!) * 100.0 / float({num_time_steps})"
arcpy.management.CalculateField(output_fc, percent_field, expression)
LOGGER.info(f"Output feature class successfully created at {output_fc}")
# Cleanup
# Delete the job folders if the job succeeded
if DELETE_INTERMEDIATE_OD_OUTPUTS:
LOGGER.info("Deleting intermediate outputs...")
try:
shutil.rmtree(scratch_folder, ignore_errors=True)
except Exception: # pylint: disable=broad-except
# If deletion doesn't work, just throw a warning and move on. This does not need to kill the tool.
LOGGER.warning(f"Unable to delete intermediate output folder {scratch_folder}.")
if __name__ == "__main__":
# This script should always be launched via subprocess as if it were being called from the command line.
# Create the parser
parser = argparse.ArgumentParser(description=globals().get("__doc__", ""), fromfile_prefix_chars='@')
# Define Arguments supported by the command line utility
# --time-lapse-polygons parameter
help_string = "The full catalog path to the feature class containing input time lapse polygons."
parser.add_argument(
"-p", "--time-lapse-polygons", action="store", dest="time_lapse_polygons", help=help_string, required=True)
# --raster-template parameter
help_string = "The full catalog path to the polygon raster template created in earlier steps."
parser.add_argument(
"-r", "--raster-template", action="store", dest="raster_template", help=help_string, required=True)
# --output-fc parameter
help_string = "The full catalog path to the output feature class."
parser.add_argument(
"-o", "--output-fc", action="store", dest="output_fc", help=help_string, required=True)
# --max-processes parameter
help_string = "Maximum number parallel processes to use."
parser.add_argument(
"-mp", "--max-processes", action="store", dest="max_processes", type=int, help=help_string, required=True)
# Get arguments as dictionary.
args = vars(parser.parse_args())
# Count intersecting percent access polygon cells in parallel
start_time = time.time()
count_percent_access_polygons(**args)
run_time = round((time.time() - start_time) / 60, 2)
LOGGER.info(f"Parallel percent access polygon cell calculation completed in {run_time} minutes")
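    # Example command-line invocation (sketch; every path below is a placeholder):
    #   python parallel_cpap.py -p C:/outputs/TimeLapse.gdb/polys -r C:/outputs/raster_template.gdb/cells -o C:/outputs/results.gdb/perc_access -mp 4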
|
viberbot/api/messages/typed_message.py | TeamCodeCreator/ViberBot | 169 | 11133698 | from future.utils import python_2_unicode_compatible
from abc import abstractmethod
from viberbot.api.messages.message import Message
class TypedMessage(Message):
def __init__(self, message_type, tracking_data=None, keyboard=None, min_api_version=None, alt_text=None):
super(TypedMessage, self).__init__(tracking_data, keyboard, min_api_version, alt_text)
self._message_type = message_type
@abstractmethod
def to_dict(self):
message_data = super(TypedMessage, self).to_dict()
message_data['type'] = self._message_type
return message_data
@abstractmethod
def from_dict(self, message_data):
super(TypedMessage, self).from_dict(message_data)
return self
@abstractmethod
def validate(self):
"""
validates message has all the required fields before send
"""
return self._message_type is not None
@python_2_unicode_compatible
def __str__(self):
return super(TypedMessage, self).__str__()
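# Subclassing sketch (illustrative only; the "text" message type and its field
# are assumptions, not part of this module):
#   class ExampleTextMessage(TypedMessage):
#       def __init__(self, text=None, **kwargs):
#           super(ExampleTextMessage, self).__init__("text", **kwargs)
#           self._text = text
#       def to_dict(self):
#           message_data = super(ExampleTextMessage, self).to_dict()
#           message_data['text'] = self._text
#           return message_data
#       def from_dict(self, message_data):
#           super(ExampleTextMessage, self).from_dict(message_data)
#           self._text = message_data.get('text')
#           return self
#       def validate(self):
#           return super(ExampleTextMessage, self).validate() and self._text is not None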
|
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotTemporaryStatesEnum.py | htlcnn/ironpython-stubs | 182 | 11133723 | <reponame>htlcnn/ironpython-stubs
class dotTemporaryStatesEnum(Enum):
""" enum dotTemporaryStatesEnum,values: DOT_TEMPORARY_STATE_ACCEPTED (8),DOT_TEMPORARY_STATE_ACTIVE (6),DOT_TEMPORARY_STATE_DELETED (3),DOT_TEMPORARY_STATE_DM_ONGOING (4),DOT_TEMPORARY_STATE_MODIFIED (2),DOT_TEMPORARY_STATE_NEW (1),DOT_TEMPORARY_STATE_ORIGINAL (7),DOT_TEMPORARY_STATE_REJECTED (9),DOT_TEMPORARY_STATE_UNCHANGED (5),DOT_TEMPORARY_STATE_UNKNOWN (0),DOT_TEMPORARY_STATE_USE_EXISTING_REPRESENTATION (10) """
DOT_TEMPORARY_STATE_ACCEPTED=None
DOT_TEMPORARY_STATE_ACTIVE=None
DOT_TEMPORARY_STATE_DELETED=None
DOT_TEMPORARY_STATE_DM_ONGOING=None
DOT_TEMPORARY_STATE_MODIFIED=None
DOT_TEMPORARY_STATE_NEW=None
DOT_TEMPORARY_STATE_ORIGINAL=None
DOT_TEMPORARY_STATE_REJECTED=None
DOT_TEMPORARY_STATE_UNCHANGED=None
DOT_TEMPORARY_STATE_UNKNOWN=None
DOT_TEMPORARY_STATE_USE_EXISTING_REPRESENTATION=None
value__=None
|
training/utils.py | oseiskar/autosubsync | 244 | 11133770 | <filename>training/utils.py
"""
Leftover utilities useful for Jupyter notebooks and development but not
strictly required.
"""
def read_srt_to_data_frame(fn):
"Read SRT file to a pandas.DataFrame"
from autosubsync import srt_io
import pandas as pd
rows = [list(x) for x in srt_io.read_file_tuples(fn)]
df = pd.DataFrame(rows, columns=['seq', 'begin', 'end', 'text'])
return df.set_index('seq')
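# Usage sketch (the file path is a placeholder):
#   df = read_srt_to_data_frame("subtitles.srt")
#   df[['begin', 'end', 'text']].head()  # rows indexed by subtitle sequence number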
|
pajbot/models/pleblist.py | JoachimFlottorp/pajbot | 128 | 11133818 | import logging
from pajbot import utils
from pajbot.managers.db import Base
from sqlalchemy import INT, TEXT, Column, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy_utc import UtcDateTime
log = logging.getLogger("pajbot")
class PleblistSong(Base):
__tablename__ = "pleblist_song"
id = Column(INT, primary_key=True)
stream_id = Column(INT, ForeignKey("stream.id", ondelete="CASCADE"), index=True, nullable=False)
user_id = Column(INT, ForeignKey("user.id", ondelete="SET NULL"), nullable=True)
youtube_id = Column(TEXT, index=True, nullable=False)
date_added = Column(UtcDateTime(), nullable=False)
date_played = Column(UtcDateTime(), nullable=True)
skip_after = Column(INT, nullable=True)
song_info = relationship(
"PleblistSongInfo",
uselist=False,
primaryjoin="PleblistSongInfo.pleblist_song_youtube_id==PleblistSong.youtube_id",
foreign_keys="PleblistSongInfo.pleblist_song_youtube_id",
cascade="save-update,merge,expunge",
lazy="joined",
)
def __init__(self, stream_id, youtube_id, **options):
self.id = None
self.stream_id = stream_id
self.user_id = options.get("user_id", None)
self.youtube_id = youtube_id
self.date_added = utils.now()
self.date_played = None
self.skip_after = options.get("skip_after", None)
if self.skip_after is not None and self.skip_after < 0:
# Make sure skip_after cannot be a negative number
self.skip_after = None
def jsonify(self):
return {
"id": self.id,
"youtube_id": self.youtube_id,
"skip_after": self.skip_after,
"info": self.song_info.jsonify() if self.song_info is not None else None,
}
@property
def link(self):
return f"youtu.be/{self.youtube_id}"
class PleblistSongInfo(Base):
__tablename__ = "pleblist_song_info"
pleblist_song_youtube_id = Column(TEXT, primary_key=True, autoincrement=False)
title = Column(TEXT, nullable=False)
duration = Column(INT, nullable=False)
default_thumbnail = Column(TEXT, nullable=False)
def __init__(self, youtube_id, title, duration, default_thumbnail):
self.pleblist_song_youtube_id = youtube_id
self.title = title
self.duration = duration
self.default_thumbnail = default_thumbnail
def jsonify(self):
return {"title": self.title, "duration": self.duration, "default_thumbnail": self.default_thumbnail}
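# Construction sketch (the IDs and timing below are made up for illustration):
#   song = PleblistSong(stream_id=123, youtube_id="dQw4w9WgXcQ", skip_after=90)
#   song.link       # -> "youtu.be/dQw4w9WgXcQ"
#   song.jsonify()  # "info" stays None until a PleblistSongInfo row is attached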
|
running_modes/curriculum_learning/update_watcher.py | lilleswing/Reinvent-1 | 183 | 11133823 | import json
import os
import time
from running_modes.configurations import GeneralConfigurationEnvelope
from running_modes.configurations.curriculum_learning import CurriculumLearningComponents, \
CurriculumLearningConfiguration
class UpdateWatcher:
def __init__(self, runner):
self.runner = runner
def check_for_update(self, step):
if os.path.isfile(self.runner.config.update_lock):
with open(self.runner.config.general_configuration_path) as file:
sigma = self.runner.config.sigma
json_input = file.read().replace('\r', '').replace('\n', '')
configuration = json.loads(json_input)
self.runner.envelope = GeneralConfigurationEnvelope(**configuration)
config_components = CurriculumLearningComponents(**self.runner.envelope.parameters)
self.runner.config = CurriculumLearningConfiguration(**config_components.curriculum_learning)
# NOTE: We are keeping sigma unchanged
self.runner.config.sigma = sigma
self.runner.scoring_function = self.runner.setup_scoring_function(config_components.scoring_function)
self.runner.logger.save_diversity_memory_checkpoint(self.runner.diversity_filter, step)
self.runner.diversity_filter = self.runner._setup_diversity_filter(config_components.diversity_filter) #FIXME <== bad practice
self.runner.inception = self.runner.setup_inception(config_components.inception)
# self._margin_guard.update_widnow_start(step)
self.runner.logger.log_message(f"updating the run parameters at step {step}")
self.runner.logger.log_out_input_configuration(self.runner.envelope, step)
os.remove(self.runner.config.update_lock)
def check_for_pause(self):
"""Can be used for pausing the runner for a user defined interval of seconds"""
if os.path.isfile(self.runner.config.pause_lock):
pause_limit = self.runner.config.pause_limit
while pause_limit > 0:
self.runner.logger.log_message(f"Pausing for {pause_limit} seconds !")
time.sleep(1.0)
pause_limit -= 1
if not os.path.isfile(self.runner.config.pause_lock):
pause_limit = 0
if os.path.isfile(self.runner.config.pause_lock):
os.remove(self.runner.config.pause_lock)
def check_for_scheduled_update(self, step: int):
if self.runner.config.scheduled_update_step == step:
with open(self.runner.config.update_lock, 'a'):
os.utime(self.runner.config.update_lock, None)
|
tests/features/mail/test_terminal.py | cercos/masonite | 1,816 | 11133856 | <filename>tests/features/mail/test_terminal.py
from tests import TestCase
from src.masonite.mail import Mailable
class Welcome(Mailable):
def build(self):
return (
self.to("<EMAIL>")
.subject("Masonite 4")
.from_("<EMAIL>")
.text("Hello from Masonite!")
.html("<h1>Hello from Masonite!</h1>")
)
class Other(Mailable):
def build(self):
return (
self.to("<EMAIL>")
.subject("Other")
.from_("<EMAIL>")
.text("Hello from Masonite!")
.html("<h1>Hello from Masonite!</h1>")
.driver("terminal")
)
class TestTerminalDriver(TestCase):
def test_send_mailable(self):
self.application.make("mail").mailable(
Welcome().attach("invoice", "tests/integrations/storage/invoice.pdf")
).send(driver="terminal")
def test_define_driver_with_mailable(self):
self.application.make("mail").mailable(
Other().attach("invoice", "tests/integrations/storage/invoice.pdf")
).send()
|
src/server/jobs/schema.py | monosidev/monosi | 156 | 11133895 | <reponame>monosidev/monosi
from server.pipeline import monitors_pipeline
from .base import CollectorJob
class SchemaCollectorJob(CollectorJob):
def pipelines(self):
return [monitors_pipeline]
def configuration(self):
return { 'monitors': [ { 'type': 'schema' } ] }
|
LeetCode/python/1052.py | ZintrulCre/LeetCode_Archiver | 279 | 11133900 | class Solution(object):
def maxSatisfied(self, customers, grumpy, X):
"""
:type customers: List[int]
:type grumpy: List[int]
:type X: int
:rtype: int
"""
res, n = 0, len(customers)
for i in range(n):
res += customers[i] * (grumpy[i] ^ 1)
for i in range(X):
if i >= n:
return res
res += customers[i] * grumpy[i]
temp = res
for i in range(X, n):
temp = temp - customers[i - X] * grumpy[i - X] + customers[i] * grumpy[i]
res = max(res, temp)
return res
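# Sliding-window behavior sketch (example values taken from the problem statement):
#   Solution().maxSatisfied([1, 0, 1, 2, 1, 1, 7, 5], [0, 1, 0, 1, 0, 1, 0, 1], 3)  # -> 16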
|
interactive_demo/models/mobile_resnet_generator.py | wxy347/gan-compression | 1,005 | 11133926 | <filename>interactive_demo/models/mobile_resnet_generator.py
import functools
from torch import nn
from models.mobile_modules import SeparableConv2d
class MobileResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, dropout_rate, use_bias):
super(MobileResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, dropout_rate, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, dropout_rate, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [
SeparableConv2d(in_channels=dim, out_channels=dim,
kernel_size=3, padding=p, stride=1),
norm_layer(dim), nn.ReLU(True)
]
conv_block += [nn.Dropout(dropout_rate)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [
SeparableConv2d(in_channels=dim, out_channels=dim,
kernel_size=3, padding=p, stride=1),
norm_layer(dim)
]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class MobileResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf, norm_layer=nn.InstanceNorm2d,
dropout_rate=0, n_blocks=9, padding_type='reflect'):
assert (n_blocks >= 0)
super(MobileResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
n_blocks1 = n_blocks // 3
n_blocks2 = n_blocks1
n_blocks3 = n_blocks - n_blocks1 - n_blocks2
for i in range(n_blocks1):
model += [MobileResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
dropout_rate=dropout_rate,
use_bias=use_bias)]
for i in range(n_blocks2):
model += [MobileResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
dropout_rate=dropout_rate,
use_bias=use_bias)]
for i in range(n_blocks3):
model += [MobileResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
dropout_rate=dropout_rate,
use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
# input = input.clamp(-1, 1)
# for i, module in enumerate(self.model):
# print(i, input.size())
# print(module)
# if isinstance(module, nn.Conv2d):
# print(module.stride)
# input = module(input)
# return input
return self.model(input)
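# Instantiation sketch (channel counts and input size are illustrative; assumes
# `import torch` at the call site):
#   net = MobileResnetGenerator(input_nc=3, output_nc=3, ngf=48, n_blocks=9)
#   fake = net(torch.randn(1, 3, 256, 256))  # -> tensor of shape (1, 3, 256, 256)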
|
src/tox/interpreters/windows/__init__.py | snsnlou/tox | 2,811 | 11133961 | <reponame>snsnlou/tox
from __future__ import unicode_literals
from threading import Lock
import tox
from ..common import base_discover
from ..py_spec import CURRENT
from ..via_path import check_with_path
@tox.hookimpl
def tox_get_python_executable(envconfig):
spec, path = base_discover(envconfig)
if path is not None:
return path
# second check if the py.exe has it (only for non path specs)
if spec.path is None:
py_exe = locate_via_pep514(spec)
if py_exe is not None:
return py_exe
# third check if the literal base python is on PATH
candidates = [envconfig.basepython]
# fourth check if the name is on PATH
if spec.name is not None and spec.name != envconfig.basepython:
candidates.append(spec.name)
# or check known locations
if spec.major is not None and spec.minor is not None:
if spec.name == "python":
# The standard names are in predictable places.
candidates.append(r"c:\python{}{}\python.exe".format(spec.major, spec.minor))
return check_with_path(candidates, spec)
_PY_AVAILABLE = []
_PY_LOCK = Lock()
def locate_via_pep514(spec):
with _PY_LOCK:
if not _PY_AVAILABLE:
from . import pep514
_PY_AVAILABLE.extend(pep514.discover_pythons())
_PY_AVAILABLE.append(CURRENT)
for cur_spec in _PY_AVAILABLE:
if cur_spec.satisfies(spec):
return cur_spec.path
|
gerapy_auto_extractor/patterns/title.py | Insutanto/GerapyAutoExtractor | 214 | 11133962 | METAS = [
'//meta[starts-with(@property, "og:title")]/@content',
'//meta[starts-with(@name, "og:title")]/@content',
'//meta[starts-with(@property, "title")]/@content',
'//meta[starts-with(@name, "title")]/@content',
'//meta[starts-with(@property, "page:title")]/@content',
]
|
kur/loss/loss.py | greedyuser/kur | 867 | 11133976 | """
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..utils import get_subclasses
###############################################################################
def keras_wrap(model, target, output, loss):
""" Convenience function for wrapping a Keras loss function.
"""
# pylint: disable=import-error
import keras.objectives as O
import keras.backend as K
# pylint: enable=import-error
if isinstance(loss, str):
loss = O.get(loss)
shape = model.outputs[target].value._keras_shape # pylint: disable=protected-access
ins = [
(target, K.placeholder(
ndim=len(shape),
dtype=K.dtype(model.outputs[target].value),
name=target
))
]
out = loss(ins[0][1], output)
return ins, out
###############################################################################
class Loss:
""" Base class for all loss functions (also called objective functions).
"""
###########################################################################
@classmethod
def get_name(cls):
""" Returns the name of the loss function.
# Return value
A lower-case string unique to this loss function.
"""
return cls.__name__.lower()
###########################################################################
@staticmethod
def get_all_losses():
""" Returns an iterator to the names of all loss functions.
"""
for cls in get_subclasses(Loss):
yield cls
###########################################################################
@staticmethod
def get_loss_by_name(name):
""" Finds a loss function class with the given name.
"""
name = name.lower()
for cls in Loss.get_all_losses():
if cls.get_name() == name:
return cls
raise ValueError('No such loss function with name "{}"'.format(name))
###########################################################################
def __init__(self, weight=None):
""" Creates a new loss function.
# Arguments
weight: float. A relative weight to apply to this loss function.
This is only meaningful in models which have multiple loss
functions.
"""
if weight is not None:
raise ValueError('Loss function weights have not been implemented '
'yet.')
self.weight = 1.0 if weight is None else weight
###########################################################################
def get_weight(self):
""" Returns the loss function weight.
"""
return self.weight
###########################################################################
def get_loss(self, model, target, output):
""" Returns the loss tensor for this output.
# Arguments
model: Model instance.
target: str. The name of the output layer to apply the loss
function to.
output: tensor (implemented-specific). The symbolic tensor for this
output layer.
# Return value
A tuple of the form:
```python
(
# Input tensors
[
(input_name, placeholder),
(input_name, placeholder),
...
],
# Output value
loss_value
)
```
The derived class is required to return all required input
placeholder, including placeholders for the target model outputs.
"""
raise NotImplementedError
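###############################################################################
# Subclass sketch (illustrative only; 'mse' is assumed to be a valid Keras loss
# name here and this class is not part of the original module):
#   class MeanSquaredError(Loss):
#       def get_loss(self, model, target, output):
#           return keras_wrap(model, target, output, 'mse')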
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
|
runtimes/alibi-explain/tests/helpers/tf_model.py | SeldonIO/MLServer | 191 | 11134013 | import os
from pathlib import Path
import tensorflow as tf
from mlserver import MLModel
from mlserver.codecs import NumpyCodec
from mlserver.types import InferenceRequest, InferenceResponse
def get_tf_mnist_model_uri() -> Path:
return Path(os.path.dirname(__file__)).parent / "data" / "tf_mnist" / "model.h5"
class TFMNISTModel(MLModel):
async def predict(self, payload: InferenceRequest) -> InferenceResponse:
np_codec = NumpyCodec
model_input = payload.inputs[0]
input_data = np_codec.decode(model_input)
output_data = self._model(input_data).numpy()
return InferenceResponse(
model_name=self.name,
outputs=[np_codec.encode("predict", output_data)],
)
async def load(self) -> bool:
self._model = tf.keras.models.load_model(get_tf_mnist_model_uri())
self.ready = True
return self.ready
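# Hedged usage sketch (not part of the original helper): building a V2
# inference request for this model by hand. The input name, the (1, 28, 28, 1)
# shape and the FP32 datatype are assumptions about the MNIST model stored
# under data/tf_mnist.
def build_example_request() -> InferenceRequest:
    import numpy as np
    from mlserver.types import RequestInput
    dummy = np.zeros((1, 28, 28, 1), dtype=np.float32)
    return InferenceRequest(
        inputs=[
            RequestInput(
                name="input-0",
                shape=list(dummy.shape),
                datatype="FP32",
                data=dummy.flatten().tolist(),
            )
        ]
    )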
|
conans/test/integration/deprecated/test_deprecated.py | Wonders11/conan | 6,205 | 11134016 | import unittest
from conans.test.utils.tools import TestClient, GenConanfile
class DeprecatedTestCase(unittest.TestCase):
def test_no_deprecated(self):
t = TestClient()
t.save({'taskflow.py': GenConanfile("cpp-taskflow", "1.0").with_deprecated("''")})
t.run("create taskflow.py")
self.assertNotIn("Please, consider changing your requirements.", t.out)
def test_deprecated_simple(self):
t = TestClient()
t.save({'taskflow.py': GenConanfile("cpp-taskflow", "1.0").with_deprecated("True")})
t.run("create taskflow.py")
self.assertIn("cpp-taskflow/1.0: WARN: Recipe 'cpp-taskflow/1.0' is deprecated. "
"Please, consider changing your requirements.", t.out)
t.run("create taskflow.py conan/stable")
self.assertIn("cpp-taskflow/1.0@conan/stable: WARN: Recipe 'cpp-taskflow/1.0@conan/stable' "
"is deprecated. Please, consider changing your requirements.", t.out)
def test_deprecated_with(self):
t = TestClient()
t.save({'taskflow.py': GenConanfile("cpp-taskflow", "1.0").with_deprecated('"taskflow"')})
t.run("create taskflow.py")
self.assertIn("cpp-taskflow/1.0: WARN: Recipe 'cpp-taskflow/1.0' is deprecated in "
"favor of 'taskflow'. Please, consider changing your requirements.", t.out)
t.run("create taskflow.py conan/stable")
self.assertIn("cpp-taskflow/1.0@conan/stable: WARN: Recipe 'cpp-taskflow/1.0@conan/stable' "
"is deprecated in favor of 'taskflow'. Please, consider "
"changing your requirements.", t.out)
|
src_python/habitat_sim/registry.py | narekvslife/habitat-sim | 1,457 | 11134019 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import re
from typing import DefaultDict, Optional, Type
__all__ = ["registry"]
def _camel_to_snake(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
class _Registry:
r"""registry is a central source of truth in Habitat-Sim
Taken from Pythia, it is inspired by Redux's
concept of a global store. registry maintains mappings from unique keys to
various kinds of information. Special functions in registry can be used as
decorators to register different kinds of classes.
Import the global registry object using ``from habitat_sim import registry``.
Then use various decorators for registering
different kinds of classes with unique keys:
- Register a movement function: ``@registry.register_move_fn``
"""
_mapping: DefaultDict[str, dict] = collections.defaultdict(dict)
@classmethod
def register_move_fn(
cls,
controller: Optional[Type] = None,
*,
name: Optional[str] = None,
body_action: Optional[bool] = None,
):
r"""Registers a new control with Habitat-Sim. Registered controls can
then be retrieved via `get_move_fn()`
See `new-actions <new-actions.html>`_ for an example of how to add new actions
*outside* the core habitat_sim package.
:param controller: The class of the controller to register. Must inherit from `agent.SceneNodeControl`.
If :py:`None`, will return a wrapper for use with decorator syntax.
:param name: The name to register the control with. If :py:`None`, will
register with the name of the controller converted to snake case,
i.e. a controller with class name ``MoveForward`` will be registered as
``move_forward``.
:param body_action: Whether this action manipulates the agent's body
(thereby also moving the sensors) or just the sensors.
This is a non-optional keyword argument and must be set (this is done
for readability).
"""
assert (
body_action is not None
), "body_action must be explicitly set to True or False"
from habitat_sim.agent.controls.controls import SceneNodeControl
def _wrapper(controller: Type[SceneNodeControl]):
assert issubclass(
controller, SceneNodeControl
), "All controls must inherit from habitat_sim.agent.SceneNodeControl"
cls._mapping["move_fn"][
_camel_to_snake(controller.__name__) if name is None else name
] = controller(body_action)
return controller
if controller is None:
return _wrapper
else:
return _wrapper(controller)
@classmethod
def register_noise_model(
cls, noise_model: Optional[Type] = None, *, name: Optional[str] = None
):
r"""Registers a new sensor noise model with Habitat-Sim
:param noise_model: The class of the noise model to register
If `None`, will return a wrapper for use with decorator syntax
:param name: The name to register the noise model with
If `None`, will register with the name of the noise_model
"""
from habitat_sim.sensors.noise_models.sensor_noise_model import SensorNoiseModel
def _wrapper(noise_model: Type[SensorNoiseModel]):
assert issubclass(
noise_model, SensorNoiseModel
), "All noise_models must inherit from habitat_sim.sensor.SensorNoiseModel"
cls._mapping["sensor_noise_model"][
noise_model.__name__ if name is None else name
] = noise_model
return noise_model
if noise_model is None:
return _wrapper
else:
return _wrapper(noise_model)
@classmethod
def register_pose_extractor(
cls, pose_extractor: Optional[Type] = None, *, name: Optional[str] = None
):
r"""Registers a new pose extractor model with Habitat-Sim
:param pose_extractor: The class of the pose extractor to register
If `None`, will return a wrapper for use with decorator syntax
:param name: The name to register the pose extractor with
If `None`, will register with the name of the pose_extractor
"""
from habitat_sim.utils.data.pose_extractor import PoseExtractor
def _wrapper(pose_extractor: Type[PoseExtractor]):
assert issubclass(
pose_extractor, PoseExtractor
), "All pose_extractors must inherit from habitat_sim.utils.data.PoseExtractor"
cls._mapping["pose_extractor"][
pose_extractor.__name__ if name is None else name
] = pose_extractor
return pose_extractor
if pose_extractor is None:
return _wrapper
else:
return _wrapper(pose_extractor)
@classmethod
def _get_impl(cls, _type, name: str):
return cls._mapping[_type].get(name, None)
@classmethod
def get_move_fn(cls, name: str):
r"""Retrieve the move_fn registered under ``name``
:param name: The name provided to `register_move_fn`
"""
return cls._get_impl("move_fn", name)
@classmethod
def get_noise_model(cls, name: str):
r"""Retrieve the noise_model registered under ``name``
:param name: The name provided to `register_noise_model`
"""
return cls._get_impl("sensor_noise_model", name)
@classmethod
def get_pose_extractor(cls, name: str):
r"""Retrieve the pose_extractor registered under ``name``
:param name: The name provided to `register_pose_extractor`
"""
return cls._get_impl("pose_extractor", name)
registry = _Registry()
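# Hedged usage sketch (not part of the original module): registering a custom
# control with the decorator form described in the _Registry docstring. The
# control name ("MoveUp" -> "move_up") and its no-op body are illustrative
# assumptions; a real control would translate `scene_node` by
# `actuation_spec.amount`.
def _example_register_custom_control():
    from habitat_sim.agent.controls.controls import SceneNodeControl

    @registry.register_move_fn(body_action=True)
    class MoveUp(SceneNodeControl):
        def __call__(self, scene_node, actuation_spec) -> None:
            pass  # placeholder; a real control would move the node here

    return registry.get_move_fn("move_up")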
|
examples/tutorial/employee.py | nucleic/atom | 222 | 11134021 | <gh_stars>100-1000
# --------------------------------------------------------------------------------------
# Copyright (c) 2013-2021, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# --------------------------------------------------------------------------------------
"""Simple example of a class hierarchy built on atom.
"""
import datetime
from atom.api import Atom, Bool, Int, Range, Str, Tuple, Typed, Value, observe
class Person(Atom):
"""A simple class representing a person object."""
last_name = Str()
first_name = Str()
age = Range(low=0)
dob = Value(datetime.date(1970, 1, 1))
debug = Bool(False)
@observe("age")
def debug_print(self, change):
"""Prints out a debug message whenever the person's age changes."""
if self.debug:
templ = "{first} {last} is {age} years old."
s = templ.format(
first=self.first_name,
last=self.last_name,
age=self.age,
)
print(s)
class Employer(Person):
"""An employer is a person who runs a company."""
# The name of the company
company_name = Str()
class Employee(Person):
"""An employee is a person with a boss and a phone number."""
# The employee's boss
boss = Typed(Employer)
# The employee's phone number as a tuple of 3 ints
phone = Tuple(Int())
# This method will be called automatically by atom when the
# employee's phone number changes
def _observe_phone(self, val):
if val["type"] == "update":
msg = "received new phone number for %s: %s"
print(msg % (self.first_name, val["value"]))
if __name__ == "__main__":
# Create an employee with a boss
boss_john = Employer(
first_name="John",
last_name="Paw",
company_name="Packrat's Cats",
)
employee_mary = Employee(
first_name="Mary",
last_name="Sue",
boss=boss_john,
phone=(555, 555, 5555),
)
employee_mary.phone = (100, 100, 100)
employee_mary.age = 40 # no debug message
employee_mary.debug = True
employee_mary.age = 50
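    # Hedged addition (not part of the original tutorial): observers can also
    # be attached dynamically at runtime; the handler below is illustrative.
    def print_dob_change(change):
        print("dob changed to", change["value"])
    employee_mary.observe("dob", print_dob_change)
    employee_mary.dob = datetime.date(1980, 5, 17)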
|
test_installation_script.py | textext/textext | 508 | 11134025 | """
This file is part of TexText, an extension for the vector
illustration program Inkscape.
Copyright (c) 2006-2021 TexText developers.
TexText is released under the 3-Clause BSD license. See
file LICENSE.txt or go to https://github.com/textext/textext
for full license details.
This script creates fake executables (pdflatex, python2.7, etc.)
to test the requirements check in setup.py.
All fake executables are created in a temporary directory and safely
deleted at the end.
setup.py is run with PATH set to the temporary directory, so the
actual environment does not affect this test.
"""
import os
import shutil
import sys
import subprocess
import tempfile
class TempDirectory(object):
def __init__(self):
self.name = None
def __enter__(self):
self.name = tempfile.mkdtemp()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.name is not None:
shutil.rmtree(self.name)
class FakeExecutablesMaker(object):
def __init__(self, dirname):
assert os.path.isdir(dirname)
self.dirname = dirname
def __call__(self, name, output="", channel='stdout'):
assert channel in ["stdout", "stderr"]
command_name = os.path.join(self.dirname, name)
with open(command_name, "w") as fout:
fout.write("#!%s\n" % sys.executable)
fout.write("from __future__ import print_function\n")
fout.write("import sys\n")
fout.write("print(%s, file=sys.%s)" % (repr(output), channel))
os.chmod(command_name, 0o755)
def test_configuration(fake_commands, expected_exit_code):
with TempDirectory() as f:
env = dict(os.environ, PATH=f.name)
make_fake_executable = FakeExecutablesMaker(f.name)
for args in fake_commands:
make_fake_executable(*args)
ret_code = subprocess.call([sys.executable,
'setup.py',
"--skip-extension-install"],
env=env
)
assert ret_code == expected_exit_code, '%d != %d' % (ret_code, expected_exit_code)
print("\033[92m ====> Test %s successfully with expected exit code %d passed!\033[0m\n" % (
fake_commands, expected_exit_code))
REQUIREMENT_CHECK_SUCCESS = 0
REQUIREMENT_CHECK_UNKNOWN = 64
REQUIREMENT_CHECK_ERROR = 65
good_configurations = []
# Definition of working combinations of Inkscape and LaTeX
for latex in [("pdflatex",), ("lualatex",), ("xelatex",)]:
good_configurations.append([("inkscape", "Inkscape 1.0 (4035a4fb49, 2020-05-01)"), latex])
good_configurations.append([("inkscape", "Inkscape 1.1.1 (3bf5ae0d25, 2021-09-20)"), latex])
good_configurations.append([("inkscape", "Inkscape 1.2-dev (1dd7bebcbd, 2021-12-20)"), latex])
# Test: Installation of working combinations must succeed
for good_configuration in good_configurations:
test_configuration(good_configuration, REQUIREMENT_CHECK_SUCCESS)
# Test: If one component of the working combinations is missing
# installation must fail
for good_configuration in good_configurations:
for i in range(len(good_configuration)):
# good configuration without one element is bad
bad_configuration = good_configuration[:i] + good_configuration[i + 1:]
print(bad_configuration)
test_configuration(bad_configuration, REQUIREMENT_CHECK_ERROR)
# Test wrong Inkscape version and no pdflatex installed
test_configuration([
("inkscape", "Inkscape 0.92.3 (2405546, 2018-03-11)"),
], REQUIREMENT_CHECK_ERROR)
# Test wrong Inkscape version and pdflatex installed
test_configuration([
("inkscape", "Inkscape 0.92.3 (2405546, 2018-03-11)"),
("pdflatex",)
], REQUIREMENT_CHECK_ERROR)
# Test what's happening when no version information
# is returned by Inkscape
test_configuration([
("inkscape",),
("pdflatex",),
], REQUIREMENT_CHECK_UNKNOWN)
# Test: Nothing is installed -> Installation must fail
test_configuration([
], REQUIREMENT_CHECK_ERROR)
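# Hedged usage sketch (not part of the original script): the helpers above can
# also be exercised on their own, e.g. to fabricate a tool that reports its
# version on stderr; the tool name and version string are illustrative.
with TempDirectory() as _demo:
    _demo_maker = FakeExecutablesMaker(_demo.name)
    _demo_maker("lualatex", "This is LuaTeX, Version 1.10.0", channel="stderr")
    assert os.path.exists(os.path.join(_demo.name, "lualatex"))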
|
torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/__init__.py | Chillee/benchmark | 5,013 | 11134042 | from .bert import BERTEmbedding
|