max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
tests/test_runtime/async_benchmark.py | evgps/mmdetection_trashcan | 367 | 11070234 | <reponame>evgps/mmdetection_trashcan
import asyncio
import os
import shutil
import urllib.request
import mmcv
import torch
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.utils.contextmanagers import concurrent
from mmdet.utils.profiling import profile_time
async def main():
"""Benchmark between async and synchronous inference interfaces.
Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x:
async sync
7981.79 ms 9660.82 ms
8074.52 ms 9660.94 ms
7976.44 ms 9406.83 ms
Async variant takes about 0.83-0.85 of the time of the synchronous
interface.
"""
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(
project_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
checkpoint_file = os.path.join(
project_dir,
'checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
if not os.path.exists(checkpoint_file):
url = ('http://download.openmmlab.com/mmdetection/v2.0'
'/mask_rcnn/mask_rcnn_r50_fpn_1x_coco'
'/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
print(f'Downloading {url} ...')
local_filename, _ = urllib.request.urlretrieve(url)
os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)
shutil.move(local_filename, checkpoint_file)
print(f'Saved as {checkpoint_file}')
else:
print(f'Using existing checkpoint {checkpoint_file}')
device = 'cuda:0'
model = init_detector(
config_file, checkpoint=checkpoint_file, device=device)
# queue is used for concurrent inference of multiple images
streamqueue = asyncio.Queue()
# queue size defines concurrency level
streamqueue_size = 4
for _ in range(streamqueue_size):
streamqueue.put_nowait(torch.cuda.Stream(device=device))
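# each queued CUDA stream is later checked out by the concurrent() context manager,
# so up to streamqueue_size detections can be in flight at once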
# test a single image and show the results
img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg'))
# warmup
await async_inference_detector(model, img)
async def detect(img):
async with concurrent(streamqueue):
return await async_inference_detector(model, img)
num_of_images = 20
with profile_time('benchmark', 'async'):
tasks = [
asyncio.create_task(detect(img)) for _ in range(num_of_images)
]
async_results = await asyncio.gather(*tasks)
with torch.cuda.stream(torch.cuda.default_stream()):
with profile_time('benchmark', 'sync'):
sync_results = [
inference_detector(model, img) for _ in range(num_of_images)
]
result_dir = os.path.join(project_dir, 'demo')
model.show_result(
img,
async_results[0],
score_thr=0.5,
show=False,
out_file=os.path.join(result_dir, 'result_async.jpg'))
model.show_result(
img,
sync_results[0],
score_thr=0.5,
show=False,
out_file=os.path.join(result_dir, 'result_sync.jpg'))
if __name__ == '__main__':
asyncio.run(main())
|
autogl/module/hpo/suggestion/__init__.py | dedsec-9/AutoGL | 824 | 11070254 | # Files in this folder are reproduced from https://github.com/tobegit3hub/advisor with some changes.
|
gluoncv/auto/estimators/center_net/__init__.py | Kh4L/gluon-cv | 5,447 | 11070256 | <gh_stars>1000+
"""SSD Estimator implementations"""
from .center_net import CenterNetEstimator
|
ortools/linear_solver/samples/basic_example.py | AlohaChina/or-tools | 8,273 | 11070263 | <filename>ortools/linear_solver/samples/basic_example.py
#!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal example to call the GLOP solver."""
# [START program]
# [START import]
from ortools.linear_solver import pywraplp
from ortools.init import pywrapinit
# [END import]
def main():
# [START solver]
# Create the linear solver with the GLOP backend.
solver = pywraplp.Solver.CreateSolver('GLOP')
# [END solver]
# [START variables]
# Create the variables x and y.
x = solver.NumVar(0, 1, 'x')
y = solver.NumVar(0, 2, 'y')
print('Number of variables =', solver.NumVariables())
# [END variables]
# [START constraints]
# Create a linear constraint, 0 <= x + y <= 2.
ct = solver.Constraint(0, 2, 'ct')
ct.SetCoefficient(x, 1)
ct.SetCoefficient(y, 1)
print('Number of constraints =', solver.NumConstraints())
# [END constraints]
# [START objective]
# Create the objective function, 3 * x + y.
objective = solver.Objective()
objective.SetCoefficient(x, 3)
objective.SetCoefficient(y, 1)
objective.SetMaximization()
# [END objective]
# [START solve]
solver.Solve()
# [END solve]
# [START print_solution]
print('Solution:')
print('Objective value =', objective.Value())
print('x =', x.solution_value())
print('y =', y.solution_value())
# [END print_solution]
if __name__ == '__main__':
pywrapinit.CppBridge.InitLogging('basic_example.py')
cpp_flags = pywrapinit.CppFlags()
cpp_flags.logtostderr = True
cpp_flags.log_prefix = False
pywrapinit.CppBridge.SetFlags(cpp_flags)
main()
# [END program]
|
python/example_code/emr/pyspark_top_product_keyword.py | iconara/aws-doc-sdk-examples | 5,166 | 11070276 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to write a script that queries historical Amazon review data that is
stored in a public Amazon S3 bucket. The query returns the top reviewed products from
a category that contain a keyword in their product titles.
This script is intended to be run as an Amazon EMR job step and uses PySpark to manage
running the query on the cluster instances.
To learn more about the Amazon Customer Reviews Dataset, see the README:
https://s3.amazonaws.com/amazon-reviews-pds/readme.html
"""
# snippet-start:[emr.python.spark.top_category_reviews]
import argparse
from pyspark.sql import SparkSession
from pyspark.sql import functions as func
def query_review_data(category, title_keyword, count, output_uri):
"""
Query the Amazon review dataset for top reviews from a category that contain a
keyword in their product titles. The output of the query is written as JSON
to the specified output URI.
:param category: The category to query, such as Books or Grocery.
:param title_keyword: The keyword that must be included in each returned product
title.
:param count: The number of results to return.
:param output_uri: The URI where the output JSON files are stored, typically an
Amazon S3 bucket, such as 's3://example-bucket/review-output'.
"""
with SparkSession.builder.getOrCreate() as spark:
input_uri = f's3://amazon-reviews-pds/parquet/product_category={category}'
df = spark.read.parquet(input_uri)
query_agg = df.filter(df.verified_purchase == 'Y') \
.where(func.lower(func.col('product_title')).like(f'%{title_keyword}%')) \
.groupBy('product_title') \
.agg({'star_rating': 'avg', 'review_id': 'count'}) \
.filter(func.col('count(review_id)') >= 50) \
.sort(func.desc('avg(star_rating)')) \
.limit(count) \
.select(func.col('product_title').alias('product'),
func.col('count(review_id)').alias('review_count'),
func.col('avg(star_rating)').alias('review_avg_stars'))
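# the dict-style agg() above yields columns named 'count(review_id)' and 'avg(star_rating)';
# the filter, sort, and select steps rely on those generated names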
query_agg.write.mode('overwrite').json(output_uri)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--category')
parser.add_argument('--title_keyword')
parser.add_argument('--count', type=int)
parser.add_argument('--output_uri')
args = parser.parse_args()
query_review_data(args.category, args.title_keyword, args.count, args.output_uri)
# snippet-end:[emr.python.spark.top_category_reviews]
|
pb_bss/evaluation/module_srmr.py | fgnt/pb_bss | 171 | 11070283 | <filename>pb_bss/evaluation/module_srmr.py
import numpy as np
def srmr(signal, sample_rate, n_cochlear_filters=23, low_freq=125, min_cf=4, max_cf=128, fast=False, norm=False,):
"""
Wrapper around the SRMRpy package to allow an independent axis
Note: The results of this implementation are slightly different from the Matlab implementation, but a high
correlation between the behavior of both implementations is still present.
However, activating the fast implementation or norm drastically changes the absolute values of the results due to
changes in the gammatone package. Please make sure to check the correlation between the
Matlab implementation and this implementation before activating either the normalized or fast SRMR.
>>> import paderbox as pb
>>> a = pb.testing.testfile_fetcher.get_file_path('speech_bab_0dB.wav')
>>> a = pb.io.load_audio(a)
>>> srmr(a, 16000) # doctest: +ELLIPSIS
1.8659610077...
>>> srmr([a, a], 16000)
array([1.86596101, 1.86596101])
"""
try:
import srmrpy
except ImportError:
raise AssertionError(
'To use this srmr implementation, install the SRMRpy package from\n'
'https://github.com/jfsantos/SRMRpy\n'
)
signal = np.asarray(signal)
if signal.ndim >= 2:
for i in range(signal.ndim-1):
assert signal.shape[i] < 30, (i, signal.shape) # NOQA
srmrs = []
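# compute SRMR independently for every 1-D signal along the last axis,
# then restore the leading (batch) shape at the end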
for i in np.ndindex(*signal.shape[:-1]):
# TODO: Add option to also return the SRMR per gammatone filterbank (typically unused in evaluations)
srmrs.append(srmrpy.srmr(signal[i], sample_rate, n_cochlear_filters=n_cochlear_filters, low_freq=low_freq,
min_cf=min_cf, max_cf=max_cf, fast=fast, norm=norm)[0])
return np.array(srmrs).reshape(signal.shape[:-1])
elif signal.ndim == 1:
# TODO: Add option to also return the SRMR per gammatone filterbank (typically unused in evaluations)
return srmrpy.srmr(signal, sample_rate, n_cochlear_filters=n_cochlear_filters, low_freq=low_freq,
min_cf=min_cf, max_cf=max_cf, fast=fast, norm=norm)[0]
else:
raise NotImplementedError(signal.ndim)
|
tests/test_wstrust_response.py | Yoni-Mantzur/azure-activedirectory-library-for-python | 258 | 11070285 | #------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import unittest
import os
import six
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
from adal.constants import XmlNamespaces, Errors, WSTrustVersion
from adal.wstrust_response import WSTrustResponse
from adal.wstrust_response import findall_content
_namespaces = XmlNamespaces.namespaces
_call_context = {'log_context' : {'correlation-id':'test-corr-id'}}
class Test_wstrustresponse(unittest.TestCase):
def test_parse_error_happy_path(self):
errorResponse = '''
<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://www.w3.org/2005/08/addressing" xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<s:Header>
<a:Action s:mustUnderstand="1">http://www.w3.org/2005/08/addressing/soap/fault</a:Action>
<o:Security s:mustUnderstand="1" xmlns:o="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
<u:Timestamp u:Id="_0">
<u:Created>2013-07-30T00:32:21.989Z</u:Created>
<u:Expires>2013-07-30T00:37:21.989Z</u:Expires>
</u:Timestamp>
</o:Security>
</s:Header>
<s:Body>
<s:Fault>
<s:Code>
<s:Value>s:Sender</s:Value>
<s:Subcode>
<s:Value xmlns:a="http://docs.oasis-open.org/ws-sx/ws-trust/200512">a:RequestFailed</s:Value>
</s:Subcode>
</s:Code>
<s:Reason>
<s:Text xml:lang="en-US">MSIS3127: The specified request failed.</s:Text>
</s:Reason>
</s:Fault>
</s:Body>
</s:Envelope>'''
wstrustResponse = WSTrustResponse(_call_context, errorResponse, WSTrustVersion.WSTRUST13)
exception_text = "Server returned error in RSTR - ErrorCode: RequestFailed : FaultMessage: MSIS3127: The specified request failed"
with six.assertRaisesRegex(self, Exception, exception_text) as cm:
wstrustResponse.parse()
def test_token_parsing_happy_path(self):
wstrustFile = open(os.path.join(os.getcwd(), 'tests', 'wstrust', 'RSTR.xml'))
wstrustResponse = WSTrustResponse(_call_context, wstrustFile.read(), WSTrustVersion.WSTRUST13)
wstrustResponse.parse()
wstrustFile.close()
self.assertEqual(wstrustResponse.token_type, 'urn:oasis:names:tc:SAML:1.0:assertion', 'TokenType did not match expected value: ' + wstrustResponse.token_type)
attribute_values = ET.fromstring(wstrustResponse.token).findall('saml:AttributeStatement/saml:Attribute/saml:AttributeValue', _namespaces)
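# the parsed token is itself XML (a SAML assertion), so its attribute values can be inspected directly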
self.assertEqual(2, len(attribute_values))
self.assertEqual('1TIu064jGEmmf+hnI+F0Jg==', attribute_values[1].text)
def test_rstr_none(self):
with six.assertRaisesRegex(self, Exception, 'Received empty RSTR response body.') as cm:
wstrustResponse = WSTrustResponse(_call_context, None, WSTrustVersion.WSTRUST13)
wstrustResponse.parse()
def test_rstr_empty_string(self):
with six.assertRaisesRegex(self, Exception, 'Received empty RSTR response body.') as cm:
wstrustResponse = WSTrustResponse(_call_context, '', WSTrustVersion.WSTRUST13)
wstrustResponse.parse()
def test_rstr_unparseable_xml(self):
with six.assertRaisesRegex(self, Exception, 'Failed to parse RSTR in to DOM'):
wstrustResponse = WSTrustResponse(_call_context, '<This is not parseable as an RSTR', WSTrustVersion.WSTRUST13)
wstrustResponse.parse()
def test_findall_content_with_comparison(self):
content = """
<saml:Assertion xmlns:saml="SAML:assertion">
<ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
foo
</ds:Signature>
</saml:Assertion>"""
sample = ('<ns0:Wrapper xmlns:ns0="namespace0">'
+ content
+ '</ns0:Wrapper>')
# Demonstrating how XML-based parser won't give you the raw content as-is
element = ET.fromstring(sample).findall('{SAML:assertion}Assertion')[0]
assertion_via_xml_parser = ET.tostring(element)
self.assertNotEqual(content, assertion_via_xml_parser)
self.assertNotIn(b"<ds:Signature>", assertion_via_xml_parser)
# The findall_content() helper, based on Regex, will return content as-is.
self.assertEqual([content], findall_content(sample, "Wrapper"))
def test_findall_content_for_real(self):
with open(os.path.join(os.getcwd(), 'tests', 'wstrust', 'RSTR.xml')) as f:
rstr = f.read()
wstrustResponse = WSTrustResponse(_call_context, rstr, WSTrustVersion.WSTRUST13)
wstrustResponse.parse()
self.assertIn("<X509Data>", rstr)
self.assertIn(b"<X509Data>", wstrustResponse.token) # It is in bytes
if __name__ == '__main__':
unittest.main()
|
py/abd/abdmail_mailer.py | aevri/phabricator-tools | 150 | 11070287 | """Send mails to interested parties about pre-specified conditions."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdmail_mailer
#
# Public Classes:
# Mailer
# .noUsersOnBranch
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import types
class Mailer(object):
"""Send mails to interested parties about pre-specified conditions."""
def __init__(self, mail_sender, admin_emails, repository_name, uri):
"""Initialise, simply store the supplied parameters.
:mail_sender: supports send(to, cc, subject, message)
:admin_emails: list of string, who to tell when no appropriate users
:repository_name: the repository that is in context
:uri: the address of the phabricator instance
"""
self._mail_sender = mail_sender
assert not isinstance(
admin_emails, types.StringTypes), "should be list not string"
self._admin_emails = admin_emails
self._repository_name = repository_name
self._uri = uri
def noUsersOnBranch(self, branch_name, branch_base, emails):
# TODO: determine which of 'emails' we're permitted to send to
msg = textwrap.dedent("""\
No registered Phabricator users were found when
trying to create a review from a branch.
repository: {repo}
branch: {branch}
base branch: {base_branch}
unknown emails: {emails}
If you appear in the 'unknown emails' list then
please register by visiting this link, simply
logging in and registering your email address will
resolve the issue:
{uri}
You are receiving this message because you are
either in the unknown email list or an admin.
If you want to / have to use a different email address
to register with Phabricator then you will need to
ensure the latest commit on your branch uses the
correct email address.
You can view your email address like so:
$ git config --global user.email
and set it like so:
$ git config --global user.email "name@server.<EMAIL>"
If you only want to change your email address
for the git repo you are currently in, then
drop the '--global' bit:
$ git config user.email "<EMAIL>"
You should push the branches again but with
a commit that use the right email address.
The no-fuss way to do this is the following:
$ git checkout {branch}
$ git commit --reuse-message=HEAD --reset-author --allow-empty
$ git push origin {branch}
This will copy the message from the last commit
on the branch and create a new, empty commit
with the new authorship information.
""").format(
repo=self._repository_name,
branch=branch_name,
base_branch=branch_base,
emails=str(emails),
uri=self._uri
)
to = []
to.extend(self._admin_emails)
to.extend(emails)
self._mail_sender.send(
to_addresses=to,
subject="user exception",
message=msg)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 <NAME> L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
pyBN/utils/_tests/test_markov_blanket.py | seuzmj/pyBN | 126 | 11070293 | """
**************
Markov Blanket
Unit Test
**************
"""
__author__ = """<NAME> <<EMAIL>>"""
import unittest
import os
from os.path import dirname
import numpy as np
import pandas as pd
from pyBN.readwrite.read import read_bn
from pyBN.independence.markov_blanket import markov_blanket
class ConstraintTestsTestCase(unittest.TestCase):
def setUp(self):
self.dpath = os.path.join(dirname(dirname(dirname(dirname(__file__)))),'data')
self.bn = read_bn(os.path.join(self.dpath,'cmu.bn'))
def tearDown(self):
pass
def test_markov_blanket(self):
self.assertDictEqual(markov_blanket(self.bn),
{'Alarm': ['Earthquake', 'Burglary', 'JohnCalls', 'MaryCalls'],
'Burglary': ['Alarm', 'Earthquake'],
'Earthquake': ['Alarm', 'Burglary'],
'JohnCalls': ['Alarm'],
'MaryCalls': ['Alarm']})
|
pyimgsaliency/__init__.py | yhenon/pyImSaliency | 165 | 11070304 | <reponame>yhenon/pyImSaliency
from .saliency import *
from .binarise import *
from .saliency_mbd import *
from .evaluate import *
|
chainer_chemistry/dataset/preprocessors/mol_preprocessor.py | pfnet/chainerchem | 184 | 11070320 | from rdkit import Chem
from chainer_chemistry.dataset.preprocessors.base_preprocessor import BasePreprocessor # NOQA
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset # NOQA
class MolPreprocessor(BasePreprocessor):
"""preprocessor class specified for rdkit mol instance
Args:
add_Hs (bool): If True, implicit Hs are added.
kekulize (bool): If True, Kekulizes the molecule.
"""
def __init__(self, add_Hs=False, kekulize=False):
super(MolPreprocessor, self).__init__()
self.add_Hs = add_Hs
self.kekulize = kekulize
def prepare_smiles_and_mol(self, mol):
"""Prepare `smiles` and `mol` used in following preprocessing.
This method is called before `get_input_features` is called, by parser
class.
This method may be overridden to support custom `smiles`/`mol` extraction
Args:
mol (mol): mol instance
Returns (tuple): (`smiles`, `mol`)
"""
# Note that smiles expression is not unique.
# we obtain canonical smiles which is unique in `mol`
canonical_smiles = Chem.MolToSmiles(mol, isomericSmiles=False,
canonical=True)
mol = Chem.MolFromSmiles(canonical_smiles)
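# re-parse the molecule from its canonical SMILES so downstream feature
# extraction always sees the same canonical representation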
if self.add_Hs:
mol = Chem.AddHs(mol)
if self.kekulize:
Chem.Kekulize(mol)
return canonical_smiles, mol
def get_label(self, mol, label_names=None):
"""Extracts label information from a molecule.
This method extracts properties whose keys are
specified by ``label_names`` from a molecule ``mol``
and returns these values as a list.
The order of the values is same as that of ``label_names``.
If the molecule does not have a
property with some label, this function fills the corresponding
index of the returned list with ``None``.
Args:
mol (rdkit.Chem.Mol): molecule whose features to be extracted
label_names (None or iterable): list of label names.
Returns:
list of str: label information. Its length is equal to
that of ``label_names``. If ``label_names`` is ``None``,
this function returns an empty list.
"""
if label_names is None:
return []
label_list = []
for label_name in label_names:
if mol.HasProp(label_name):
label_list.append(mol.GetProp(label_name))
else:
label_list.append(None)
# TODO(Nakago): Review implementation
# Label -1 work in case of classification.
# However in regression, assign -1 is not a good strategy...
# label_list.append(-1)
# Failed to GetProp for label, skip this case.
# print('no label')
# raise MolFeatureExtractionError
return label_list
def get_input_features(self, mol):
"""get molecule's feature representation, descriptor.
Each subclass must override this method.
Args:
mol (Mol): molecule whose feature to be extracted.
`mol` is prepared by the method `prepare_smiles_and_mol`.
"""
raise NotImplementedError
def create_dataset(self, *args, **kwargs):
return NumpyTupleDataset(*args)
def process(self, filepath):
# Not used now...
pass
|
projects_oss/detr/detr/__init__.py | sstsai-adl/d2go | 687 | 11070325 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from . import models, util, datasets
__all__ = ["models", "util", "datasets"]
|
tools/setup.py | mrjrty/rpaframework | 518 | 11070329 | #!/usr/bin/env python3
import os
import sys
poetry_path = os.path.expanduser('~/.poetry/lib')
sys.path.append(os.path.realpath(poetry_path))
try:
from poetry.core.masonry.builders.sdist import SdistBuilder
except ImportError:
from poetry.masonry.builders.sdist import SdistBuilder
from poetry.factory import Factory
poetry = Factory().create_poetry(os.getcwd())
builder = SdistBuilder(poetry, None, None)
setup = builder.build_setup()
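# build_setup() renders the setup.py source corresponding to the project's pyproject.toml metadata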
with open('setup.py', 'wb') as fd:
fd.write(setup)
|
all_ensemble_models/mean_model.py | LuChungYing/yt8m-ensenble-model | 196 | 11070347 | <reponame>LuChungYing/yt8m-ensenble-model
import math
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
class MeanModel(models.BaseModel):
"""Mean model."""
def create_model(self, model_input, **unused_params):
"""Creates a logistic model.
model_input: 'batch' x 'num_features' x 'num_methods' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
output = tf.reduce_mean(model_input, axis=2)
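# average the per-method predictions over the 'num_methods' axis to form the ensemble mean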
return {"predictions": output}
|
akshare/economic/cons.py | J-Z-Z/akshare | 721 | 11070354 | <filename>akshare/economic/cons.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2019/10/21 21:11
Desc: Macroeconomic data configuration file
"""
# bitcoin
bitcoin_url = "https://datacenter-api.jin10.com/crypto_currency/list"
bitcoin_payload = {
"_": "1572857040275",
}
bitcoin_headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_bitcoin_current",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36"
}
# urls-china
JS_CHINA_CPI_YEARLY_URL = (
"https://cdn.jin10.com/dc/reports/dc_chinese_cpi_yoy_all.js?v={}&_={}"
)
JS_CHINA_CPI_MONTHLY_URL = (
"https://cdn.jin10.com/dc/reports/dc_chinese_cpi_mom_all.js?v={}&_={}"
)
JS_CHINA_M2_YEARLY_URL = (
"https://cdn.jin10.com/dc/reports/dc_chinese_m2_money_supply_yoy_all.js?v={}&_={}"
)
JS_CHINA_PPI_YEARLY_URL = (
"https://cdn.jin10.com/dc/reports/dc_chinese_ppi_yoy_all.js?v={}&_={}"
)
JS_CHINA_PMI_YEARLY_URL = (
"https://cdn.jin10.com/dc/reports/dc_chinese_manufacturing_pmi_all.js?v={}&_={}"
)
JS_CHINA_GDP_YEARLY_URL = (
"https://cdn.jin10.com/dc/reports/dc_chinese_gdp_yoy_all.js?v={}&_={}"
)
JS_CHINA_CX_PMI_YEARLY_URL = "https://cdn.jin10.com/dc/reports/dc_chinese_caixin_manufacturing_pmi_all.js?v={}&_={}"
JS_CHINA_CX_SERVICE_PMI_YEARLY_URL = "https://cdn.jin10.com/dc/reports/dc_chinese_caixin_services_pmi_all.js?v={}&_={}"
JS_CHINA_FX_RESERVES_YEARLY_URL = (
"https://cdn.jin10.com/dc/reports/dc_chinese_fx_reserves_all.js?v={}&_={}"
)
JS_CHINA_ENERGY_DAILY_URL = (
"https://cdn.jin10.com/dc/reports/dc_qihuo_energy_report_all.js?v={}&_={}"
)
JS_CHINA_NON_MAN_PMI_MONTHLY_URL = (
"https://cdn.jin10.com/dc/reports/dc_chinese_non_manufacturing_pmi_all.js?v={}&_={}"
)
JS_CHINA_RMB_DAILY_URL = "https://cdn.jin10.com/dc/reports/dc_rmb_data_all.js?v={}&_={}"
JS_CHINA_MARKET_MARGIN_SZ_URL = "https://cdn.jin10.com/dc/reports/dc_market_margin_sz_all.js?v={}&_={}"
JS_CHINA_MARKET_MARGIN_SH_URL = "https://cdn.jin10.com/dc/reports/dc_market_margin_sse_all.js?v={}&_={}"
JS_CHINA_REPORT_URL = "https://cdn.jin10.com/dc/reports/dc_sge_report_all.js?v={}&_={}"
# urls-usa
JS_USA_INTEREST_RATE_URL = (
"https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v={}&_={}"
)
JS_USA_NON_FARM_URL = (
"https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v={}&_={}"
)
JS_USA_UNEMPLOYMENT_RATE_URL = (
"https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js??v={}&_={}"
)
JS_USA_EIA_CRUDE_URL = (
"https://cdn.jin10.com/dc/reports/dc_eia_crude_oil_all.js?v={}&_={}"
)
JS_USA_INITIAL_JOBLESS_URL = (
"https://cdn.jin10.com/dc/reports/dc_initial_jobless_all.js?v={}&_={}"
)
JS_USA_CORE_PCE_PRICE_URL = (
"https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v={}&_={}"
)
JS_USA_CPI_MONTHLY_URL = "https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v={}&_={}"
JS_USA_LMCI_URL = "https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v={}&_={}"
JS_USA_ADP_NONFARM_URL = (
"https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v={}&_={}"
)
JS_USA_GDP_MONTHLY_URL = "https://cdn.jin10.com/dc/reports/dc_usa_gdp_all.js?v={}&_={}"
JS_USA_EIA_CRUDE_PRODUCE_URL = (
"https://cdn.jin10.com/dc/reports/dc_eia_crude_oil_produce_all.js?v={}&_={}"
)
# urls-euro
JS_EURO_RATE_DECISION_URL = (
"https://cdn.jin10.com/dc/reports/dc_interest_rate_decision_all.js?v={}&_={}"
)
# urls-constitute
JS_CONS_GOLD_ETF_URL = "https://cdn.jin10.com/dc/reports/dc_etf_gold_all.js?v={}&_={}"
JS_CONS_SLIVER_ETF_URL = (
"https://cdn.jin10.com/dc/reports/dc_etf_sliver_all.js?v={}&_={}"
)
JS_CONS_OPEC_URL = "https://cdn.jin10.com/dc/reports/dc_opec_report_all.js??v={}&_={}"
usa_name_url_map = {
"美联储决议报告": "//datacenter.jin10.com/reportType/dc_usa_interest_rate_decision",
"美国非农就业人数报告": "//datacenter.jin10.com/reportType/dc_nonfarm_payrolls",
"美国失业率报告": "//datacenter.jin10.com/reportType/dc_usa_unemployment_rate",
"美国CPI月率报告": "//datacenter.jin10.com/reportType/dc_usa_cpi",
"美国初请失业金人数报告": "//datacenter.jin10.com/reportType/dc_initial_jobless",
"美国核心PCE物价指数年率报告": "//datacenter.jin10.com/reportType/dc_usa_core_pce_price",
"美国EIA原油库存报告": "//datacenter.jin10.com/reportType/dc_eia_crude_oil",
"美联储劳动力市场状况指数报告": "//datacenter.jin10.com/reportType/dc_usa_lmci",
"美国ADP就业人数报告": "//datacenter.jin10.com/reportType/dc_adp_nonfarm_employment",
"美国国内生产总值(GDP)报告": "//datacenter.jin10.com/reportType/dc_usa_gdp",
"美国原油产量报告": "//datacenter.jin10.com/reportType/dc_eia_crude_oil_produce",
"美国零售销售月率报告": "//datacenter.jin10.com/reportType/dc_usa_retail_sales",
"美国商品期货交易委员会CFTC外汇类非商业持仓报告": "//datacenter.jin10.com/reportType/dc_cftc_nc_report",
"美国NFIB小型企业信心指数报告": "//datacenter.jin10.com/reportType/dc_usa_nfib_small_business",
"贝克休斯钻井报告": "//datacenter.jin10.com/reportType/dc_rig_count_summary",
"美国谘商会消费者信心指数报告": "//datacenter.jin10.com/reportType/dc_usa_cb_consumer_confidence",
"美国FHFA房价指数月率报告": "//datacenter.jin10.com/reportType/dc_usa_house_price_index",
"美国个人支出月率报告": "//datacenter.jin10.com/reportType/dc_usa_personal_spending",
"美国生产者物价指数(PPI)报告": "//datacenter.jin10.com/reportType/dc_usa_ppi",
"美国成屋销售总数年化报告": "//datacenter.jin10.com/reportType/dc_usa_exist_home_sales",
"美国成屋签约销售指数月率报告": "//datacenter.jin10.com/reportType/dc_usa_pending_home_sales",
"美国S&P/CS20座大城市房价指数年率报告": "//datacenter.jin10.com/reportType/dc_usa_spcs20",
"美国进口物价指数报告": "//datacenter.jin10.com/reportType/dc_usa_import_price",
"美国营建许可总数报告": "//datacenter.jin10.com/reportType/dc_usa_building_permits",
"美国商品期货交易委员会CFTC商品类非商业持仓报告": "//datacenter.jin10.com/reportType/dc_cftc_c_report",
"美国挑战者企业裁员人数报告": "//datacenter.jin10.com/reportType/dc_usa_job_cuts",
"美国实际个人消费支出季率初值报告": "//datacenter.jin10.com/reportType/dc_usa_real_consumer_spending",
"美国贸易帐报告": "//datacenter.jin10.com/reportType/dc_usa_trade_balance",
"美国经常帐报告": "//datacenter.jin10.com/reportType/dc_usa_current_account",
"美国API原油库存报告": "//datacenter.jin10.com/reportType/dc_usa_api_crude_stock",
"美国工业产出月率报告": "//datacenter.jin10.com/reportType/dc_usa_industrial_production",
"美国耐用品订单月率报告": "//datacenter.jin10.com/reportType/dc_usa_durable_goods_orders",
"美国工厂订单月率报告": "//datacenter.jin10.com/reportType/dc_usa_factory_orders",
"Markit服务业PMI终值": "//datacenter.jin10.com/reportType/dc_usa_services_pmi",
"商业库存月率": "//datacenter.jin10.com/reportType/dc_usa_business_inventories",
"美国ISM非制造业PMI": "//datacenter.jin10.com/reportType/dc_usa_ism_non_pmi",
"NAHB房产市场指数": "//datacenter.jin10.com/reportType/dc_usa_nahb_house_market_index",
"新屋开工总数年化": "//datacenter.jin10.com/reportType/dc_usa_house_starts",
"美国新屋销售总数年化": "//datacenter.jin10.com/reportType/dc_usa_new_home_sales",
"美国Markit制造业PMI初值报告": "//datacenter.jin10.com/reportType/dc_usa_pmi",
"美国ISM制造业PMI报告": "//datacenter.jin10.com/reportType/dc_usa_ism_pmi",
"美国密歇根大学消费者信心指数初值报告": "//datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"美国出口价格指数报告": "//datacenter.jin10.com/reportType/dc_usa_export_price",
"美国核心生产者物价指数(PPI)报告": "//datacenter.jin10.com/reportType/dc_usa_core_ppi",
"美国核心CPI月率报告": "//datacenter.jin10.com/reportType/dc_usa_core_cpi",
"美国EIA俄克拉荷马州库欣原油库存报告": "//datacenter.jin10.com/reportType/dc_eia_cushing_oil",
"美国EIA精炼油库存报告": "//datacenter.jin10.com/reportType/dc_eia_distillates_stocks",
"美国EIA天然气库存报告": "//datacenter.jin10.com/reportType/dc_eia_natural_gas",
"美国EIA汽油库存报告": "//datacenter.jin10.com/reportType/dc_eia_gasoline",
}
china_name_url_map = {
"郑州商品交易所期货每日行情": "//datacenter.jin10.com/reportType/dc_czce_futures_data",
"中国CPI年率报告": "//datacenter.jin10.com/reportType/dc_chinese_cpi_yoy",
"中国PPI年率报告": "//datacenter.jin10.com/reportType/dc_chinese_ppi_yoy",
"中国以美元计算出口年率报告": "//datacenter.jin10.com/reportType/dc_chinese_exports_yoy",
"中国以美元计算进口年率报告": "//datacenter.jin10.com/reportType/dc_chinese_imports_yoy",
"中国以美元计算贸易帐报告": "//datacenter.jin10.com/reportType/dc_chinese_trade_balance",
"中国规模以上工业增加值年率报告": "//datacenter.jin10.com/reportType/dc_chinese_industrial_production_yoy",
"中国官方制造业PMI报告": "//datacenter.jin10.com/reportType/dc_chinese_manufacturing_pmi",
"中国财新制造业PMI终值报告": "//datacenter.jin10.com/reportType/dc_chinese_caixin_manufacturing_pmi",
"中国财新服务业PMI报告": "//datacenter.jin10.com/reportType/dc_chinese_caixin_services_pmi",
"中国外汇储备报告": "//datacenter.jin10.com/reportType/dc_chinese_fx_reserves",
"中国M2货币供应年率报告": "//datacenter.jin10.com/reportType/dc_chinese_m2_money_supply_yoy",
"中国GDP年率报告": "//datacenter.jin10.com/reportType/dc_chinese_gdp_yoy",
"人民币汇率中间价报告": "//datacenter.jin10.com/reportType/dc_rmb_data",
"在岸人民币成交量报告": "//datacenter.jin10.com/reportType/dc_dollar_rmb_report",
"上海期货交易所期货合约行情": "//datacenter.jin10.com/reportType/dc_shfe_futures_data",
"中国CPI月率报告": "//datacenter.jin10.com/reportType/dc_chinese_cpi_mom",
"大连商品交易所期货每日行情": "//datacenter.jin10.com/reportType/dc_dce_futures_data",
"中国金融期货交易所期货每日行情": "//datacenter.jin10.com/reportType/dc_cffex_futures_data",
"同业拆借报告": "//datacenter.jin10.com/reportType/dc_shibor",
"香港同业拆借报告": "//datacenter.jin10.com/reportType/dc_hk_market_info",
"深圳融资融券报告": "//datacenter.jin10.com/reportType/dc_market_margin_sz",
"上海融资融券报告": "//datacenter.jin10.com/reportType/dc_market_margin_sse",
"上海黄金交易所报告": "//datacenter.jin10.com/reportType/dc_sge_report",
"上海期货交易所仓单日报": "//datacenter.jin10.com/reportType/dc_shfe_daily_stock",
"大连商品交易所仓单日报": "//datacenter.jin10.com/reportType/dc_dce_daily_stock",
"郑州商品交易所仓单日报": "//datacenter.jin10.com/reportType/dc_czce_daily_stock",
"上海期货交易所指定交割仓库库存周报": "//datacenter.jin10.com/reportType/dc_shfe_weekly_stock",
"CCI指数5500大卡动力煤价格报告": "//datacenter.jin10.com/reportType/dc_cci_report",
"沿海六大电厂库存动态报告": "//datacenter.jin10.com/reportType/dc_qihuo_energy_report",
"国内期货市场实施热度报告": "//datacenter.jin10.com/reportType/dc_futures_market_realtime",
"中国官方非制造业PMI报告": "//datacenter.jin10.com/reportType/dc_chinese_non_manufacturing_pmi",
}
euro_name_url_map = {
"欧元区未季调贸易帐报告": "//datacenter.jin10.com/reportType/dc_eurozone_trade_balance_mom",
"欧元区季度GDP年率报告": "//datacenter.jin10.com/reportType/dc_eurozone_gdp_yoy",
"欧元区CPI年率报告": "//datacenter.jin10.com/reportType/dc_eurozone_cpi_yoy",
"欧元区PPI月率报告": "//datacenter.jin10.com/reportType/dc_eurozone_ppi_mom",
"欧元区零售销售月率报告": "//datacenter.jin10.com/reportType/dc_eurozone_retail_sales_mom",
"欧元区季调后就业人数季率报告": "//datacenter.jin10.com/reportType/dc_eurozone_employment_change_qoq",
"欧元区失业率报告": "//datacenter.jin10.com/reportType/dc_eurozone_unemployment_rate_mom",
"欧元区CPI月率报告": "//datacenter.jin10.com/reportType/dc_eurozone_cpi_mom",
"欧元区经常帐报告": "//datacenter.jin10.com/reportType/dc_eurozone_current_account_mom",
"欧元区工业产出月率报告": "//datacenter.jin10.com/reportType/dc_eurozone_industrial_production_mom",
"欧元区制造业PMI初值报告": "//datacenter.jin10.com/reportType/dc_eurozone_manufacturing_pmi",
"欧元区服务业PMI终值报告": "//datacenter.jin10.com/reportType/dc_eurozone_services_pmi",
"欧元区ZEW经济景气指数报告": "//datacenter.jin10.com/reportType/dc_eurozone_zew_economic_sentiment",
"欧元区Sentix投资者信心指数报告": "//datacenter.jin10.com/reportType/dc_eurozone_sentix_investor_confidence",
}
world_central_bank_map = {
"美联储决议报告": "//datacenter.jin10.com/reportType/dc_usa_interest_rate_decision",
"欧洲央行决议报告": "//datacenter.jin10.com/reportType/dc_interest_rate_decision",
"新西兰联储决议报告": "//datacenter.jin10.com/reportType/dc_newzealand_interest_rate_decision",
"中国央行决议报告": "//datacenter.jin10.com/reportType/dc_china_interest_rate_decision",
"瑞士央行决议报告": "//datacenter.jin10.com/reportType/dc_switzerland_interest_rate_decision",
"英国央行决议报告": "//datacenter.jin10.com/reportType/dc_english_interest_rate_decision",
"澳洲联储决议报告": "//datacenter.jin10.com/reportType/dc_australia_interest_rate_decision",
"日本央行决议报告": "//datacenter.jin10.com/reportType/dc_japan_interest_rate_decision",
"印度央行决议报告": "//datacenter.jin10.com/reportType/dc_india_interest_rate_decision",
"俄罗斯央行决议报告": "//datacenter.jin10.com/reportType/dc_russia_interest_rate_decision",
"巴西央行决议报告": "//datacenter.jin10.com/reportType/dc_brazil_interest_rate_decision",
}
constitute_report_map = {
"全球最大黄金ETF—SPDR Gold Trust持仓报告": "//datacenter.jin10.com/reportType/dc_etf_gold",
"全球最大白银ETF--iShares Silver Trust持仓报告": "//datacenter.jin10.com/reportType/dc_etf_sliver",
"芝加哥商业交易所(CME)能源类商品成交量报告": "//datacenter.jin10.com/reportType/dc_cme_energy_report",
"美国商品期货交易委员会CFTC外汇类非商业持仓报告": "//datacenter.jin10.com/reportType/dc_cftc_nc_report",
"美国商品期货交易委员会CFTC商品类非商业持仓报告": "//datacenter.jin10.com/reportType/dc_cftc_c_report",
"芝加哥商业交易所(CME)金属类商品成交量报告": "//datacenter.jin10.com/reportType/dc_cme_report",
"芝加哥商业交易所(CME)外汇类商品成交量报告": "//datacenter.jin10.com/reportType/dc_cme_fx_report",
"伦敦金属交易所(LME)库存报告": "//datacenter.jin10.com/reportType/dc_lme_report",
"伦敦金属交易所(LME)持仓报告": "//datacenter.jin10.com/reportType/dc_lme_traders_report",
"美国商品期货交易委员会CFTC商品类商业持仓报告": "//datacenter.jin10.com/reportType/dc_cftc_merchant_goods",
"美国商品期货交易委员会CFTC外汇类商业持仓报告": "//datacenter.jin10.com/reportType/dc_cftc_merchant_currency",
}
other_map = {
"投机情绪报告": "//datacenter.jin10.com/reportType/dc_ssi_trends",
"外汇实时波动监控": "//datacenter.jin10.com/reportType/dc_myFxBook_heat_map",
"外汇相关性报告": "//datacenter.jin10.com/reportType/dc_myFxBook_correlation",
"加密货币实时行情": "//datacenter.jin10.com/reportType/dc_bitcoin_current",
}
main_map = {
"全球最大黄金ETF—SPDR Gold Trust持仓报告": "//datacenter.jin10.com/reportType/dc_etf_gold",
"全球最大白银ETF--iShares Silver Trust持仓报告": "//datacenter.jin10.com/reportType/dc_etf_sliver",
"美国非农就业人数报告": "//datacenter.jin10.com/reportType/dc_nonfarm_payrolls",
"投机情绪报告": "//datacenter.jin10.com/reportType/dc_ssi_trends",
"数据达人 — 复合报告": "//datacenter.jin10.com/reportType/dc_complex_report?complexType=1",
"投行订单": "//datacenter.jin10.com/banks_orders",
"行情报价": "//datacenter.jin10.com/price_wall",
"美国EIA原油库存报告": "//datacenter.jin10.com/reportType/dc_eia_crude_oil",
"欧佩克报告": "//datacenter.jin10.com/reportType/dc_opec_report",
}
|
test/run/t69.py | timmartin/skulpt | 2,671 | 11070381 | <reponame>timmartin/skulpt<gh_stars>1000+
x = {1:2}
del x[1]
print len(x)
|
codegen_sources/test_generation/evosuite_tests_translators/tests/resources/expected_python_translations/integer_array_casting.py | AlexShypula/CodeGen | 241 | 11070396 | <reponame>AlexShypula/CodeGen<filename>codegen_sources/test_generation/evosuite_tests_translators/tests/resources/expected_python_translations/integer_array_casting.py
import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import sys
import unittest
# TOFILL
class CLASS_196a45f8932c033f06f6a086488b268404e77353d16c9bc6407a417f237da6db(
unittest.TestCase
):
def test3(self):
int0 = f_filled(None)
assert 0 == int0
if __name__ == "__main__":
unittest.main()
|
loss/robust_loss_pytorch/tests/distribution_test.py | milesgray/CALAE | 522 | 11070409 | <filename>loss/robust_loss_pytorch/tests/distribution_test.py
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for distribution.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import scipy.stats
import torch
from robust_loss_pytorch import distribution
class TestDistribution(parameterized.TestCase):
def setUp(self):
self._distribution = distribution.Distribution()
super(TestDistribution, self).setUp()
torch.manual_seed(0)
np.random.seed(0)
def testSplineCurveIsC1Smooth(self):
"""Tests that partition_spline_curve() and its derivative are continuous."""
x1 = np.linspace(0., 8., 10000, dtype=np.float64)
x2 = x1 + 1e-7
var_x1 = torch.autograd.Variable(torch.tensor(x1), requires_grad=True)
y1 = distribution.partition_spline_curve(var_x1)
sum_y1 = torch.sum(y1)
sum_y1.backward()
dy1 = var_x1.grad.detach().numpy()
y1 = y1.detach().numpy()
var_x2 = torch.autograd.Variable(torch.tensor(x2), requires_grad=True)
y2 = distribution.partition_spline_curve(var_x2)
sum_y2 = torch.sum(y2)
sum_y2.backward()
dy2 = var_x2.grad.detach().numpy()
y2 = y2.detach().numpy()
np.testing.assert_allclose(y1, y2, atol=1e-6, rtol=1e-6)
np.testing.assert_allclose(dy1, dy2, atol=1e-6, rtol=1e-6)
def testAnalyaticalPartitionIsCorrect(self):
"""Tests _analytical_base_partition_function against some golden data."""
# Here we enumerate a set of positive rational numbers n/d alongside
# numerically approximated values of Z(n / d) up to 10 digits of precision,
# stored as (n, d, Z(n/d)). This was generated with an external mathematica
# script.
ground_truth_rational_partitions = (
(1, 7, 4.080330073), (1, 6, 4.038544331), (1, 5, 3.984791180),
(1, 4, 3.912448576), (1, 3, 3.808203509), (2, 5, 3.735479786),
(3, 7, 3.706553276), (1, 2, 3.638993131), (3, 5, 3.553489270),
(2, 3, 3.501024540), (3, 4, 3.439385624), (4, 5, 3.404121259),
(1, 1, 3.272306973), (6, 5, 3.149249092), (5, 4, 3.119044506),
(4, 3, 3.068687433), (7, 5, 3.028084866), (3, 2, 2.965924889),
(8, 5, 2.901059987), (5, 3, 2.855391798), (7, 4, 2.794052016),
(7, 3, 2.260434598), (5, 2, 2.218882601), (8, 3, 2.190349858),
(3, 1, 2.153202857), (4, 1, 2.101960916), (7, 2, 2.121140098),
(5, 1, 2.080000512), (9, 2, 2.089161164), (6, 1, 2.067751267),
(7, 1, 2.059929623), (8, 1, 2.054500222), (10, 3, 2.129863884),
(11, 3, 2.113763384), (13, 3, 2.092928254), (14, 3, 2.085788350),
(16, 3, 2.075212740), (11, 2, 2.073116001), (17, 3, 2.071185791),
(13, 2, 2.063452243), (15, 2, 2.056990258)) # pyformat: disable
for numer, denom, z_true in ground_truth_rational_partitions:
z = distribution.analytical_base_partition_function(numer, denom)
np.testing.assert_allclose(z, z_true, atol=1e-9, rtol=1e-9)
def testSplineCurveInverseIsCorrect(self):
"""Tests that the inverse curve is indeed the inverse of the curve."""
x_knot = np.arange(0, 16, 0.01, dtype=np.float64)
alpha = distribution.inv_partition_spline_curve(x_knot)
x_recon = distribution.partition_spline_curve(alpha)
np.testing.assert_allclose(x_recon, x_knot)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testLogPartitionInfinityIsAccurate(self, float_dtype):
"""Tests that the partition function is accurate at infinity."""
alpha = float_dtype(float('inf'))
log_z_true = np.float64(0.70526025442) # From mathematica.
log_z = self._distribution.log_base_partition_function(alpha)
np.testing.assert_allclose(log_z, log_z_true, atol=1e-7, rtol=1e-7)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testLogPartitionFractionsAreAccurate(self, float_dtype):
"""Test that the partition function is correct for [0/11, ... 22/11]."""
numers = range(0, 23)
denom = 11
log_zs_true = [
np.log(distribution.analytical_base_partition_function(n, denom))
for n in numers
]
log_zs = self._distribution.log_base_partition_function(
float_dtype(np.array(numers)) / float_dtype(denom))
np.testing.assert_allclose(log_zs, log_zs_true, atol=1e-7, rtol=1e-7)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testAlphaZeroSamplesMatchACauchyDistribution(self, float_dtype):
"""Tests that samples when alpha=0 match a Cauchy distribution."""
num_samples = 16384
scale = float_dtype(1.7)
samples = self._distribution.draw_samples(
np.zeros(num_samples, dtype=float_dtype),
scale * np.ones(num_samples, dtype=float_dtype))
# Perform the Kolmogorov-Smirnov test against a Cauchy distribution.
ks_statistic = scipy.stats.kstest(samples, 'cauchy',
(0., scale * np.sqrt(2.))).statistic
np.testing.assert_(ks_statistic < 0.01)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testAlphaTwoSamplesMatchANormalDistribution(self, float_dtype):
"""Tests that samples when alpha=2 match a normal distribution."""
num_samples = 16384
scale = float_dtype(1.7)
samples = self._distribution.draw_samples(
2. * np.ones(num_samples, dtype=float_dtype),
scale * np.ones(num_samples, dtype=float_dtype))
# Perform the Kolmogorov-Smirnov test against a normal distribution.
ks_statistic = scipy.stats.kstest(samples, 'norm', (0., scale)).statistic
np.testing.assert_(ks_statistic < 0.01)
@parameterized.named_parameters(
('SingleCPU', np.float32, 'cpu'), ('DoubleCPU', np.float64, 'cpu'),
('SingleGPU', np.float32, 'cuda'), ('DoubleGPU', np.float64, 'cuda'))
def testAlphaZeroNllsMatchACauchyDistribution(self, float_dtype, device):
"""Tests that NLLs when alpha=0 match a Cauchy distribution."""
x = np.linspace(-10., 10, 1000, dtype=float_dtype)
scale = np.array(1.7, float_dtype)
alpha = np.array(0., float_dtype)
nll = self._distribution.nllfun(
torch.tensor(x, device=device), torch.tensor(alpha, device=device),
torch.tensor(scale, device=device)).cpu().detach()
nll_true = -scipy.stats.cauchy(0., scale * np.sqrt(2.)).logpdf(x)
np.testing.assert_allclose(nll, nll_true, atol=1e-6, rtol=1e-6)
@parameterized.named_parameters(
('SingleCPU', np.float32, 'cpu'), ('DoubleCPU', np.float64, 'cpu'),
('SingleGPU', np.float32, 'cuda'), ('DoubleGPU', np.float64, 'cuda'))
def testAlphaTwoNllsMatchANormalDistribution(self, float_dtype, device):
"""Tests that NLLs when alpha=2 match a normal distribution."""
x = np.linspace(-10., 10, 1000, dtype=float_dtype)
scale = np.array(1.7, float_dtype)
alpha = np.array(2., float_dtype)
nll = self._distribution.nllfun(
torch.tensor(x, device=device), torch.tensor(alpha, device=device),
torch.tensor(scale, device=device)).cpu().detach()
nll_true = -scipy.stats.norm(0., scale).logpdf(x)
np.testing.assert_allclose(nll, nll_true, atol=1e-6, rtol=1e-6)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testPdfIntegratesToOne(self, float_dtype):
"""Tests that the PDF integrates to 1 for different alphas."""
alphas = np.exp(np.linspace(-4., 8., 8, dtype=float_dtype))
scale = float_dtype(1.7)
x = np.arange(-128., 128., 1 / 256., dtype=float_dtype) * scale
for alpha in alphas:
nll = self._distribution.nllfun(x, np.array(alpha),
np.array(scale)).detach().numpy()
pdf_sum = np.sum(np.exp(-nll)) * (x[1] - x[0])
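# Riemann-sum approximation of the integral of the PDF exp(-nll) over the sampled grid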
np.testing.assert_allclose(pdf_sum, 1., atol=0.005, rtol=0.005)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testNllfunPreservesDtype(self, float_dtype):
"""Checks that the loss's output has the same precision as its input."""
n = 16
x = float_dtype(np.random.normal(size=n))
alpha = float_dtype(np.exp(np.random.normal(size=n)))
scale = float_dtype(np.exp(np.random.normal(size=n)))
y = self._distribution.nllfun(x, alpha, scale).detach().numpy()
np.testing.assert_(y.dtype, float_dtype)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testSamplingPreservesDtype(self, float_dtype):
"""Checks that sampling's output has the same precision as its input."""
n = 16
alpha = float_dtype(np.exp(np.random.normal(size=n)))
scale = float_dtype(np.exp(np.random.normal(size=n)))
y = self._distribution.draw_samples(alpha, scale).detach().numpy()
np.testing.assert_(y.dtype, float_dtype)
if __name__ == '__main__':
np.testing.run_module_suite()
|
src/einsteinpy/metric/kerrnewman.py | QMrpy/einsteinpy | 485 | 11070417 | import numpy as np
from astropy import units as u
from einsteinpy import constant
from einsteinpy.metric import BaseMetric
from einsteinpy.utils import CoordinateError
_c = constant.c.value
_G = constant.G.value
_Cc = constant.coulombs_const.value
class KerrNewman(BaseMetric):
"""
Class for defining Kerr-Newman Geometry
"""
@u.quantity_input(M=u.kg, a=u.one, Q=u.C, q=u.C / u.kg)
def __init__(self, coords, M, a, Q, q=0.0 * u.C / u.kg):
"""
Constructor
Parameters
----------
coords : ~einsteinpy.coordinates.differential.*
Coordinate system, in which Metric is to be represented
M : ~astropy.units.quantity.Quantity
Mass of gravitating body, e.g. Black Hole
a : ~astropy.units.quantity.Quantity
Spin Parameter
Q : ~astropy.units.quantity.Quantity
Charge on gravitating body, e.g. Black Hole
q : ~astropy.units.quantity.Quantity, optional
Charge, per unit mass, of the test particle
Defaults to ``0 C / kg``
"""
super().__init__(
coords=coords,
M=M,
a=a,
Q=Q,
name="Kerr-Newman Metric",
metric_cov=self.metric_covariant,
christoffels=self._christoffels,
f_vec=self._f_vec,
)
self.q = q
# Precomputed list of tuples, containing indices \
# of non-zero Christoffel Symbols for Kerr-Newman Metric \
# in Boyer-Lindquist Coordinates
self._nonzero_christoffels_list_bl = [
(0, 0, 1),
(0, 0, 2),
(0, 1, 3),
(0, 2, 3),
(0, 1, 0),
(0, 2, 0),
(0, 3, 1),
(0, 3, 2),
(1, 0, 0),
(1, 1, 1),
(1, 2, 2),
(1, 3, 3),
(2, 0, 0),
(2, 1, 1),
(2, 2, 2),
(2, 3, 3),
(1, 0, 3),
(1, 1, 2),
(2, 0, 3),
(2, 1, 2),
(1, 2, 1),
(1, 3, 0),
(2, 2, 1),
(2, 3, 0),
(3, 0, 1),
(3, 0, 2),
(3, 1, 0),
(3, 1, 3),
(3, 2, 0),
(3, 2, 3),
(3, 3, 1),
(3, 3, 2),
]
def metric_covariant(self, x_vec):
"""
Returns Covariant Kerr-Newman Metric Tensor \
in chosen Coordinates
Parameters
----------
x_vec : array_like
Position 4-Vector
Returns
-------
~numpy.ndarray
Covariant Kerr-Newman Metric Tensor in chosen Coordinates
Numpy array of shape (4,4)
Raises
------
CoordinateError
Raised, if the metric is not available in \
the supplied Coordinate System
"""
if self.coords.system == "BoyerLindquist":
return self._g_cov_bl(x_vec)
raise CoordinateError(
"Kerr-Newman Metric is available only in Boyer-Lindquist Coordinates."
)
def _g_cov_bl(self, x_vec):
"""
Returns Covariant Kerr-Newman Metric Tensor \
in Boyer-Lindquist coordinates
Parameters
----------
x_vec : array_like
Position 4-Vector
Returns
-------
~numpy.ndarray
Covariant Kerr-Newman Metric Tensor \
in Boyer-Lindquist coordinates
Numpy array of shape (4,4)
"""
r, th = x_vec[1], x_vec[2]
M, a = self.M.value, self.a.value
alpha = super().alpha(M, a)
rho2, dl = super().rho(r, th, M, a) ** 2, super().delta(r, M, a)
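# rho^2 and Delta are the standard Boyer-Lindquist auxiliary quantities shared by the Kerr family of metrics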
g_cov_bl = np.zeros(shape=(4, 4), dtype=float)
g_cov_bl[0, 0] = (_c ** 2) * ((dl - ((alpha * np.sin(th)) ** 2)) / (rho2))
g_cov_bl[1, 1] = -rho2 / dl
g_cov_bl[2, 2] = -rho2
g_cov_bl[3, 3] = -(
(np.sin(th) ** 2)
* (((r ** 2 + alpha ** 2) ** 2 - dl * (alpha * np.sin(th)) ** 2) / rho2)
)
g_cov_bl[0, 3] = g_cov_bl[3, 0] = _c * (
(-alpha * (np.sin(th) ** 2) * (dl - (r ** 2) - (alpha ** 2))) / rho2
)
return g_cov_bl
def _dg_dx_bl(self, x_vec):
"""
Returns derivative of each Kerr-Newman Metric component \
w.r.t. coordinates in Boyer-Lindquist Coordinate System
Parameters
----------
x_vec : array_like
Position 4-Vector
Returns
-------
dgdx : ~numpy.ndarray
Array, containing derivative of each Kerr-Newman \
Metric component w.r.t. coordinates \
in Boyer-Lindquist Coordinate System
Numpy array of shape (4,4,4)
dgdx[0], dgdx[1], dgdx[2] & dgdx[3] contain \
derivatives of metric w.r.t. t, r, theta & phi respectively
"""
r, th = x_vec[1], x_vec[2]
M, a = self.M.value, self.a.value
alpha = super().alpha(M, a)
rho2, dl = super().rho(r, th, M, a) ** 2, super().delta(r, M, a)
dgdx = np.zeros(shape=(4, 4, 4), dtype=float)
# Metric is invariant on t & phi
# Differentiation of metric wrt r
def due_to_r():
nonlocal dgdx
drh2dr = 2 * r
dddr = 2 * r - self.sch_rad
dgdx[1, 0, 0] = (
(_c ** 2)
* (dddr * rho2 - drh2dr * (dl - (alpha * np.sin(th)) ** 2))
/ (rho2 ** 2)
)
dgdx[1, 1, 1] = (-1 / (dl ** 2)) * (drh2dr * dl - dddr * rho2)
dgdx[1, 2, 2] = -drh2dr
dgdx[1, 3, 3] = ((np.sin(th) / rho2) ** 2) * (
(
(
((alpha * np.sin(th)) ** 2) * dddr
- 4 * (r ** 3)
- 4 * (r * (alpha ** 2))
)
* rho2
)
- (
drh2dr
* (((alpha * np.sin(th)) ** 2) * dl - ((r ** 2 + alpha ** 2) ** 2))
)
)
dgdx[1, 0, 3] = dgdx[1, 3, 0] = (
_c * (-alpha) * (np.sin(th) ** 2) / (rho2 ** 2)
) * ((dddr - 2 * r) * rho2 - drh2dr * (dl - r ** 2 - alpha ** 2))
# Differentiation of metric wrt theta
def due_to_theta():
nonlocal dgdx
drh2dth = -(alpha ** 2) * np.sin(2 * th)
dgdx[2, 0, 0] = (-((_c / rho2) ** 2)) * (
(drh2dth * (dl - ((alpha * np.sin(th)) ** 2)))
+ ((alpha ** 2) * rho2 * np.sin(2 * th))
)
dgdx[2, 1, 1] = -drh2dth / dl
dgdx[2, 2, 2] = -drh2dth
dgdx[2, 3, 3] = (1 / (rho2 ** 2)) * (
(dl * (alpha * np.sin(th)) ** 2)
* (2 * rho2 * np.sin(2 * th) - drh2dth * (np.sin(th)) ** 2)
- (
((r ** 2 + alpha ** 2) ** 2)
* (rho2 * np.sin(2 * th) - drh2dth * (np.sin(th)) ** 2)
)
)
dgdx[2, 0, 3] = dgdx[2, 3, 0] = (
(-alpha * _c * (dl - r ** 2 - alpha ** 2)) / (rho2 ** 2)
) * ((np.sin(2 * th) * rho2) - (drh2dth * (np.sin(th) ** 2)))
due_to_r()
due_to_theta()
return dgdx
def _christoffels(self, x_vec):
"""
Returns Christoffel Symbols for Kerr-Newman Metric in chosen Coordinates
Parameters
----------
x_vec : array_like
Position 4-Vector
Returns
-------
~numpy.ndarray
Christoffel Symbols for Kerr-Newman \
Metric in chosen Coordinates
Numpy array of shape (4,4,4)
Raises
------
CoordinateError
Raised, if the Christoffel Symbols are not \
available in the supplied Coordinate System
"""
if self.coords.system == "BoyerLindquist":
return self._ch_sym_bl(x_vec)
raise CoordinateError(
"Christoffel Symbols for Kerr-Newman Metric are available only in Boyer-Lindquist Coordinates."
)
def _ch_sym_bl(self, x_vec):
"""
Returns Christoffel Symbols for Kerr-Newman Metric \
in Boyer-Lindquist Coordinates
Parameters
----------
x_vec : array_like
Position 4-Vector
Returns
-------
~numpy.ndarray
Christoffel Symbols for Kerr-Newman Metric \
in Boyer-Lindquist Coordinates
Numpy array of shape (4,4,4)
"""
g_contra = self.metric_contravariant(x_vec)
dgdx = self._dg_dx_bl(x_vec)
chl = np.zeros(shape=(4, 4, 4), dtype=float)
for _, k, l in self._nonzero_christoffels_list_bl[0:4]:
val1 = dgdx[l, 0, k] + dgdx[k, 0, l]
val2 = dgdx[l, 3, k] + dgdx[k, 3, l]
chl[0, k, l] = chl[0, l, k] = 0.5 * (
g_contra[0, 0] * (val1) + g_contra[0, 3] * (val2)
)
chl[3, k, l] = chl[3, l, k] = 0.5 * (
g_contra[3, 0] * (val1) + g_contra[3, 3] * (val2)
)
for i, k, l in self._nonzero_christoffels_list_bl[8:16]:
chl[i, k, l] = 0.5 * (
g_contra[i, i] * (dgdx[l, i, k] + dgdx[k, i, l] - dgdx[i, k, l])
)
for i, k, l in self._nonzero_christoffels_list_bl[16:20]:
chl[i, k, l] = chl[i, l, k] = 0.5 * (
g_contra[i, i] * (dgdx[l, i, k] + dgdx[k, i, l] - dgdx[i, k, l])
)
return chl
def _f_vec(self, lambda_, vec):
"""
Returns f_vec for Kerr-Newman Metric in chosen coordinates
To be used for solving Geodesics ODE
Parameters
----------
lambda_ : float
Parameterizes current integration step
Used by ODE Solver
vec : array_like
Length-8 Vector, containing 4-Position & 4-Velocity
Returns
-------
~numpy.ndarray
f_vec for Kerr-Newman Metric in chosen coordinates
Numpy array of shape (8)
Raises
------
CoordinateError
Raised, if ``f_vec`` is not available in \
the supplied Coordinate System
"""
if self.coords.system == "BoyerLindquist":
return self._f_vec_bl(lambda_, vec)
raise CoordinateError(
"'f_vec' for Kerr-Newman Metric is available only in Boyer-Lindquist Coordinates."
)
def _f_vec_bl(self, lambda_, vec):
"""
Returns f_vec for Kerr-Newman Metric \
in Boyer-Lindquist Coordinates
To be used for solving Geodesics ODE
Parameters
----------
lambda_ : float
Parameterizes current integration step
Used by ODE Solver
vec : array_like
Length-8 Vector, containing 4-Position & 4-Velocity
Returns
-------
~numpy.ndarray
f_vec for Kerr-Newman Metric in Boyer-Lindquist Coordinates
Numpy array of shape (8)
"""
chl = self.christoffels(vec[:4])
F_contra = self.em_tensor_contravariant(vec[:4])
g_cov = self.metric_covariant(vec[:4])
vals = np.zeros(shape=vec.shape, dtype=vec.dtype)
vals[:4] = vec[4:]
vals[4] = -2.0 * (
chl[0, 0, 1] * vec[4] * vec[5]
+ chl[0, 0, 2] * vec[4] * vec[6]
+ chl[0, 1, 3] * vec[5] * vec[7]
+ chl[0, 2, 3] * vec[6] * vec[7]
)
vals[5] = -1.0 * (
chl[1, 0, 0] * vec[4] * vec[4]
+ 2 * chl[1, 0, 3] * vec[4] * vec[7]
+ chl[1, 1, 1] * vec[5] * vec[5]
+ 2 * chl[1, 1, 2] * vec[5] * vec[6]
+ chl[1, 2, 2] * vec[6] * vec[6]
+ chl[1, 3, 3] * vec[7] * vec[7]
)
vals[6] = -1.0 * (
chl[2, 0, 0] * vec[4] * vec[4]
+ 2 * chl[2, 0, 3] * vec[4] * vec[7]
+ chl[2, 1, 1] * vec[5] * vec[5]
+ 2 * chl[2, 1, 2] * vec[5] * vec[6]
+ chl[2, 2, 2] * vec[6] * vec[6]
+ chl[2, 3, 3] * vec[7] * vec[7]
)
vals[7] = -2.0 * (
chl[3, 0, 1] * vec[4] * vec[5]
+ chl[3, 0, 2] * vec[4] * vec[6]
+ chl[3, 1, 3] * vec[5] * vec[7]
+ chl[3, 2, 3] * vec[6] * vec[7]
)
vals[4:] -= self.q.value * (F_contra @ vec[4:] @ g_cov)
return vals
def em_potential_covariant(self, x_vec):
"""
Returns Covariant Electromagnetic 4-Potential
Specific to Kerr-Newman Geometries
Parameters
----------
x_vec : array_like
Position 4-Vector
Returns
-------
~numpy.ndarray
Covariant Electromagnetic 4-Potential
Numpy array of shape (4,)
"""
_, r, th, _ = x_vec
M, a, Q = self.M.value, self.a.value, self.Q.value
alpha = super().alpha(M, a)
# Geometrized Charge
r_Q = np.sqrt((Q ** 2 * _G * _Cc) / _c ** 4)
rho2 = super().rho(r, th, M, a) ** 2
A = np.zeros((4,), dtype=float)
A[0] = r * r_Q / rho2
A[3] = -r * alpha * r_Q * np.sin(th) ** 2 / rho2
return A
def em_potential_contravariant(self, x_vec):
"""
Returns Contravariant Electromagnetic 4-Potential
Specific to Kerr-Newman Geometries
Parameters
----------
x_vec : array_like
Position 4-Vector
Returns
-------
~numpy.ndarray
Contravariant Electromagnetic 4-Potential
Numpy array of shape (4,)
"""
A_cov = self.em_potential_covariant(x_vec)
g_contra = self.metric_contravariant(x_vec)
return g_contra @ A_cov
def em_tensor_covariant(self, x_vec):
"""
Returns Covariant Electromagnetic Tensor
Specific to Kerr-Newman Geometries
Parameters
----------
x_vec : array_like
Position 4-Vector
Returns
-------
~numpy.ndarray
Covariant Electromagnetic Tensor
Numpy array of shape (4, 4)
"""
_, r, th, _ = x_vec
M, a, Q = self.M.value, self.a.value, self.Q.value
alpha = super().alpha(M, a)
r_Q = np.sqrt((Q ** 2 * _G * _Cc) / _c ** 4)
rho2 = super().rho(r, th, M, a) ** 2
# Partial derivatives of rho2
drho2_dr = 2 * r
drho2_dtheta = -(alpha ** 2 * np.sin(2 * th))
F = np.zeros((4, 4), dtype=float)
F[0, 1] = -(r_Q * (rho2 - drho2_dr * r)) / (rho2 ** 2)
F[1, 0] = -F[0, 1]
F[0, 2] = (r * r_Q * drho2_dtheta) / (rho2 ** 2)
F[2, 0] = -F[0, 2]
F[1, 3] = (
(1 / rho2 ** 2) * (alpha * r_Q * np.sin(th) ** 2) * (rho2 - 2 * r ** 2)
)
F[3, 1] = -F[1, 3]
F[2, 3] = (
(1 / rho2 ** 2)
* (alpha * r_Q * r * np.sin(2 * th))
* (rho2 + (alpha * np.sin(th)) ** 2)
)
F[3, 2] = -F[2, 3]
return F
def em_tensor_contravariant(self, x_vec):
"""
Returns Contravariant Electromagnetic Tensor
Specific to Kerr-Newman Geometries
Parameters
----------
x_vec : array_like
Position 4-Vector
Returns
-------
~numpy.ndarray
Contravariant Electromagnetic Tensor
Numpy array of shape (4, 4)
"""
F_cov = self.em_tensor_covariant(x_vec)
g_contra = self.metric_contravariant(x_vec)
F_contra = g_contra @ F_cov @ g_contra
return F_contra
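    # Hedged illustration (not executed): the tensors above feed the Lorentz-force
    # coupling used in `_f_vec_bl`. Assuming `metric` is an instance of this class
    # and `vec` is a length-8 state vector (4-position followed by 4-velocity):
    #
    #     F_contra = metric.em_tensor_contravariant(vec[:4])
    #     g_cov = metric.metric_covariant(vec[:4])
    #     lorentz_term = -metric.q.value * (F_contra @ vec[4:] @ g_cov)
    #
    # `metric` and `vec` are placeholder names introduced only for this sketch.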
|
var/spack/repos/builtin/packages/py-preshed/package.py | LiamBindle/spack | 2,360 | 11070464 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPreshed(PythonPackage):
"""preshed: Cython Hash Table for Pre-Hashed Keys."""
homepage = "https://github.com/explosion/preshed"
pypi = "preshed/preshed-3.0.2.tar.gz"
version('3.0.2', sha256='61d73468c97c1d6d5a048de0b01d5a6fd052123358aca4823cdb277e436436cb')
depends_on('py-setuptools', type='build')
    depends_on('[email protected]:2.0', type=('build', 'run'))
    depends_on('[email protected]:1.0', type=('build', 'run'))
|
3rdParty/V8/v7.9.317/tools/release/search_related_commits.py | rajeev02101987/arangodb | 12,278 | 11070467 | <gh_stars>1000+
#!/usr/bin/env python
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import operator
import os
import re
from subprocess import Popen, PIPE
import sys
def search_all_related_commits(
git_working_dir, start_hash, until, separator, verbose=False):
all_commits_raw = _find_commits_inbetween(
start_hash, until, git_working_dir, verbose)
if verbose:
print("All commits between <of> and <until>: " + all_commits_raw)
# Adding start hash too
all_commits = [start_hash]
all_commits.extend(all_commits_raw.splitlines())
all_related_commits = {}
  already_treated_commits = set()
for commit in all_commits:
if commit in already_treated_commits:
continue
related_commits = _search_related_commits(
git_working_dir, commit, until, separator, verbose)
if len(related_commits) > 0:
all_related_commits[commit] = related_commits
already_treated_commits.update(related_commits)
      already_treated_commits.add(commit)
return all_related_commits
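# Hedged usage sketch: the repository path and hashes below are placeholders, not
# real commits. The function returns a dict mapping each "leading" commit hash to
# the commits that reference it by hash, commit position or title.
def _example_usage():  # pragma: no cover
  related = search_all_related_commits(
      "/path/to/v8", "<start-hash>", "origin/master", None, verbose=False)
  for leader, followers in related.items():
    print(leader, "->", followers)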
def _search_related_commits(
git_working_dir, start_hash, until, separator, verbose=False):
if separator:
commits_between = _find_commits_inbetween(
start_hash, separator, git_working_dir, verbose)
if commits_between == "":
return []
# Extract commit position
original_message = git_execute(
git_working_dir,
["show", "-s", "--format=%B", start_hash],
verbose)
title = original_message.splitlines()[0]
  matches = re.search(r"(\{#)([0-9]*)(\})", original_message)
if not matches:
return []
commit_position = matches.group(2)
if verbose:
print("1.) Commit position to look for: " + commit_position)
search_range = start_hash + ".." + until
def git_args(grep_pattern):
return [
"log",
"--reverse",
"--grep=" + grep_pattern,
"--format=%H",
search_range,
]
found_by_hash = git_execute(
git_working_dir, git_args(start_hash), verbose).strip()
if verbose:
print("2.) Found by hash: " + found_by_hash)
found_by_commit_pos = git_execute(
git_working_dir, git_args(commit_position), verbose).strip()
if verbose:
print("3.) Found by commit position: " + found_by_commit_pos)
# Replace brackets or else they are wrongly interpreted by --grep
title = title.replace("[", "\\[")
title = title.replace("]", "\\]")
found_by_title = git_execute(
git_working_dir, git_args(title), verbose).strip()
if verbose:
print("4.) Found by title: " + found_by_title)
hits = (
_convert_to_array(found_by_hash) +
_convert_to_array(found_by_commit_pos) +
_convert_to_array(found_by_title))
hits = _remove_duplicates(hits)
if separator:
for current_hit in hits:
commits_between = _find_commits_inbetween(
separator, current_hit, git_working_dir, verbose)
if commits_between != "":
return hits
return []
return hits
def _find_commits_inbetween(start_hash, end_hash, git_working_dir, verbose):
commits_between = git_execute(
git_working_dir,
["rev-list", "--reverse", start_hash + ".." + end_hash],
verbose)
return commits_between.strip()
def _convert_to_array(string_of_hashes):
return string_of_hashes.splitlines()
def _remove_duplicates(array):
no_duplicates = []
for current in array:
if not current in no_duplicates:
no_duplicates.append(current)
return no_duplicates
def git_execute(working_dir, args, verbose=False):
command = ["git", "-C", working_dir] + args
if verbose:
print("Git working dir: " + working_dir)
print("Executing git command:" + str(command))
p = Popen(args=command, stdin=PIPE,
stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
rc = p.returncode
if rc != 0:
raise Exception(err)
if verbose:
print("Git return value: " + output)
return output
def _pretty_print_entry(hash, git_dir, pre_text, verbose):
text_to_print = git_execute(
git_dir,
["show",
"--quiet",
"--date=iso",
hash,
"--format=%ad # %H # %s"],
verbose)
return pre_text + text_to_print.strip()
def main(options):
all_related_commits = search_all_related_commits(
options.git_dir,
options.of[0],
options.until[0],
options.separator,
options.verbose)
sort_key = lambda x: (
git_execute(
options.git_dir,
["show", "--quiet", "--date=iso", x, "--format=%ad"],
options.verbose)).strip()
high_level_commits = sorted(all_related_commits.keys(), key=sort_key)
for current_key in high_level_commits:
if options.prettyprint:
yield _pretty_print_entry(
current_key,
options.git_dir,
"+",
options.verbose)
else:
yield "+" + current_key
found_commits = all_related_commits[current_key]
for current_commit in found_commits:
if options.prettyprint:
yield _pretty_print_entry(
current_commit,
options.git_dir,
"| ",
options.verbose)
else:
yield "| " + current_commit
if __name__ == "__main__": # pragma: no cover
parser = argparse.ArgumentParser(
"This tool analyzes the commit range between <of> and <until>. "
"It finds commits which belong together e.g. Implement/Revert pairs and "
"Implement/Port/Revert triples. All supplied hashes need to be "
"from the same branch e.g. master.")
parser.add_argument("-g", "--git-dir", required=False, default=".",
help="The path to your git working directory.")
parser.add_argument("--verbose", action="store_true",
help="Enables a very verbose output")
parser.add_argument("of", nargs=1,
help="Hash of the commit to be searched.")
parser.add_argument("until", nargs=1,
help="Commit when searching should stop")
parser.add_argument("--separator", required=False,
help="The script will only list related commits "
"which are separated by hash <--separator>.")
parser.add_argument("--prettyprint", action="store_true",
help="Pretty prints the output")
args = sys.argv[1:]
options = parser.parse_args(args)
for current_line in main(options):
print(current_line)
|
imageatm/utils/io.py | vishalbelsare/imageatm | 215 | 11070494 | import json
import yaml
from pathlib import Path
from typing import Union
def load_json(file_path: Path) -> Union[dict, list]:
with open(file_path, 'r') as f:
return json.load(f)
def save_json(data: Union[dict, list], target_file: str):
with open(target_file, 'w') as f:
json.dump(data, f, indent=2, sort_keys=True)
def load_yaml(file_path: str) -> Union[dict, list]:
with open(file_path, 'r') as f:
return yaml.safe_load(f)
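# Hedged usage sketch ('config.yml' and 'samples.json' are placeholder paths):
if __name__ == '__main__':
    config = load_yaml('config.yml')            # parsed YAML content
    samples = load_json(Path('samples.json'))   # list or dict from JSON
    save_json(samples, 'samples_copy.json')     # written with indent=2, sorted keys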
|
cctbx/maptbx/prepare_map_for_docking.py | whart222/cctbx_project | 155 | 11070499 | <reponame>whart222/cctbx_project
from __future__ import print_function
from __future__ import division
import math
from cctbx.maptbx import mask
from scitbx.array_family import flex
from scitbx.dtmin.minimizer import Minimizer
from scitbx.dtmin.refinebase import RefineBase
from scitbx.dtmin.reparams import Reparams
from scitbx.dtmin.bounds import Bounds
from cctbx import adptbx
import sys
class RefineCryoemErrors(RefineBase):
# Set up refinement class for dtmin minimiser (based on Phaser minimiser)
def __init__(self, mc1, mc2, ssqr_bins, target_spectrum, start_x):
RefineBase.__init__(self)
# Precompute data that will be used repeatedly for fgh evaluation
f1 = flex.abs(mc1.data())
f2 = flex.abs(mc2.data())
p1 = mc1.phases().data()
p2 = mc2.phases().data()
self.sumfsqr_miller = mc1.customized_copy(data=flex.pow2(f1) + flex.pow2(f2))
self.sumfsqr_miller.use_binner_of(mc1)
self.f1f2cos_miller = mc1.customized_copy(data=f1 * f2 * flex.cos(p2 - p1))
self.n_bins = mc1.binner().n_bins_used() # Assume consistent binning
self.ssqr_bins = ssqr_bins
self.unit_cell = mc1.unit_cell()
assert (self.n_bins == len(ssqr_bins))
self.target_spectrum = target_spectrum
self.start_x = start_x
self.x = start_x[:] # Full set of parameters
recip_params = self.unit_cell.reciprocal_parameters()
astar = recip_params[0]
bstar = recip_params[1]
cstar = recip_params[2]
self.large_shifts_beta = [astar * astar, bstar * bstar, cstar * cstar, astar * bstar, astar * cstar, bstar * cstar]
def target_gradient_hessian(self, do_gradient=True, do_hessian=True):
if (do_hessian):
assert (do_gradient)
# Extract parameters into variables with sensible names
n_bins = self.n_bins
i_par = 0
asqr_scale = self.x[i_par]
i_par += 1
sigmaT_bins = self.x[i_par:i_par + n_bins]
i_par += n_bins
asqr_beta = tuple(self.x[i_par:i_par + 6])
i_par += 6
sigmaE_scale = self.x[i_par]
i_par += 1
sigmaE_bins = self.x[i_par:i_par + n_bins]
i_par += n_bins
sigmaE_beta = tuple(self.x[i_par:i_par + 6])
i_par += 6
assert (i_par == len(self.x))
# Initialise function, gradient and Hessian with zeros
f = 0.
g = flex.double(self.nmp, 0)
h = flex.double(self.nmp * self.nmp, 0)
h.reshape(flex.grid(self.nmp, self.nmp))
twologpi = 2 * math.log(math.pi)
# Loop over bins to accumulate target, gradient, Hessian
ma = self.sumfsqr_miller # Miller array holding associated information
i_bin_used = 0 # Keep track in case full range of bins not used
for i_bin in ma.binner().range_used():
sel = ma.binner().selection(i_bin)
ma_sel = ma.select(sel)
# Make Miller array as basis for computing aniso corrections in bin
# Let u = A^2*sigmaT to simplify computation of derivatives
ones_array = flex.double(ma_sel.size(), 1)
all_ones = ma_sel.customized_copy(data=ones_array)
beta_miller_A = all_ones.apply_debye_waller_factors(
u_star=adptbx.beta_as_u_star(asqr_beta))
u_terms = (asqr_scale * sigmaT_bins[i_bin_used]
* self.target_spectrum[i_bin_used]) * beta_miller_A.data()
beta_miller_E = all_ones.apply_debye_waller_factors(
u_star=adptbx.beta_as_u_star(sigmaE_beta))
sigmaE_terms = ((sigmaE_scale * sigmaE_bins[i_bin_used])
* beta_miller_E.data())
# Variance term per reflection is function of these terms
u2sigE = 2 * u_terms + sigmaE_terms
var_terms = u2sigE * sigmaE_terms
f1f2cos = self.f1f2cos_miller.data().select(sel)
sumfsqr = self.sumfsqr_miller.data().select(sel)
# Leave out constant twologpi*ma_sel.size()
minusLL_terms = (sumfsqr * (u_terms + sigmaE_terms)
- 2 * u_terms * f1f2cos) / var_terms + flex.log(var_terms)
f += flex.sum(minusLL_terms)
if do_gradient:
# Define some intermediate results needed below
u2sigE2 = flex.pow2(u2sigE)
sigmaE_sqr = flex.pow2(sigmaE_terms)
sumsqrcos = sumfsqr + 2 * f1f2cos
hyposqr = sumfsqr - 2 * f1f2cos
if (self.refine_Asqr_scale or self.refine_sigmaT_bins
or self.refine_Asqr_beta):
dmLL_by_du_terms = (2 * u2sigE - sumsqrcos) / u2sigE2
if (self.refine_sigmaE_scale or self.refine_sigmaE_bins
or self.refine_sigmaE_beta):
dmLL_by_dsigE_terms = (
(2 * sigmaE_terms - hyposqr) / (2 * sigmaE_sqr)
- sumsqrcos / (2 * u2sigE2) + 1. / (u2sigE))
if (self.refine_Asqr_beta or self.refine_sigmaE_beta):
h_as_double, k_as_double, l_as_double = (
ma_sel.indices().as_vec3_double().parts())
hh = flex.pow2(h_as_double)
kk = flex.pow2(k_as_double)
ll = flex.pow2(l_as_double)
hk = h_as_double * k_as_double
hl = h_as_double * l_as_double
kl = k_as_double * l_as_double
i_par = 0 # Keep track of index for unrefined parameters
i_ref = 0 # Keep track of refined parameters
if self.refine_Asqr_scale: # Only affects U
du_by_dAsqr_scale = u_terms / asqr_scale
g[i_ref] += flex.sum(dmLL_by_du_terms * du_by_dAsqr_scale)
i_ref += 1
i_par += 1
if self.refine_sigmaT_bins: # Only affects U, just current bin
du_by_dsigmaT_bin = (asqr_scale
* self.target_spectrum[i_bin_used]) * beta_miller_A.data()
i_sigmaT_bin = i_ref+i_bin_used # Save for restraint terms below
g[i_sigmaT_bin] += flex.sum(dmLL_by_du_terms*du_by_dsigmaT_bin)
i_ref += self.n_bins
i_par += self.n_bins
if self.refine_Asqr_beta: # Only affects U
du_by_dbetaA11 = -hh * u_terms
du_by_dbetaA22 = -kk * u_terms
du_by_dbetaA33 = -ll * u_terms
du_by_dbetaA12 = -2 * hk * u_terms
du_by_dbetaA13 = -2 * hl * u_terms
du_by_dbetaA23 = -2 * kl * u_terms
g[i_ref] += flex.sum(dmLL_by_du_terms * du_by_dbetaA11)
g[i_ref+1] += flex.sum(dmLL_by_du_terms * du_by_dbetaA22)
g[i_ref+2] += flex.sum(dmLL_by_du_terms * du_by_dbetaA33)
g[i_ref+3] += flex.sum(dmLL_by_du_terms * du_by_dbetaA12)
g[i_ref+4] += flex.sum(dmLL_by_du_terms * du_by_dbetaA13)
g[i_ref+5] += flex.sum(dmLL_by_du_terms * du_by_dbetaA23)
i_ref += 6
i_par += 6
# Note that sigmaE_scale is fixed if sigmaE_bins and/or
# sigmaE_beta are refined
if self.refine_sigmaE_scale:
dsigE_by_dscaleE = sigmaE_terms/sigmaE_scale
g[i_ref] += flex.sum(dmLL_by_dsigE_terms * dsigE_by_dscaleE)
i_ref += 1
i_par += 1
if self.refine_sigmaE_bins: # Just current bin
dsigE_by_dsigmaE_bin = sigmaE_scale*beta_miller_E.data()
g[i_ref+i_bin_used] += flex.sum(
dmLL_by_dsigE_terms * dsigE_by_dsigmaE_bin)
i_ref += self.n_bins
i_par += self.n_bins
if self.refine_sigmaE_beta: # Only affects SigmaE
dsigE_by_dbetaE11 = -hh * sigmaE_terms
dsigE_by_dbetaE22 = -kk * sigmaE_terms
dsigE_by_dbetaE33 = -ll * sigmaE_terms
dsigE_by_dbetaE12 = -2 * hk * sigmaE_terms
dsigE_by_dbetaE13 = -2 * hl * sigmaE_terms
dsigE_by_dbetaE23 = -2 * kl * sigmaE_terms
g[i_ref] += flex.sum(
dmLL_by_dsigE_terms * dsigE_by_dbetaE11)
g[i_ref+1] += flex.sum(
dmLL_by_dsigE_terms * dsigE_by_dbetaE22)
g[i_ref+2] += flex.sum(
dmLL_by_dsigE_terms * dsigE_by_dbetaE33)
g[i_ref+3] += flex.sum(
dmLL_by_dsigE_terms * dsigE_by_dbetaE12)
g[i_ref+4] += flex.sum(
dmLL_by_dsigE_terms * dsigE_by_dbetaE13)
g[i_ref+5] += flex.sum(
dmLL_by_dsigE_terms * dsigE_by_dbetaE23)
i_ref += 6
i_par += 6
assert (i_par == len(self.x))
assert (i_ref == self.nmp)
if do_hessian:
u2sigE3 = u2sigE * u2sigE2
if (self.refine_Asqr_scale or self.refine_sigmaT_bins
or self.refine_Asqr_beta):
d2mLL_by_du2_terms = 4 * (
sumsqrcos - 2 * u_terms - sigmaE_terms) / u2sigE3
if (self.refine_sigmaE_scale or self.refine_sigmaE_bins
or self.refine_sigmaE_beta):
d2mLL_by_dsigE2_terms = ( (hyposqr - sigmaE_terms)
/ (sigmaE_sqr * sigmaE_terms) + sumsqrcos/u2sigE3 - 1./u2sigE2)
if (self.refine_Asqr_beta or self.refine_sigmaE_beta):
hh_sqr = flex.pow2(hh)
kk_sqr = flex.pow2(kk)
ll_sqr = flex.pow2(ll)
hk_sqr = flex.pow2(hk)
hl_sqr = flex.pow2(hl)
kl_sqr = flex.pow2(kl)
i_par = 0 # Keep track of index for unrefined parameters
i_ref = 0 # Keep track of refined parameters
# Note that various second derivatives are zero, i.e. of:
# u wrt Asqr_scale and sigmaT_bins
# sigmaE wrt sigmaE_bins and sigmaE_scale
if self.refine_Asqr_scale: # Only affects U
h[i_ref,i_ref] += (flex.sum(d2mLL_by_du2_terms
* flex.pow2(du_by_dAsqr_scale)))
i_ref += 1
i_par += 1
if self.refine_sigmaT_bins: # Only affects U, current bin
h[i_sigmaT_bin,i_sigmaT_bin] += flex.sum(
d2mLL_by_du2_terms * flex.pow2(du_by_dsigmaT_bin))
i_ref += self.n_bins
i_par += self.n_bins
if self.refine_Asqr_beta: # Only affects U
d2u_by_dbetaA11_2 = hh_sqr * u_terms
d2u_by_dbetaA22_2 = kk_sqr * u_terms
d2u_by_dbetaA33_2 = ll_sqr * u_terms
d2u_by_dbetaA12_2 = 4 * hk_sqr * u_terms
d2u_by_dbetaA13_2 = 4 * hl_sqr * u_terms
d2u_by_dbetaA23_2 = 4 * kl_sqr * u_terms
h[i_ref, i_ref] += (
flex.sum(d2mLL_by_du2_terms * flex.pow2(du_by_dbetaA11))
+ flex.sum(dmLL_by_du_terms * d2u_by_dbetaA11_2) )
h[i_ref+1,i_ref+1] += (
flex.sum(d2mLL_by_du2_terms * flex.pow2(du_by_dbetaA22))
+ flex.sum(dmLL_by_du_terms * d2u_by_dbetaA22_2) )
h[i_ref+2,i_ref+2] += (
flex.sum(d2mLL_by_du2_terms * flex.pow2(du_by_dbetaA33))
+ flex.sum(dmLL_by_du_terms * d2u_by_dbetaA33_2) )
h[i_ref+3,i_ref+3] += (
flex.sum(d2mLL_by_du2_terms * flex.pow2(du_by_dbetaA12))
+ flex.sum(dmLL_by_du_terms * d2u_by_dbetaA12_2) )
h[i_ref+4,i_ref+4] += (
flex.sum(d2mLL_by_du2_terms * flex.pow2(du_by_dbetaA13))
+ flex.sum(dmLL_by_du_terms * d2u_by_dbetaA13_2) )
h[i_ref+5,i_ref+5] += (
flex.sum(d2mLL_by_du2_terms * flex.pow2(du_by_dbetaA23))
+ flex.sum(dmLL_by_du_terms * d2u_by_dbetaA23_2) )
i_ref += 6
i_par += 6
# Note that sigmaE_scale and either sigmaE_bins or sigmaE_beta are
# mutually exclusive in practice. If scale and bins were refined
# simultaneously with no restraints, there would be one redundant parameter
if self.refine_sigmaE_scale:
h[i_ref, i_ref] += flex.sum(
d2mLL_by_dsigE2_terms * flex.pow2(dsigE_by_dscaleE))
i_ref += 1
i_par += 1
if self.refine_sigmaE_bins:
h[i_ref+i_bin_used, i_ref+i_bin_used] += flex.sum(
d2mLL_by_dsigE2_terms * flex.pow2(dsigE_by_dsigmaE_bin))
i_ref += self.n_bins
i_par += self.n_bins
if self.refine_sigmaE_beta: # Only affects SigmaE
d2sigE_by_dbetaE11_2 = hh_sqr * sigmaE_terms
d2sigE_by_dbetaE22_2 = kk_sqr * sigmaE_terms
d2sigE_by_dbetaE33_2 = ll_sqr * sigmaE_terms
d2sigE_by_dbetaE12_2 = 4 * hk_sqr * sigmaE_terms
d2sigE_by_dbetaE13_2 = 4 * hl_sqr * sigmaE_terms
d2sigE_by_dbetaE23_2 = 4 * kl_sqr * sigmaE_terms
h[i_ref, i_ref] += ( flex.sum(d2mLL_by_dsigE2_terms * flex.pow2(dsigE_by_dbetaE11))
+ flex.sum(dmLL_by_dsigE_terms * d2sigE_by_dbetaE11_2) )
h[i_ref+1,i_ref+1] += ( flex.sum(d2mLL_by_dsigE2_terms * flex.pow2(dsigE_by_dbetaE22))
+ flex.sum(dmLL_by_dsigE_terms * d2sigE_by_dbetaE22_2) )
h[i_ref+2,i_ref+2] += ( flex.sum(d2mLL_by_dsigE2_terms * flex.pow2(dsigE_by_dbetaE33))
+ flex.sum(dmLL_by_dsigE_terms * d2sigE_by_dbetaE33_2) )
h[i_ref+3,i_ref+3] += ( flex.sum(d2mLL_by_dsigE2_terms * flex.pow2(dsigE_by_dbetaE12))
+ flex.sum(dmLL_by_dsigE_terms * d2sigE_by_dbetaE12_2) )
h[i_ref+4,i_ref+4] += ( flex.sum(d2mLL_by_dsigE2_terms * flex.pow2(dsigE_by_dbetaE13))
+ flex.sum(dmLL_by_dsigE_terms * d2sigE_by_dbetaE13_2) )
h[i_ref+5,i_ref+5] += ( flex.sum(d2mLL_by_dsigE2_terms * flex.pow2(dsigE_by_dbetaE23))
+ flex.sum(dmLL_by_dsigE_terms * d2sigE_by_dbetaE23_2) )
i_ref += 6
i_par += 6
assert (i_par == len(self.x))
assert (i_ref == self.nmp)
# Add restraint terms
# Restrain log of sigmaT_bins to 0, but downweighting low resolution
d_bin = math.sqrt(self.ssqr_bins[i_bin_used])
sigmascale = 0.15 + 0.001 / d_bin ** 3
stbin = sigmaT_bins[i_bin_used]
logbin = math.log(stbin)
f += (logbin/sigmascale)**2 / 2
if (do_gradient and self.refine_sigmaT_bins):
g[i_sigmaT_bin] += logbin / (stbin * sigmascale**2)
if do_hessian:
h[i_sigmaT_bin,i_sigmaT_bin] += (1.-logbin)/(stbin*sigmascale)**2
i_bin_used += 1
return (f, g, h, True)
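  # Summary of the per-reflection objective accumulated above (constant 2*log(pi)
  # terms omitted): with u = Asqr_scale*sigmaT*aniso_A and sigE = sigmaE per
  # reflection,
  #   -LL = [ (|F1|^2+|F2|^2)*(u+sigE) - 2*u*|F1||F2|*cos(phi2-phi1) ]
  #         / [ (2*u+sigE)*sigE ]  +  log( (2*u+sigE)*sigE )
  # which is what `minusLL_terms` accumulates bin by bin; the gradient and Hessian
  # blocks follow by the chain rule through u and sigE.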
def target(self):
f_g_h = self.target_gradient_hessian(do_gradient=False, do_hessian=False)
return f_g_h[0]
def target_gradient(self):
f_g_h = self.target_gradient_hessian(do_hessian=False)
f = f_g_h[0]
g = f_g_h[1]
return (f, g)
def get_macrocycle_parameters(self):
if len(self.refine_mask) == 0: # All parameters being refined
return self.x
mp = [] # Parameters for this macrocycle
for i in range(len(self.x)):
if self.refine_mask[i]:
mp.append(self.x[i])
assert (len(mp) == self.nmp)
return mp
def set_macrocycle_parameters(self, newx):
if len(self.refine_mask) == 0: # All parameters being refined
self.x = newx
else:
npref = 0
for i in range(len(self.x)):
if self.refine_mask[i]:
self.x[i] = newx[npref]
npref += 1
assert (npref == self.nmp)
def macrocycle_large_shifts(self):
i_par = 0 # Keep track of index for unrefined parameters
large_shifts = []
if self.refine_Asqr_scale:
large_shifts.append(self.start_x[i_par]/10.)
i_par += 1
if self.refine_sigmaT_bins:
for i_bin in range(self.n_bins):
large_shifts.append(0.05)
i_par += self.n_bins
if self.refine_Asqr_beta:
large_shifts.extend(self.large_shifts_beta)
i_par += 6
if self.refine_sigmaE_scale:
large_shifts.append(0.01)
i_par += 1
if self.refine_sigmaE_bins:
for i_bin in range(self.n_bins):
large_shifts.append(self.start_x[i_par+i_bin]/30.)
i_par += self.n_bins
if self.refine_sigmaE_beta:
large_shifts.extend(self.large_shifts_beta)
i_par += 6
assert (i_par == len(self.x))
assert (len(large_shifts) == self.nmp)
return large_shifts
def set_macrocycle_protocol(self, macrocycle_protocol):
# Possible parameters include overall scale of signal,
# bin parameters for signal (BEST-like curve), anisotropy tensor for signal,
# bin parameters for error, anisotropy tensor for error
# Start with everything being refined, turn some things off for different protocols
self.refine_mask = [] # Indicates "all" if left empty
self.refine_Asqr_scale = True
self.refine_sigmaT_bins = True
self.refine_Asqr_beta = True
self.refine_sigmaE_scale = True
self.refine_sigmaE_bins = True
self.refine_sigmaE_beta = True
# For each protocol, define variables that aren't refined
if macrocycle_protocol == ["default"]:
self.refine_sigmaE_scale = False
elif macrocycle_protocol == ["Eprior"]:
self.refine_sigmaE_bins = False
self.refine_sigmaE_beta = False
else:
print("Macrocycle protocol", macrocycle_protocol, " not recognised")
sys.stdout.flush()
      sys.exit(1)
# Now accumulate mask
self.nmp = 0
if self.refine_Asqr_scale:
self.refine_mask.append(True)
self.nmp += 1
else:
self.refine_mask.append(False)
if self.refine_sigmaT_bins:
self.refine_mask.extend([True for i in range(self.n_bins)])
self.nmp += self.n_bins
else:
self.refine_mask.extend([False for i in range(self.n_bins)])
if self.refine_Asqr_beta:
self.refine_mask.extend([True for i in range(6)])
self.nmp += 6
else:
self.refine_mask.extend([False for i in range(6)])
if self.refine_sigmaE_scale:
self.refine_mask.append(True)
self.nmp += 1
else:
self.refine_mask.append(False)
if self.refine_sigmaE_bins:
self.refine_mask.extend([True for i in range(self.n_bins)])
self.nmp += self.n_bins
else:
self.refine_mask.extend([False for i in range(self.n_bins)])
if self.refine_sigmaE_beta:
self.refine_mask.extend([True for i in range(6)])
self.nmp += 6
else:
self.refine_mask.extend([False for i in range(6)])
assert (len(self.refine_mask) == len(self.x))
def macrocycle_parameter_names(self, full_list=False):
parameter_names = []
if full_list or self.refine_Asqr_scale:
parameter_names.append("Asqr_scale")
if full_list or self.refine_sigmaT_bins:
for i in range(self.n_bins):
parameter_names.append("SigmaT_bin#" + str(i + 1))
if full_list or self.refine_Asqr_beta:
parameter_names.append("Asqr_beta11")
parameter_names.append("Asqr_beta22")
parameter_names.append("Asqr_beta33")
parameter_names.append("Asqr_beta12")
parameter_names.append("Asqr_beta13")
parameter_names.append("Asqr_beta23")
if full_list or self.refine_sigmaE_scale:
parameter_names.append("sigmaE_scale")
if full_list or self.refine_sigmaE_bins:
for i in range(self.n_bins):
parameter_names.append("sigmaE_bin#" + str(i + 1))
if full_list or self.refine_sigmaE_beta:
parameter_names.append("sigmaE_beta11")
parameter_names.append("sigmaE_beta22")
parameter_names.append("sigmaE_beta33")
parameter_names.append("sigmaE_beta12")
parameter_names.append("sigmaE_beta13")
parameter_names.append("sigmaE_beta23")
if not full_list:
assert (len(parameter_names) == self.nmp)
else:
assert (len(parameter_names) == len(self.x))
return parameter_names
def reparameterize(self):
i_par = 0 # Keep track of index for unrefined parameters
repar = []
if self.refine_Asqr_scale:
repar.append(Reparams(True,0.))
i_par += 1
if self.refine_sigmaT_bins:
repar.extend([Reparams(True, 0.) for i in range(self.n_bins)])
i_par += self.n_bins
if self.refine_Asqr_beta:
repar.extend([Reparams(False) for i in range(6)])
i_par += 6
if self.refine_sigmaE_scale:
repar.append(Reparams(True,0.))
i_par += 1
if self.refine_sigmaE_bins:
repar.extend([Reparams(True, 0.) for i in range(self.n_bins)])
i_par += self.n_bins
if self.refine_sigmaE_beta:
repar.extend([Reparams(False) for i in range(6)])
i_par += 6
assert (i_par == len(self.x))
assert (len(repar) == self.nmp)
return repar
def bounds(self):
i_par = 0
bounds_list = []
if self.refine_Asqr_scale:
this_bound = Bounds()
this_bound.lower_on(0.001*self.start_x[i_par])
bounds_list.append(this_bound)
i_par += 1
if self.refine_sigmaT_bins:
this_bound = Bounds()
this_bound.lower_on(0.001)
for i in range(self.n_bins):
bounds_list.append(this_bound)
i_par += self.n_bins
if self.refine_Asqr_beta:
this_bound = Bounds()
this_bound.off()
for i in range(6):
bounds_list.append(this_bound)
i_par += 6
if self.refine_sigmaE_scale:
this_bound = Bounds()
this_bound.lower_on(0.01)
bounds_list.append(this_bound)
i_par += 1
if self.refine_sigmaE_bins:
for i_bin in range(self.n_bins):
this_bound = Bounds()
this_bound.lower_on(0.001*self.start_x[i_par+i_bin])
bounds_list.append(this_bound)
i_par += self.n_bins
if self.refine_sigmaE_beta:
this_bound = Bounds()
this_bound.off()
for i in range(6):
bounds_list.append(this_bound)
i_par += 6
assert (i_par == len(self.x))
assert (len(bounds_list) == self.nmp)
return bounds_list
def current_statistics(self, level=3, full_list=False):
self.log_tab_printf(1, level, "Log-likelihood: %10.6g\n", -self.target())
self.log_blank(level)
parameter_names = self.macrocycle_parameter_names(full_list=full_list)
if full_list:
self.log_tab(1, level, "All parameters")
else:
self.log_tab(1, level, "Refined parameters")
list_all = (full_list or len(self.refine_mask) == 0)
iref = 0
for i in range(len(self.x)):
if (list_all or self.refine_mask[i]):
self.log_tab_printf(2, level, "%-15s %10.5g\n", (parameter_names[iref], self.x[i]))
iref += 1
def initial_statistics(self):
level=2
self.log_blank(level)
self.log_tab(1, level, "Initial statistics")
self.current_statistics(level=level, full_list=True)
def final_statistics(self):
level=2
self.log_blank(level)
self.log_tab(1, level, "Final statistics")
self.current_statistics(level=level, full_list=True)
def cleanup(self):
# Take out overall scale and isotropic B from sigmaT_bins, put into asqr_scale and asqr_beta
# Take out overall isotropic B from anisotropy in errors, put into bins
n_bins = self.n_bins
# Asqr_scale = self.x[0] # Unneeded parameters listed for completeness
sigmaT_bins = self.x[1:n_bins + 1]
# Asqr_beta = self.x[n_bins + 1 : n_bins + 7]
# sigmaE_scale = self.x[n_bins + 7]
# sigmaE_bins = self.x[n_bins + 8 : 2*n_bins + 8]
sigmaE_beta = tuple(self.x[2*n_bins + 8 : 2*n_bins + 14])
sumw = sumwx = sumwa = sumwx2 = sumwxa = 0.
for i_bin in range(n_bins):
x = self.ssqr_bins[i_bin]
d_bin = math.sqrt(x)
a = math.log(sigmaT_bins[i_bin])
sigmascale = 0.15 + 0.001 / d_bin**3 # Downweight low resolution as in refinement
w = 1./sigmascale**2
sumw += w
sumwx += w * x
sumwa += w * a
sumwx2 += w * x ** 2
sumwxa += w * x * a
if self.refine_sigmaT_bins:
# Make sigmaT_bins values as close as possible to 1 by taking out overall scale
# and B, and putting them into asqr_scale and asqr_beta terms
slope_a = (sumw * sumwxa - (sumwx * sumwa)) / (sumw * sumwx2 - sumwx ** 2)
intercept_a = (sumwa - slope_a * sumwx) / sumw
scale_a = math.exp(intercept_a)
deltaB_a = -4 * slope_a
self.x[0] = self.x[0] * scale_a # Update overall scale
for i_bin in range(n_bins): # Take slope out of sigmaT_bins
self.x[1 + i_bin] = self.x[1 + i_bin] / scale_a * math.exp(deltaB_a * self.ssqr_bins[i_bin] / 4)
delta_beta_a = list(adptbx.u_iso_as_beta(self.unit_cell,adptbx.b_as_u(deltaB_a)))
for i_beta in range(6): # Then put slope into asqr_beta
self.x[1 + n_bins + i_beta] = self.x[1 + n_bins + i_beta] + delta_beta_a[i_beta]
if self.refine_sigmaE_beta:
# Extract isotropic B from sigmaE_beta, put it into sigmaE_bins
sigmaE_u_cart = adptbx.beta_as_u_cart(self.unit_cell, sigmaE_beta)
sigmaE_u_iso = adptbx.u_cart_as_u_iso(sigmaE_u_cart)
sigmaE_delta_beta = list(adptbx.u_iso_as_beta(self.unit_cell, sigmaE_u_iso))
sigmaE_b_iso = adptbx.u_as_b(sigmaE_u_iso)
for i_bin in range(n_bins): # Put isotropic B into bins
self.x[8 + n_bins + i_bin] = (self.x[8 + n_bins + i_bin]
* math.exp(-sigmaE_b_iso * self.ssqr_bins[i_bin] / 4))
for i_beta in range(6): # Remove isotropic B from sigmaE_beta
self.x[8 + 2*n_bins + i_beta] = (
self.x[8 + 2*n_bins + i_beta] - sigmaE_delta_beta[i_beta])
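# Layout of the parameter vector self.x used by RefineCryoemErrors (2*n_bins + 14 values):
#   [ Asqr_scale,
#     sigmaT_bins (n_bins BEST-curve correction factors),
#     Asqr_beta (6 anisotropy tensor components),
#     sigmaE_scale,
#     sigmaE_bins (n_bins error terms),
#     sigmaE_beta (6 anisotropy tensor components) ]
# run_refine_cryoem_errors below assembles start_params in this order and unpacks the
# refined values the same way.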
def default_target_spectrum(ssqr):
# Placeholder for something better based on analysis of cryoEM reconstructions
# Scaled data from BEST curve. Original data obtained from <NAME>, then
# rescaled to correspond at higher resolution to the average X-ray scattering
# factor from proteins atoms (with average atomic composition)
best_data = ((0.009, 3.40735),
(0.013092, 2.9006),
(0.0171839, 2.33083),
(0.0212759, 1.80796),
(0.0253679, 1.65133),
(0.0294599, 1.75784),
(0.0335518, 2.06865),
(0.0376438, 2.57016),
(0.0417358, 3.13121),
(0.0458278, 3.62596),
(0.0499197, 3.92071),
(0.0540117, 3.98257),
(0.0581037, 3.91846),
(0.0621956, 3.80829),
(0.0662876, 3.69517),
(0.0703796, 3.59068),
(0.0744716, 3.44971),
(0.0785635, 3.30765),
(0.0826555, 3.16069),
(0.0867475, 2.98656),
(0.0908395, 2.77615),
(0.0949314, 2.56306),
(0.0990234, 2.37314),
(0.103115, 2.22874),
(0.107207, 2.09477),
(0.111299, 1.98107),
(0.115391, 1.8652),
(0.119483, 1.75908),
(0.123575, 1.67093),
(0.127667, 1.59257),
(0.131759, 1.52962),
(0.135851, 1.48468),
(0.139943, 1.45848),
(0.144035, 1.43042),
(0.148127, 1.40953),
(0.152219, 1.37291),
(0.156311, 1.34217),
(0.160403, 1.3308),
(0.164495, 1.32782),
(0.168587, 1.30862),
(0.172679, 1.31319),
(0.176771, 1.30907),
(0.180863, 1.31456),
(0.184955, 1.31055),
(0.189047, 1.31484),
(0.193139, 1.31828),
(0.197231, 1.32321),
(0.201323, 1.30853),
(0.205415, 1.30257),
(0.209507, 1.2851),
(0.213599, 1.26912),
(0.217691, 1.24259),
(0.221783, 1.24119),
(0.225875, 1.2382),
(0.229967, 1.21605),
(0.234059, 1.17269),
(0.23815, 1.13909),
(0.242242, 1.1165),
(0.246334, 1.08484),
(0.250426, 1.0495),
(0.254518, 1.01289),
(0.25861, 0.974819),
(0.262702, 0.940975),
(0.266794, 0.900938),
(0.270886, 0.861657),
(0.274978, 0.830192),
(0.27907, 0.802167),
(0.283162, 0.780746),
(0.287254, 0.749194),
(0.291346, 0.720884),
(0.295438, 0.694409),
(0.29953, 0.676239),
(0.303622, 0.650672),
(0.307714, 0.632438),
(0.311806, 0.618569),
(0.315898, 0.605762),
(0.31999, 0.591398),
(0.324082, 0.579308),
(0.328174, 0.572076),
(0.332266, 0.568138),
(0.336358, 0.559537),
(0.34045, 0.547927),
(0.344542, 0.539319),
(0.348634, 0.529009),
(0.352726, 0.516954),
(0.356818, 0.512218),
(0.36091, 0.511836),
(0.365002, 0.511873),
(0.369094, 0.506957),
(0.373186, 0.502738),
(0.377278, 0.50191),
(0.38137, 0.492422),
(0.385462, 0.488461),
(0.389553, 0.483436),
(0.393645, 0.481468),
(0.397737, 0.473786),
(0.401829, 0.468684),
(0.405921, 0.468291),
(0.410013, 0.46645),
(0.414105, 0.4643),
(0.418197, 0.45641),
(0.422289, 0.450462),
(0.426381, 0.444678),
(0.430473, 0.443807),
(0.434565, 0.441158),
(0.438657, 0.441303),
(0.442749, 0.437144),
(0.446841, 0.428504),
(0.450933, 0.420459),
(0.455025, 0.413754),
(0.459117, 0.412064),
(0.463209, 0.406677),
(0.467301, 0.40253),
(0.471393, 0.396454),
(0.475485, 0.393192),
(0.479577, 0.390452),
(0.483669, 0.38408),
(0.487761, 0.379456),
(0.491853, 0.373123),
(0.495945, 0.374026),
(0.500037, 0.373344),
(0.504129, 0.377639),
(0.508221, 0.374029),
(0.512313, 0.374691),
(0.516405, 0.371632),
(0.520497, 0.370724),
(0.524589, 0.366095),
(0.528681, 0.369447),
(0.532773, 0.369043),
(0.536865, 0.368967),
(0.540956, 0.36583),
(0.545048, 0.370593),
(0.54914, 0.371047),
(0.553232, 0.372723),
(0.557324, 0.371915),
(0.561416, 0.372882),
(0.565508, 0.371052),
(0.5696, 0.36775),
(0.573692, 0.369884),
(0.577784, 0.374098),
(0.581876, 0.374169),
(0.585968, 0.37261),
(0.59006, 0.372356),
(0.594152, 0.377055),
(0.598244, 0.3817),
(0.602336, 0.381867),
(0.606428, 0.377746),
(0.61052, 0.377157),
(0.614612, 0.376604),
(0.618704, 0.37532),
(0.622796, 0.372488),
(0.626888, 0.373312),
(0.63098, 0.377505),
(0.635072, 0.381011),
(0.639164, 0.379326),
(0.643256, 0.380193),
(0.647348, 0.381122),
(0.65144, 0.387213),
(0.655532, 0.391928),
(0.659624, 0.398986),
(0.663716, 0.402951),
(0.667808, 0.405893),
(0.6719, 0.40217),
(0.675992, 0.401806),
(0.680084, 0.404238),
(0.684176, 0.409404),
(0.688268, 0.413486),
(0.692359, 0.413167),
(0.696451, 0.414008),
(0.700543, 0.417128),
(0.704635, 0.420275),
(0.708727, 0.423617),
(0.712819, 0.42441),
(0.716911, 0.426445),
(0.721003, 0.429012),
(0.725095, 0.430132),
(0.729187, 0.42992),
(0.733279, 0.425202),
(0.737371, 0.423159),
(0.741463, 0.423913),
(0.745555, 0.425542),
(0.749647, 0.426682),
(0.753739, 0.431186),
(0.757831, 0.433959),
(0.761923, 0.433839),
(0.766015, 0.428679),
(0.770107, 0.425968),
(0.774199, 0.426528),
(0.778291, 0.427093),
(0.782383, 0.426848),
(0.786475, 0.424549),
(0.790567, 0.423785),
(0.794659, 0.419892),
(0.798751, 0.417391),
(0.802843, 0.413128),
(0.806935, 0.408498),
(0.811027, 0.402764),
(0.815119, 0.404852),
(0.819211, 0.405915),
(0.823303, 0.392919),
(0.827395, 0.384632),
(0.831487, 0.382626),
(0.835579, 0.379891),
(0.839671, 0.376414),
(0.843762, 0.372915),
(0.847854, 0.375089),
(0.851946, 0.371918),
(0.856038, 0.36652),
(0.86013, 0.358529),
(0.864222, 0.356496),
(0.868314, 0.354707),
(0.872406, 0.348802),
(0.876498, 0.343693),
(0.88059, 0.34059),
(0.884682, 0.342432),
(0.888774, 0.345099),
(0.892866, 0.344524),
(0.896958, 0.342489),
(0.90105, 0.328009),
(0.905142, 0.323685),
(0.909234, 0.321378),
(0.913326, 0.318832),
(0.917418, 0.314999),
(0.92151, 0.311775),
(0.925602, 0.30844),
(0.929694, 0.30678),
(0.933786, 0.303484),
(0.937878, 0.301197),
(0.94197, 0.296788),
(0.946062, 0.295353),
(0.950154, 0.298028),
(0.954246, 0.298098),
(0.958338, 0.295081),
(0.96243, 0.289337),
(0.966522, 0.286116),
(0.970614, 0.284319),
(0.974706, 0.280972),
(0.978798, 0.28015),
(0.98289, 0.279016),
(0.986982, 0.277532),
(0.991074, 0.276013),
(0.995165, 0.270923),
(0.999257, 0.269446),
(1.00335, 0.266567),
(1.00744, 0.263561),
(1.01153, 0.261002),
(1.01563, 0.255349),
(1.01972, 0.258644),
(1.02381, 0.254974),
(1.0279, 0.2523),
(1.03199, 0.244489),
(1.03609, 0.249418),
(1.04018, 0.249519),
(1.04427, 0.249316),
(1.04836, 0.249197),
(1.05245, 0.24415),
(1.05655, 0.244556),
(1.06064, 0.241169),
(1.06473, 0.238484),
(1.06882, 0.2392),
(1.07291, 0.240651),
(1.077, 0.243724),
(1.0811, 0.243174),
(1.08519, 0.239545),
(1.08928, 0.239106),
(1.09337, 0.238763),
(1.09746, 0.238971),
(1.10156, 0.229925),
(1.10565, 0.225123),
(1.10974, 0.226932),
(1.11383, 0.23118),
(1.11792, 0.228654),
(1.12202, 0.225084),
(1.12611, 0.225866),
(1.1302, 0.227717),
(1.13429, 0.229508),
(1.13838, 0.227977),
(1.14248, 0.226799),
(1.14657, 0.228456),
(1.15066, 0.22383),
(1.15475, 0.22188),
(1.15884, 0.219986),
(1.16294, 0.217418),
(1.16703, 0.214356),
(1.17112, 0.211027),
(1.17521, 0.210011),
(1.1793, 0.210609),
(1.1834, 0.210893),
(1.18749, 0.212583),
(1.19158, 0.208415),
(1.19567, 0.204557),
(1.19976, 0.198068),
(1.20386, 0.197603),
(1.20795, 0.196691),
(1.21204, 0.200617),
(1.21613, 0.199803),
(1.22022, 0.199199),
(1.22432, 0.196859),
(1.22841, 0.197471),
(1.2325, 0.19799))
# 300 data points from 0.009 to 1.2325, so separated by 0.004091973
s1 = (ssqr - 0.009) / 0.004091973
is1 = int(math.floor(s1))
if is1 < 0:
return best_data[0][1] # Below low-res limit for BEST data
elif is1 >= 299:
    return best_data[299][1] # Above high-res limit, about 0.9A
else:
ds = s1 - is1
is2 = is1 + 1
best_val = (1.-ds)*best_data[is1][1] + ds*best_data[is2][1]
return best_val
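# Note on default_target_spectrum: the BEST table above is sampled on a uniform 1/d^2
# grid (300 points from 0.009 to 1.2325, spacing ~0.004091973) and interpolated
# linearly. For example, at 3 Angstrom resolution ssqr = 1/3.0**2 ~= 0.111 falls
# between two tabulated points; outside the tabulated range the first or last
# tabulated value is returned.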
def run_refine_cryoem_errors(
mmm, d_min,
map_1_id="map_manager_1", map_2_id="map_manager_2",
sphere_cent=None, radius=None, verbosity=1, prior_params=None,
shift_map_origin=True):
"""
Refine error parameters from half-maps, make weighted map coeffs for region.
Compulsory arguments:
mmm: map_model_manager object containing two half-maps from reconstruction
d_min: target resolution, either best resolution for map or resolution for
target region
Optional arguments:
map_1_id: identifier of first half-map, if different from default of
map_manager_1
map_2_id: same for second half-map
sphere_cent: center of sphere defining target region for analysis
default is center of map
radius: radius of sphere
default (when sphere center not defined either) is 1/4 narrowest map width
prior_params: refined parameters from previous call, usually from the
whole reconstruction before focusing on a target region
shift_map_origin: should map coefficients be shifted to correspond to
original origin, rather than the origin being the corner of the box,
default True
verbosity: 0/1/2/3/4 for mute/log/verbose/debug/testing
"""
from scipy import interpolate
from libtbx import group_args
from iotbx.map_model_manager import map_model_manager
# Start from two maps in map_model_manager plus optional mask specification
# First get map coefficients for maps after spherical masking
ucpars = mmm.map_manager().unit_cell().parameters()
if sphere_cent is None:
# Default to sphere in center of cell extending halfway to nearest edge
sphere_cent = flex.double((ucpars[0], ucpars[1], ucpars[2]))/2.
radius = min(ucpars[0], ucpars[1], ucpars[2])/4.
else:
sphere_cent = flex.double(sphere_cent)
# Define box big enough to hold sphere plus soft masking
boundary_to_smoothing_ratio = 2
soft_mask_radius = d_min
padding = soft_mask_radius * boundary_to_smoothing_ratio
cushion = flex.double(3,radius+padding)
cart_min = flex.double(sphere_cent) - cushion
cart_max = flex.double(sphere_cent) + cushion
for i in range(3): # Keep within unit cell
cart_min[i] = max(cart_min[i],0)
cart_max[i] = min(cart_max[i],ucpars[i])
# Box the map within xyz bounds, converted to map grid units
cs = mmm.crystal_symmetry()
uc = cs.unit_cell()
lower_frac = uc.fractionalize(tuple(cart_min))
upper_frac = uc.fractionalize(tuple(cart_max))
map_data = mmm.map_data()
all_orig = map_data.all()
lower_bounds = [int(math.floor(f * n)) for f, n in zip(lower_frac, all_orig)]
upper_bounds = [int(math.ceil( f * n)) for f, n in zip(upper_frac, all_orig)]
working_mmm = mmm.extract_all_maps_with_bounds(
lower_bounds=lower_bounds, upper_bounds=upper_bounds)
# Make and apply spherical mask
working_mmm.create_spherical_mask(
soft_mask_radius=soft_mask_radius,
boundary_to_smoothing_ratio=boundary_to_smoothing_ratio)
working_mmm.apply_mask_to_maps(
map_ids=[map_1_id, map_2_id], mask_id='mask')
mask_info = working_mmm.mask_info()
# Keep track of volume of map (in voxels) for determining relative
# scale of sigmaE in different subvolumes
weighted_points = mask_info.size*mask_info.mean # Weighted volume
# Mean mask value measures the weighted fraction of the volume of the whole
# map occupied by the density used. This can be converted into the radius of
# a sphere (on a relative scale) that would occupy this volume, and then
# the oversampling of the Fourier transform is the ratio of the volume of the
# total map box divided by the volume of a cube just enclosing that sphere.
relative_radius = (3*mask_info.mean/(4*math.pi))**(1./3.)
relative_volume = (2*relative_radius)**3
over_sampling_factor = 1./relative_volume
box_volume = uc.volume() * (
working_mmm.map_data().size()/mmm.map_data().size())
masked_volume = box_volume * relative_volume
d_max = 2*(radius+padding) + d_min # Size of sphere plus a bit
mc1 = working_mmm.map_as_fourier_coefficients(d_min=d_min, d_max=d_max, map_id=map_1_id)
mc2 = working_mmm.map_as_fourier_coefficients(d_min=d_min, d_max=d_max, map_id=map_2_id)
# Use bins of equal width in d_star_sq, which works well with cubic cell
mc1.setup_binner_d_star_sq_bin_size()
mc2.use_binner_of(mc1)
ssqmin = flex.min(mc1.d_star_sq().data())
ssqmax = flex.max(mc1.d_star_sq().data())
nref = mc1.size()
# Initialise parameters. This requires slope and intercept of Wilson plot,
# plus mapCC per bin.
ssqr_bins = flex.double()
target_spectrum = flex.double()
meanfsq_bins = flex.double()
mapCC_bins = flex.double()
sumw = 0
sumwx = 0.
sumwy = 0.
sumwx2 = 0.
sumwxy = 0.
for i_bin in mc1.binner().range_used():
sel = mc1.binner().selection(i_bin)
mc1sel = mc1.select(sel)
mc2sel = mc2.select(sel)
mapCC = mc1sel.map_correlation(other=mc2sel)
assert (mapCC < 1.) # Ensure these are really independent half-maps
mapCC = max(mapCC,0.001) # Avoid zero or negative values
mapCC_bins.append(mapCC)
ssqr = mc1sel.d_star_sq().data()
x = flex.mean_default(ssqr, 0) # Mean 1/d^2 for bin
ssqr_bins.append(x) # Save for later
fsq = flex.pow2(flex.abs(mc1sel.data()))
meanfsq = flex.mean_default(fsq, 0)
meanfsq_bins.append(meanfsq)
y = math.log(meanfsq)
w = fsq.size()
sumw += w
sumwx += w * x
sumwy += w * y
sumwx2 += w * x**2
sumwxy += w * x * y
target_power = default_target_spectrum(x) # Could have a different target
target_spectrum.append(target_power)
assert (nref == sumw) # Check no Fourier terms lost outside bins
slope = (sumw * sumwxy - (sumwx * sumwy)) / (sumw * sumwx2 - sumwx**2)
intercept = (sumwy - slope * sumwx) / sumw
wilson_scale_intensity = math.exp(intercept)
wilson_b_intensity = -4 * slope
n_bins = ssqr_bins.size()
if (prior_params is not None):
if d_min < 0.99*math.sqrt(1./prior_params['ssqmax']):
print("Requested resolution is higher than prior parameters support")
sys.stdout.flush()
      sys.exit(1)
ssqr_prior = tuple(prior_params['ssqr_bins'])
sigmaT_prior = tuple(prior_params['sigmaT_bins'])
sigmaE_prior = tuple(prior_params['sigmaE_bins'])
sTinterp = interpolate.interp1d(ssqr_prior,sigmaT_prior,fill_value="extrapolate")
sEinterp = interpolate.interp1d(ssqr_prior,sigmaE_prior,fill_value="extrapolate")
sigmaT_bins = flex.double(sTinterp(ssqr_bins))
# Start sigmaE_scale at 1 after rescaling sigmaE_bins by volume comparison.
# This is then refined because of uncertainty in weighting of volume and
# also about whether masking might have been applied to the periphery of the
# map used to obtained prior parameters.
sigmaE_scale = 1.
sigmaE_bins = flex.double(sEinterp(ssqr_bins))*(weighted_points/prior_params['weighted_points'])
sigmaE_baniso = prior_params['sigmaE_baniso']
sigmaE_beta = adptbx.u_star_as_beta(adptbx.u_cart_as_u_star(mc1.unit_cell(),adptbx.b_as_u(sigmaE_baniso)))
else:
sigmaT_bins = [1.]*n_bins # SigmaT_bins correction term for BEST in SigmaT
sigmaE_scale = 1. # Fix at 1
sigmaE_bins = []
for i_bin in range(n_bins):
sigmaE = meanfsq_bins[i_bin] * (1.-mapCC_bins[i_bin])
sigmaE_bins.append(sigmaE) # Error bin parameter
sigmaE_beta = list(adptbx.u_iso_as_beta(mc1.unit_cell(), 0.))
start_params = []
start_params.append(wilson_scale_intensity/3.5) # Asqr_scale, factor out low-res BEST value
start_params.extend(sigmaT_bins)
wilson_u=adptbx.b_as_u(wilson_b_intensity)
asqr_beta=list(adptbx.u_iso_as_beta(mc1.unit_cell(), wilson_u))
start_params.extend(asqr_beta)
start_params.append(sigmaE_scale)
start_params.extend(sigmaE_bins)
start_params.extend(sigmaE_beta)
# create inputs for the minimizer's run method
if (prior_params is not None):
macro = ["Eprior"] # protocol: fix error terms using prior
else:
macro = ["default"] # protocol: refine sigmaE terms too
protocol = [macro, macro] # overall minimization protocol
ncyc = 50 # maximum number of microcycles per macrocycle
minimizer_type = "bfgs" # minimizer, bfgs or newton
study_params = False # flag for calling studyparams procedure
output_level=verbosity # 0/1/2/3/4 for mute/log/verbose/debug/testing
# create instances of refine and minimizer
refine_cryoem_errors = RefineCryoemErrors(
mc1=mc1, mc2=mc2,
ssqr_bins = ssqr_bins, target_spectrum = target_spectrum,
start_x = start_params)
minimizer = Minimizer(output_level=output_level)
# Run minimizer
minimizer.run(refine_cryoem_errors, protocol, ncyc, minimizer_type, study_params)
refined_params=refine_cryoem_errors.x
# Extract and report refined parameters
i_par = 0
asqr_scale = refined_params[i_par] # Not used for correction: leave map on original scale
i_par += 1
sigmaT_bins = refined_params[i_par:i_par + n_bins]
i_par += n_bins
asqr_beta = tuple(refined_params[i_par:i_par + 6])
i_par += 6
sigmaE_scale = refined_params[i_par] # Used here but not saved later
i_par += 1
sigmaE_bins = list(sigmaE_scale * flex.double(refined_params[i_par:i_par + n_bins]))
i_par += n_bins
sigmaE_beta = tuple(refined_params[i_par:i_par + 6])
i_par += 6
assert (i_par == len(refined_params))
# Convert asqr_beta to a_beta for application in weights
a_beta = tuple(flex.double(asqr_beta)/2)
# Convert beta parameters to Baniso for (optional) use and output
a_baniso = adptbx.u_as_b(adptbx.beta_as_u_cart(mc1.unit_cell(), a_beta))
sigmaE_baniso = adptbx.u_as_b(adptbx.beta_as_u_cart(mc1.unit_cell(), sigmaE_beta))
if verbosity > 0:
print("\nRefinement of scales and error terms completed\n")
print("\nParameters for A and BEST curve correction")
print(" A overall scale: ",math.sqrt(asqr_scale))
for i_bin in range(n_bins):
print(" Bin #", i_bin + 1, "BEST curve correction: ", sigmaT_bins[i_bin])
print(" A tensor as beta:", a_beta)
print(" A tensor as Baniso: ", a_baniso)
es = adptbx.eigensystem(a_baniso)
a_beta_ev = es.vectors
print(" Eigenvalues and eigenvectors:")
for iv in range(3):
print(" ",es.values()[iv],es.vectors(iv))
print("\nParameters for SigmaE")
if (prior_params is not None):
print(" SigmaE scale applied to prior bins:", sigmaE_scale)
for i_bin in range(n_bins):
print(" Bin #", i_bin + 1, "SigmaE base: ", sigmaE_bins[i_bin])
print(" SigmaE tensor as beta:", sigmaE_beta)
print(" SigmaE tensor as Baniso (intensity scale): ", sigmaE_baniso)
es = adptbx.eigensystem(sigmaE_baniso)
sigmaE_beta_ev = es.vectors
print(" Eigenvalues and eigenvectors:")
for iv in range(3):
print(" ",es.values()[iv],es.vectors(iv))
sys.stdout.flush()
# Loop over bins to compute expectedE and Dobs for each Fourier term
# Start with mean of half-map Fourier terms and make Miller array for Dobs
expectE = mc1.customized_copy(data = (mc1.data() + mc2.data())/2)
expectE.use_binner_of(mc1)
dobs = expectE.customized_copy(data=flex.double(expectE.size(),0))
i_bin_used = 0 # Keep track in case full range of bins not used
if verbosity > 0:
print("MapCC before and after rescaling as a function of resolution")
print("Bin <ssqr> mapCC_before mapCC_after")
for i_bin in mc1.binner().range_used():
sel = expectE.binner().selection(i_bin)
eEsel = expectE.select(sel)
# Make Miller array as basis for computing aniso corrections in this bin
ones_array = flex.double(eEsel.size(), 1)
all_ones = eEsel.customized_copy(data=ones_array)
beta_a_miller = all_ones.apply_debye_waller_factors(
u_star=adptbx.beta_as_u_star(a_beta))
beta_sE_miller = all_ones.apply_debye_waller_factors(
u_star=adptbx.beta_as_u_star(sigmaE_beta))
# SigmaT is target_spectrum times sigmaT_bins correction factor
sigmaT = sigmaT_bins[i_bin_used] * target_spectrum[i_bin_used]
abeta_terms = beta_a_miller.data() # Anisotropy correction per reflection
a2beta_terms = flex.pow2(abeta_terms)
asqrSigmaT = asqr_scale * sigmaT * a2beta_terms
sigmaE_terms = sigmaE_bins[i_bin_used] * beta_sE_miller.data()
scale_terms = 1./flex.sqrt(asqrSigmaT + sigmaE_terms/2.)
dobs_terms = 1./flex.sqrt(1. + sigmaE_terms/(2*asqrSigmaT))
expectE.data().set_selected(sel, expectE.data().select(sel) * scale_terms)
dobs.data().set_selected(sel, dobs_terms)
# Apply corrections to mc1 and mc2 to compute mapCC after rescaling
# SigmaE variance is twice as large for half-maps before averaging
scale_terms_12 = 1./(abeta_terms + sigmaE_terms / (asqr_scale * sigmaT * abeta_terms))
mc1.data().set_selected(sel, mc1.data().select(sel) * scale_terms_12)
mc2.data().set_selected(sel, mc2.data().select(sel) * scale_terms_12)
mc1sel = mc1.select(sel)
mc2sel = mc2.select(sel)
mapCC = mc1sel.map_correlation(other=mc2sel)
if (verbosity > 0):
print(i_bin_used+1, ssqr_bins[i_bin_used], mapCC_bins[i_bin_used], mapCC)
sys.stdout.flush()
mapCC_bins[i_bin_used] = mapCC # Update for returned output
i_bin_used += 1
shift_cart = working_mmm.shift_cart()
if shift_map_origin:
ucwork = expectE.crystal_symmetry().unit_cell()
# shift_cart is position of original origin in boxed-map coordinate system
# shift_frac should correspond to what has to be done to a model to put it
# into the map, i.e. move it in the opposite direction
shift_frac = ucwork.fractionalize(shift_cart)
shift_frac = tuple(-flex.double(shift_frac))
expectE = expectE.translational_shift(shift_frac)
resultsdict = dict(
n_bins = n_bins,
ssqr_bins = ssqr_bins,
ssqmin = ssqmin,
ssqmax = ssqmax,
weighted_points = weighted_points,
asqr_scale = asqr_scale,
sigmaT_bins = sigmaT_bins,
asqr_beta = asqr_beta,
a_baniso = a_baniso,
sigmaE_bins = sigmaE_bins,
sigmaE_baniso = sigmaE_baniso,
mapCC_bins = mapCC_bins)
return group_args(
shift_cart = shift_cart,
expectE = expectE, dobs = dobs,
over_sampling_factor = over_sampling_factor,
masked_volume = masked_volume,
resultsdict = resultsdict)
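# Hedged usage sketch for run_refine_cryoem_errors (mm1/mm2 and the numbers below are
# placeholders, not values taken from this module):
#
#   from iotbx.map_model_manager import map_model_manager
#   mmm = map_model_manager(map_manager_1=mm1, map_manager_2=mm2)  # two half-maps
#   whole = run_refine_cryoem_errors(mmm, d_min=3.0)               # whole-map pass
#   local = run_refine_cryoem_errors(
#       mmm, d_min=3.0, sphere_cent=(110., 95., 130.), radius=20.,
#       prior_params=whole.resultsdict)
#   # local.expectE and local.dobs hold the weighted coefficients for docking.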
# Command-line interface using argparse
def run():
"""
Prepare cryo-EM map for docking by preparing weighted MTZ file.
Obligatory command-line arguments (no keywords):
half_map_1: name of file containing the first half-map from a reconstruction
half_map_2: name of file containing the second half-map
d_min: desired resolution, either best for whole map or for local region
Optional command-line arguments (keyworded):
--file_root: root name for output files
--mask: optional mask to define map region (not yet implemented)
--sphere_cent: Centre of sphere defining target map region
defaults to centre of map, unless mask is specified
--radius: radius of sphere
defaults to narrowest extent of input map divided by 4,
unless mask is specified
--shift_map_origin: shift output mtz file to match input map on its origin:
default
--no_shift_map_origin: leave origin of map at lowest corner of the box
--write_params: write out refined parameters as a pickle file
--read_params: start with refined parameters from earlier run
--mute (or -m): mute output
--verbose (or -v): verbose output
--testing: extra verbose output for debugging
"""
import argparse
import pickle
from iotbx.map_model_manager import map_model_manager
from iotbx.data_manager import DataManager
dm = DataManager()
dm.set_overwrite(True)
parser = argparse.ArgumentParser(
description='Prepare cryo-EM map for docking')
parser.add_argument('map1',help='Map file for half-map 1')
parser.add_argument('map2', help='Map file for half-map 2')
parser.add_argument('d_min', help='d_min for maps', type=float)
parser.add_argument('--file_root',
help='Root of filenames for output')
parser.add_argument('--read_params', help='Filename for prior parameters')
parser.add_argument('--write_params', help='Write out refined parameters',
action='store_true')
parser.add_argument('--shift_map_origin', dest='shift_map_origin', action='store_true')
parser.add_argument('--no_shift_map_origin', dest='shift_map_origin', action='store_false')
parser.set_defaults(shift_map_origin=True)
parser.add_argument('--mask',
help='Optional mask to define map region (not implemented)')
parser.add_argument('--sphere_cent',help='Centre of sphere for docking', nargs=3, type=float)
parser.add_argument('--radius',help='Radius of sphere for docking', type=float)
parser.add_argument('-m', '--mute', dest = 'mute',
help = 'Mute output', action = 'store_true')
parser.add_argument('-v', '--verbose', dest = 'verbose',
help = 'Set output as verbose', action = 'store_true')
parser.add_argument('--testing', dest = 'testing',
help='Set output as testing', action='store_true')
args = parser.parse_args()
d_min = args.d_min
verbosity = 1
if args.mute: verbosity = 0
if args.verbose: verbosity = 2
if args.testing: verbosity = 4
shift_map_origin = args.shift_map_origin
mask_specified = True
mask = None
sphere_cent = None
radius = None
if args.mask is not None:
print("Mask file is not yet implemented")
sys.stdout.flush()
    sys.exit(1)
elif args.sphere_cent is not None:
assert args.radius is not None
sphere_cent = tuple(args.sphere_cent)
radius = args.radius
else:
mask_specified = False
# Get prior parameters if provided
if (args.read_params is not None):
infile = open(args.read_params,"rb")
prior_params = pickle.load(infile)
infile.close()
else:
prior_params = None
# Create map_model_manager containing half-maps
map1_filename = args.map1
mm1 = dm.get_real_map(map1_filename)
map2_filename = args.map2
mm2 = dm.get_real_map(map2_filename)
mmm = map_model_manager(map_manager_1=mm1, map_manager_2=mm2)
if (prior_params is None):
# Initial refinement to get overall error parameters
results = run_refine_cryoem_errors(mmm, d_min, verbosity=verbosity,
shift_map_origin=shift_map_origin)
prior_params = results.resultsdict
if args.write_params:
if (args.file_root is not None):
paramsfile = args.file_root + ".pickle"
else:
paramsfile = "prior_params.pickle"
outf = open(paramsfile,"wb")
pickle.dump(prior_params,outf,2)
outf.close()
# The following could loop over different regions
if mask_specified:
# Refine to get scale and error parameters for docking region
results = run_refine_cryoem_errors(mmm, d_min, verbosity=verbosity,
sphere_cent=sphere_cent, radius=radius, prior_params=prior_params,
shift_map_origin=shift_map_origin)
expectE = results.expectE
mtz_dataset = expectE.as_mtz_dataset(column_root_label='Emean')
dobs = results.dobs
mtz_dataset.add_miller_array(
dobs,column_root_label='Dobs',column_types='W')
mtz_object=mtz_dataset.mtz_object()
if (args.file_root is not None):
mtzout_file_name = args.file_root + ".mtz"
else:
mtzout_file_name = "weighted_map_data.mtz"
print ("Writing mtz for docking as",mtzout_file_name)
if not shift_map_origin:
shift_cart = results.shift_cart
print ("Origin of full map relative to mtz:", shift_cart)
dm.write_miller_array_file(mtz_object, filename=mtzout_file_name)
over_sampling_factor = results.over_sampling_factor
print ("Over-sampling factor for Fourier terms:",over_sampling_factor)
print ("Weighted volume of density:",results.masked_volume)
sys.stdout.flush()
if (__name__ == "__main__"):
run()
|
src/_dependencies/resolve.py | dry-python/dependencies | 175 | 11070502 | <gh_stars>100-1000
from _dependencies.exceptions import DependencyError
from _dependencies.state import _State
from _dependencies.trace import _Trace
class _Resolver:
def __init__(self, graph, cache, attrname):
self.graph = graph
self.state = _State(cache, attrname)
self.attrname = attrname
def resolve(self):
try:
return self.find()
except RecursionError:
message = _Trace(self.state)
message.add("Circle error found in definition of the dependency graph")
raise DependencyError(message) from None
def find(self):
while self.attrname not in self.state.cache:
spec = self.graph.get(self.state.current)
if self.is_optional(spec):
continue
if self.state.resolved(spec.required, spec.optional):
self.create(spec.factory, spec.args)
else:
self.match(spec.args)
return self.state.cache[self.attrname]
def is_optional(self, spec):
if spec is not None:
return False
if self.state.have_default:
self.state.pop()
return True
message = _Trace(self.state)
message.add(f"Can not resolve attribute {self.state.current!r}")
raise DependencyError(message)
def create(self, factory, args):
try:
self.state.store(factory(**self.state.kwargs(args)))
except DependencyError as error:
message = _Trace(self.state)
message.add(error)
raise DependencyError(message) from None
def match(self, args):
for arg, have_default in args.items(): # pragma: no branch
if self.state.should(arg, have_default):
self.state.add(arg, have_default)
break
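# Hedged usage sketch (assumption: _Resolver is driven by the package's public
# `Injector` API; the classes and attribute names below are placeholders):
#
#   from dependencies import Injector
#
#   class Database:
#       def __init__(self, dsn):
#           self.dsn = dsn
#
#   class Container(Injector):
#       database = Database
#       dsn = "postgres://localhost/app"
#
#   Container.database  # attribute access triggers the resolution loop above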
|
model/ESTRNN-RAW.py | RunqiuBao/Event_ESTRNN | 180 | 11070577 | import torch
import torch.nn as nn
import torch.nn.functional as F
from thop import profile
from .arches import conv1x1, conv3x3, conv5x5, actFunc, SpaceToDepth
from .attention import CBAM
# Dense layer
class dense_layer(nn.Module):
def __init__(self, in_channels, growthRate, activation='relu'):
super(dense_layer, self).__init__()
self.conv = conv3x3(in_channels, growthRate)
self.act = actFunc(activation)
def forward(self, x):
out = self.act(self.conv(x))
out = torch.cat((x, out), 1)
return out
# Residual dense block
class RDB(nn.Module):
def __init__(self, in_channels, growthRate, num_layer, activation='relu'):
super(RDB, self).__init__()
in_channels_ = in_channels
modules = []
for i in range(num_layer):
modules.append(dense_layer(in_channels_, growthRate, activation))
in_channels_ += growthRate
self.dense_layers = nn.Sequential(*modules)
self.conv1x1 = conv1x1(in_channels_, in_channels)
def forward(self, x):
out = self.dense_layers(x)
out = self.conv1x1(out)
out += x
return out
# Middle network of residual dense blocks
class RDNet(nn.Module):
def __init__(self, in_channels, growthRate, num_layer, num_blocks, activation='relu'):
super(RDNet, self).__init__()
self.num_blocks = num_blocks
self.RDBs = nn.ModuleList()
for i in range(num_blocks):
self.RDBs.append(RDB(in_channels, growthRate, num_layer, activation))
self.conv1x1 = conv1x1(num_blocks * in_channels, in_channels)
self.conv3x3 = conv3x3(in_channels, in_channels)
def forward(self, x):
out = []
h = x
for i in range(self.num_blocks):
h = self.RDBs[i](h)
out.append(h)
out = torch.cat(out, dim=1)
out = self.conv1x1(out)
out = self.conv3x3(out)
return out
# DownSampling module
class RDB_DS(nn.Module):
def __init__(self, in_channels, growthRate, num_layer, activation='relu'):
super(RDB_DS, self).__init__()
self.rdb = RDB(in_channels, growthRate, num_layer, activation)
self.down_sampling = conv5x5(in_channels, 2 * in_channels, stride=2)
def forward(self, x):
# x: n,c,h,w
x = self.rdb(x)
out = self.down_sampling(x)
return out
# Global spatio-temporal attention module
class GSA(nn.Module):
def __init__(self, para):
super(GSA, self).__init__()
self.n_feats = para.n_features
self.center = para.past_frames
self.num_ff = para.future_frames
self.num_fb = para.past_frames
self.related_f = self.num_ff + 1 + self.num_fb
self.F_f = nn.Sequential(
nn.Linear(2 * (5 * self.n_feats), 4 * (5 * self.n_feats)),
actFunc(para.activation),
nn.Linear(4 * (5 * self.n_feats), 2 * (5 * self.n_feats)),
nn.Sigmoid()
)
# out channel: 160
self.F_p = nn.Sequential(
conv1x1(2 * (5 * self.n_feats), 4 * (5 * self.n_feats)),
conv1x1(4 * (5 * self.n_feats), 2 * (5 * self.n_feats))
)
# condense layer
self.condense = conv1x1(2 * (5 * self.n_feats), 5 * self.n_feats)
# fusion layer
self.fusion = conv1x1(self.related_f * (5 * self.n_feats), self.related_f * (5 * self.n_feats))
def forward(self, hs):
# hs: [(n=4,c=80,h=64,w=64), ..., (n,c,h,w)]
self.nframes = len(hs)
f_ref = hs[self.center]
cor_l = []
for i in range(self.nframes):
if i != self.center:
cor = torch.cat([f_ref, hs[i]], dim=1)
w = F.adaptive_avg_pool2d(cor, (1, 1)).squeeze() # (n,c) : (4, 160)
if len(w.shape) == 1:
w = w.unsqueeze(dim=0)
w = self.F_f(w)
w = w.reshape(*w.shape, 1, 1)
cor = self.F_p(cor)
cor = self.condense(w * cor)
cor_l.append(cor)
cor_l.append(f_ref)
out = self.fusion(torch.cat(cor_l, dim=1))
return out
# RDB-based RNN cell
class RDBCell(nn.Module):
def __init__(self, para):
super(RDBCell, self).__init__()
self.activation = para.activation
self.n_feats = para.n_features
self.n_blocks = para.n_blocks
self.pixel_unshuffle = nn.Sequential(SpaceToDepth(block_size=2), conv1x1(in_channels=4, out_channels=4),
actFunc(act=self.activation))
self.downsampling = nn.Sequential(conv5x5(in_channels=1, out_channels=4, stride=2),
actFunc(act=self.activation))
self.F_B0 = nn.Sequential(conv5x5(2 * 4, 2 * self.n_feats, stride=1), actFunc(act=self.activation))
self.cbam = CBAM(2 * self.n_feats, reduction_ratio=4)
self.F_B1 = nn.Sequential(RDB(in_channels=2 * self.n_feats, growthRate=self.n_feats, num_layer=3,
activation=self.activation),
RDB(in_channels=2 * self.n_feats, growthRate=int(self.n_feats * 3 / 2), num_layer=3,
activation=self.activation)
)
self.F_B2 = RDB_DS(in_channels=2 * self.n_feats, growthRate=2 * self.n_feats, num_layer=3,
activation=self.activation)
self.F_R = RDNet(in_channels=(1 + 4) * self.n_feats, growthRate=2 * self.n_feats, num_layer=3,
num_blocks=self.n_blocks, activation=self.activation) # in: 80
# F_h: hidden state part
self.F_h = nn.Sequential(
conv3x3((1 + 4) * self.n_feats, self.n_feats),
RDB(in_channels=self.n_feats, growthRate=self.n_feats, num_layer=3, activation=self.activation),
conv3x3(self.n_feats, self.n_feats)
)
def forward(self, x, s_last):
out = torch.cat((self.downsampling(x), self.pixel_unshuffle(x)), dim=1)
out = self.F_B0(out)
out = self.cbam(out)
out = self.F_B1(out)
out = self.F_B2(out)
out = torch.cat([out, s_last], dim=1)
out = self.F_R(out)
s = self.F_h(out)
return out, s
# Reconstructor
class Reconstructor(nn.Module):
def __init__(self, para):
super(Reconstructor, self).__init__()
self.para = para
self.num_ff = para.future_frames
self.num_fb = para.past_frames
self.related_f = self.num_ff + 1 + self.num_fb
self.n_feats = para.n_features
self.model = nn.Sequential(
nn.ConvTranspose2d((5 * self.n_feats) * (self.related_f), 2 * self.n_feats, kernel_size=3, stride=2,
padding=1, output_padding=1),
nn.ConvTranspose2d(2 * self.n_feats, self.n_feats, kernel_size=3, stride=2, padding=1, output_padding=1),
conv5x5(self.n_feats, 1, stride=1)
)
def forward(self, x):
return self.model(x)
class Model(nn.Module):
"""
    Efficient spatio-temporal recurrent neural network for RAW images (ESTRNN-RAW)
"""
def __init__(self, para):
super(Model, self).__init__()
self.para = para
self.n_feats = para.n_features
self.num_ff = para.future_frames
self.num_fb = para.past_frames
self.ds_ratio = 4
self.device = torch.device('cuda')
self.cell = RDBCell(para)
self.recons = Reconstructor(para)
self.fusion = GSA(para)
def forward(self, x, profile_flag=False):
if profile_flag:
return self.profile_forward(x)
outputs, hs = [], []
batch_size, frames, channels, height, width = x.shape
s_height = int(height / self.ds_ratio)
s_width = int(width / self.ds_ratio)
# forward h structure: (batch_size, channel, height, width)
s = torch.zeros(batch_size, self.n_feats, s_height, s_width).to(self.device)
for i in range(frames):
h, s = self.cell(x[:, i, :, :, :], s)
hs.append(h)
for i in range(self.num_fb, frames - self.num_ff):
out = self.fusion(hs[i - self.num_fb:i + self.num_ff + 1])
out = self.recons(out)
outputs.append(out.unsqueeze(dim=1))
return torch.cat(outputs, dim=1)
# For calculating GMACs
def profile_forward(self, x):
outputs, hs = [], []
batch_size, frames, channels, height, width = x.shape
s_height = int(height / self.ds_ratio)
s_width = int(width / self.ds_ratio)
s = torch.zeros(batch_size, self.n_feats, s_height, s_width).to(self.device)
for i in range(frames):
h, s = self.cell(x[:, i, :, :, :], s)
hs.append(h)
for i in range(self.num_fb + self.num_ff):
hs.append(torch.randn(*h.shape).to(self.device))
for i in range(self.num_fb, frames + self.num_fb):
out = self.fusion(hs[i - self.num_fb:i + self.num_ff + 1])
out = self.recons(out)
outputs.append(out.unsqueeze(dim=1))
return torch.cat(outputs, dim=1)
def feed(model, iter_samples):
inputs = iter_samples[0]
outputs = model(inputs)
return outputs
def cost_profile(model, H, W, seq_length):
x = torch.randn(1, seq_length, 1, H, W).cuda()
profile_flag = True
flops, params = profile(model, inputs=(x, profile_flag), verbose=False)
return flops / seq_length, params
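if __name__ == "__main__":
    # Hedged smoke-test sketch (not part of the original module): `para` is a
    # stand-in for the project's real config object. The attribute names are
    # exactly those read by Model/RDBCell/GSA/Reconstructor above, but the
    # values are placeholders, and a CUDA device is required because Model
    # hard-codes torch.device('cuda').
    from types import SimpleNamespace
    para = SimpleNamespace(n_features=16, n_blocks=9, future_frames=2,
                           past_frames=2, activation='relu')
    model = Model(para).cuda()
    # input shape: (batch, frames, channels=1, H, W); H and W divisible by 4
    x = torch.randn(1, 8, 1, 64, 64).cuda()
    with torch.no_grad():
        y = model(x)
    # 8 input frames minus 2 past and 2 future context frames -> 4 outputs
    print(y.shape)  # expected: torch.Size([1, 4, 1, 64, 64])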
|
tilecloud/layout/wms.py | camptocamp/tilecloud | 134 | 11070592 | <reponame>camptocamp/tilecloud<filename>tilecloud/layout/wms.py<gh_stars>100-1000
from typing import Any, Dict, Optional
from urllib.parse import urlencode
from tilecloud import TileCoord, TileGrid, TileLayout
class WMSTileLayout(TileLayout):
def __init__(
self,
url: str,
layers: str,
srs: str,
format: str,
tilegrid: TileGrid,
border: int = 0,
params: Optional[Dict[str, str]] = None,
) -> None:
if params is None:
params = {}
self.tilegrid = tilegrid
self.url = url
self.border = border
self.params = {
"LAYERS": layers,
"FORMAT": format,
"TRANSPARENT": "TRUE" if format == "image/png" else "FALSE",
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetMap",
"STYLES": "",
"SRS": srs,
}
self.params.update(params)
if params.get("FILTER", None) is not None:
self.params["FILTER"] = params["FILTER"].format(**params)
def filename(self, tilecoord: TileCoord, metadata: Optional[Any] = None) -> str:
metadata = {} if metadata is None else metadata
bbox = self.tilegrid.extent(tilecoord, self.border)
size = tilecoord.n * self.tilegrid.tile_size + 2 * self.border
params = self.params.copy()
for k, v in metadata.items():
if k.startswith("dimension_"):
params[k[len("dimension_") :]] = v
params["BBOX"] = f"{bbox[0]:f},{bbox[1]:f},{bbox[2]:f},{bbox[3]:f}"
params["WIDTH"] = str(size)
params["HEIGHT"] = str(size)
return self.url + "?" + urlencode(params)
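# Hedged usage sketch (illustrative only; the concrete TileGrid construction
# depends on the grid implementation chosen from tilecloud and is assumed here):
#
#   layout = WMSTileLayout(
#       url="https://example.com/wms",
#       layers="buildings",
#       srs="EPSG:3857",
#       format="image/png",
#       tilegrid=some_tile_grid,
#       border=5,
#       params={"TILED": "TRUE"},
#   )
#   layout.filename(TileCoord(10, 356, 357))
#   # -> GetMap URL whose BBOX covers the tile plus the 5-pixel border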
|
language/serene/util.py | Xtuden-com/language | 1,199 | 11070593 | <filename>language/serene/util.py<gh_stars>1000+
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for fever project."""
import contextlib
import json
import os
import pathlib
import random
import time
import unicodedata
from absl import logging
import tensorflow.compat.v2 as tf
from typing import Text, Union
Path = Union[Text, pathlib.PurePath]
def safe_path(path):
"""Return a path that is safe to write to by making intermediate dirs.
Args:
path: Path to ensure is safe
Returns:
The original path, converted to Text if it was a PurePath
"""
if isinstance(path, pathlib.PurePath):
path = str(path)
directory = os.path.dirname(path)
tf.io.gfile.makedirs(directory)
return path
def safe_copy(src, dst, overwrite=True):
"""Copy safely from src to dst by creating intermediate directories.
Args:
src: File to copy
dst: Where to copy to
overwrite: Whether to overwrite destination if it exists
"""
if isinstance(src, pathlib.PurePath):
src = str(src)
dst = safe_path(dst)
tf.io.gfile.copy(src, dst, overwrite=overwrite)
def safe_open(path, mode = 'r'):
"""Open the path safely. If in write model, make intermediate directories.
Args:
path: path to open
mode: mode to use
Returns:
file handler
"""
if isinstance(path, pathlib.PurePath):
path = str(path)
if 'w' in mode:
directory = os.path.dirname(path)
tf.io.gfile.makedirs(directory)
return tf.io.gfile.GFile(path, mode)
else:
return tf.io.gfile.GFile(path, mode)
def read_jsonlines(path):
"""Read jsonlines file from the path.
Args:
path: Path to json file
Returns:
List of objects decoded from each line
"""
entries = []
with safe_open(path) as f:
for line in f:
entries.append(json.loads(line))
return entries
def read_json(path):
"""Read json file from path and return.
Args:
path: Path of file to read
Returns:
JSON object from file
"""
with safe_open(path) as f:
return json.load(f)
def write_json(obj, path):
"""Write json object to the path.
Args:
obj: Object to write
path: path to write
"""
with safe_open(path, 'w') as f:
json.dump(obj, f)
def random_string(*, prefix = None):
"""Return a moderately randomized string, possibly with a prefix.
Helpful for generating random directories to write different models to
Args:
prefix: If not None, prefix this to the random string
Returns:
Random string, perhaps with a prefix
"""
# For the use case, this is large enough (unique experiment IDs)
postfix = str(random.randrange(1_000_000, 2_000_000))
if prefix is None:
return postfix
else:
return f'{prefix}-{postfix}'
@contextlib.contextmanager
def log_time(message):
"""Utility to easily log the runtime of a passage of code with a message.
EG.
with log_time('hello there'):
time.sleep(1)
# prints: hello there: 1 seconds
Args:
message: The message to prepend to the runtime of the code
Yields:
Nothing, but can be used with "with" statement.
"""
start = time.time()
yield
end = time.time()
logging.info('%s: %s seconds', message, end - start)
def normalize(wikipedia_url):
"""Unicode normalize the wikipedia title.
Args:
wikipedia_url: The original title
Returns:
The unicode normalized title
"""
return unicodedata.normalize('NFC', wikipedia_url)
def tf_to_str(text):
"""Convert a string-like input to python string.
Specifically, this is helpful when its unclear whether a function is
expected a tf.Tensor wrapping a string, a bytes object from unwrapping
from a tf.Tensor, or the input is already a normal python string.
Args:
text: A tf.Tensor containing a string, a bytes object that represents a
utf-8 string, or a string itself.
Returns:
Python string of the input
"""
if isinstance(text, tf.Tensor):
text = text.numpy()
if isinstance(text, bytes):
return text.decode('utf8')
elif isinstance(text, Text):
return text
else:
input_type = type(text)
raise TypeError(f'Unexpected type: {input_type} for input: {text}')
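# Hedged usage sketch (illustrative paths only):
#
#   write_json({"accuracy": 0.93}, "/tmp/fever/metrics.json")
#   metrics = read_json("/tmp/fever/metrics.json")
#   with log_time("reading claims"):
#       claims = read_jsonlines("/tmp/fever/claims.jsonl")
#   run_dir = random_string(prefix="experiment")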
|
ocs_ci/ocs/pgsql.py | annagitel/ocs-ci | 130 | 11070638 | """
Postgresql workload class
"""
import logging
import random
import time
from prettytable import PrettyTable
from ocs_ci.ocs.benchmark_operator import BenchmarkOperator, BMO_NAME
from ocs_ci.utility.utils import TimeoutSampler, run_cmd
from ocs_ci.ocs.utils import get_pod_name_by_pattern
from ocs_ci.utility import utils, templating
from ocs_ci.ocs.exceptions import (
UnexpectedBehaviour,
CommandFailed,
ResourceWrongStatusException,
)
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs import constants
from subprocess import CalledProcessError
from ocs_ci.ocs.resources.pod import (
get_all_pods,
get_pod_obj,
get_operator_pods,
get_file_path,
get_pod_node,
)
from ocs_ci.ocs.resources.pvc import get_all_pvc_objs
from ocs_ci.helpers.helpers import (
wait_for_resource_state,
create_unique_resource_name,
storagecluster_independent_check,
)
from ocs_ci.utility.spreadsheet.spreadsheet_api import GoogleSpreadSheetAPI
log = logging.getLogger(__name__)
class Postgresql(BenchmarkOperator):
"""
Postgresql workload operation
"""
def __init__(self, **kwargs):
"""
Initializer function
"""
super().__init__(**kwargs)
BenchmarkOperator.deploy(self)
def setup_postgresql(self, replicas, sc_name=None):
"""
        Deploy PostgreSQL server
        Args:
            replicas (int): Number of postgresql pods to be deployed
            sc_name (str): Storage class name to use for the postgres PVCs
Raises:
CommandFailed: If PostgreSQL server setup fails
"""
log.info("Deploying postgres database")
try:
pgsql_service = templating.load_yaml(constants.PGSQL_SERVICE_YAML)
pgsql_cmap = templating.load_yaml(constants.PGSQL_CONFIGMAP_YAML)
pgsql_sset = templating.load_yaml(constants.PGSQL_STATEFULSET_YAML)
pgsql_sset["spec"]["replicas"] = replicas
if storagecluster_independent_check():
pgsql_sset["spec"]["volumeClaimTemplates"][0]["spec"][
"storageClassName"
] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
if sc_name:
pgsql_sset["spec"]["volumeClaimTemplates"][0]["spec"][
"storageClassName"
] = sc_name
self.pgsql_service = OCS(**pgsql_service)
self.pgsql_service.create()
self.pgsql_cmap = OCS(**pgsql_cmap)
self.pgsql_cmap.create()
self.pgsql_sset = OCS(**pgsql_sset)
self.pgsql_sset.create()
self.pod_obj.wait_for_resource(
condition="Running",
selector="app=postgres",
resource_count=replicas,
timeout=3600,
)
except (CommandFailed, CalledProcessError) as cf:
log.error("Failed during setup of PostgreSQL server")
raise cf
self.pgsql_is_setup = True
log.info("Successfully deployed postgres database")
def create_pgbench_benchmark(
self,
replicas,
pgbench_name=None,
postgres_name=None,
clients=None,
threads=None,
transactions=None,
scaling_factor=None,
timeout=None,
wait=True,
):
"""
Create pgbench benchmark pods
Args:
replicas (int): Number of pgbench pods to be deployed
pgbench_name (str): Name of pgbench bechmark
postgres_name (str): Name of postgres pod
clients (int): Number of clients
threads (int): Number of threads
transactions (int): Number of transactions
scaling_factor (int): scaling factor
timeout (int): Time in seconds to wait
wait (bool): On true waits till pgbench reaches Completed state
Returns:
List: pgbench pod objects list
"""
pg_obj_list = []
pgbench_name = pgbench_name if pgbench_name else "pgbench-benchmark"
postgres_name = postgres_name if postgres_name else "postgres"
for i in range(replicas):
log.info("Create resource file for pgbench workload")
pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
pg_data["metadata"]["name"] = f"{pgbench_name}" + f"{i}"
pg_data["spec"]["workload"]["args"]["databases"][0]["host"] = (
f"{postgres_name}-" + f"{i}" + ".postgres"
)
if clients is not None:
pg_data["spec"]["workload"]["args"]["clients"][0] = clients
if threads is not None:
pg_data["spec"]["workload"]["args"]["threads"] = threads
if transactions is not None:
pg_data["spec"]["workload"]["args"]["transactions"] = transactions
if scaling_factor is not None:
pg_data["spec"]["workload"]["args"]["scaling_factor"] = scaling_factor
pg_obj = OCS(**pg_data)
pg_obj_list.append(pg_obj)
pg_obj.create()
if wait:
# Confirm that expected pgbench pods are spinned
log.info("Searching the pgbench pods by its name pattern")
timeout = timeout if timeout else 300
for pgbench_pods in TimeoutSampler(
timeout,
replicas,
get_pod_name_by_pattern,
"pgbench-1-dbs-client",
BMO_NAME,
):
try:
if len(pgbench_pods) == replicas:
log.info(
f"Expected number of pgbench pods are " f"found: {replicas}"
)
break
except IndexError:
log.info(
f"Expected number of pgbench pods are {replicas} "
f"but only found {len(pgbench_pods)}"
)
return pg_obj_list
def get_postgres_pvc(self):
"""
Get all postgres pvc
Returns:
List: postgres pvc objects list
"""
return get_all_pvc_objs(namespace=BMO_NAME)
def get_postgres_pods(self):
"""
Get all postgres pods
Returns:
List: postgres pod objects list
"""
return get_all_pods(namespace=BMO_NAME, selector=["postgres"])
def get_pgbench_pods(self):
"""
Get all pgbench pods
Returns:
List: pgbench pod objects list
"""
return [
get_pod_obj(pod, BMO_NAME)
for pod in get_pod_name_by_pattern("pgbench", BMO_NAME)
]
def delete_pgbench_pods(self, pg_obj_list):
"""
Delete all pgbench pods on cluster
        Args:
            pg_obj_list (list): List of pgbench benchmark objects to delete
"""
log.info("Delete pgbench Benchmark")
for pgbench_pod in pg_obj_list:
pgbench_pod.delete(force=True)
def is_pgbench_running(self):
"""
Check if pgbench is running
Returns:
bool: True if pgbench is running; False otherwise
"""
        pod_objs = self.get_pgbench_pods()
        for pod in pod_objs:
            state = pod.get().get("status").get("containerStatuses")[0].get("state")
            if list(state.keys())[0] == "running":
                log.info("One or more pgbench pods are in running state")
                return True
        return False
def get_pgbench_status(self, pgbench_pod_name):
"""
Get pgbench status
Args:
pgbench_pod_name (str): Name of the pgbench pod
Returns:
str: state of pgbench pod (running/completed)
"""
pod_obj = get_pod_obj(pgbench_pod_name, namespace=BMO_NAME)
status = pod_obj.get().get("status").get("containerStatuses")[0].get("state")
return (
"running"
if list(status.keys())[0] == "running"
else status["terminated"]["reason"]
)
def wait_for_postgres_status(self, status=constants.STATUS_RUNNING, timeout=300):
"""
Wait for postgres pods status to reach running/completed
Args:
status (str): status to reach Running or Completed
timeout (int): Time in seconds to wait
"""
log.info(f"Waiting for postgres pods to be reach {status} state")
postgres_pod_objs = self.get_postgres_pods()
for postgres_pod_obj in postgres_pod_objs:
wait_for_resource_state(
resource=postgres_pod_obj, state=status, timeout=timeout
)
def wait_for_pgbench_status(self, status, timeout=None):
"""
Wait for pgbench benchmark pods status to reach running/completed
Args:
status (str): status to reach Running or Completed
timeout (int): Time in seconds to wait
"""
timeout = timeout if timeout else 900
# Wait for pg_bench pods to initialized and running
log.info(f"Waiting for pgbench pods to be reach {status} state")
pgbench_pod_objs = self.get_pgbench_pods()
for pgbench_pod_obj in pgbench_pod_objs:
try:
wait_for_resource_state(
resource=pgbench_pod_obj, state=status, timeout=timeout
)
except ResourceWrongStatusException:
output = run_cmd(f"oc logs {pgbench_pod_obj.name} -n {BMO_NAME}")
error_msg = f"{pgbench_pod_obj.name} did not reach to {status} state after {timeout} sec\n{output}"
log.error(error_msg)
raise UnexpectedBehaviour(error_msg)
def validate_pgbench_run(self, pgbench_pods, print_table=True):
"""
Validate pgbench run
Args:
            pgbench_pods (list): List of pgbench pods
            print_table (bool): On True, print the parsed results in a table
Returns:
pg_output (list): pgbench outputs in list
"""
all_pgbench_pods_output = []
for pgbench_pod in pgbench_pods:
log.info(f"pgbench_client_pod===={pgbench_pod.name}====")
output = run_cmd(f"oc logs {pgbench_pod.name} -n {BMO_NAME}")
pg_output = utils.parse_pgsql_logs(output)
log.info("*******PGBench output log*********\n" f"{pg_output}")
# for data in all_pgbench_pods_output:
for data in pg_output:
run_id = list(data.keys())
latency_avg = data[run_id[0]]["latency_avg"]
if not latency_avg:
raise UnexpectedBehaviour(
"PGBench failed to run, " "no data found on latency_avg"
)
log.info(f"PGBench on {pgbench_pod.name} completed successfully")
all_pgbench_pods_output.append((pg_output, pgbench_pod.name))
if print_table:
pgbench_pod_table = PrettyTable()
pgbench_pod_table.field_names = [
"pod_name",
"scaling_factor",
"num_clients",
"num_threads",
"trans_client",
"actually_trans",
"latency_avg",
"lat_stddev",
"tps_incl",
"tps_excl",
]
for pgbench_pod_out in all_pgbench_pods_output:
for pod_output in pgbench_pod_out[0]:
for pod in pod_output.values():
pgbench_pod_table.add_row(
[
pgbench_pod_out[1],
pod["scaling_factor"],
pod["num_clients"],
pod["num_threads"],
pod["number_of_transactions_per_client"],
pod["number_of_transactions_actually_processed"],
pod["latency_avg"],
pod["lat_stddev"],
pod["tps_incl"],
pod["tps_excl"],
]
)
log.info(f"\n{pgbench_pod_table}\n")
return all_pgbench_pods_output
def get_pgsql_nodes(self):
"""
Get nodes that contain a pgsql app pod
Returns:
list: Cluster node OCP objects
"""
pgsql_pod_objs = self.pod_obj.get(
selector=constants.PGSQL_APP_LABEL, all_namespaces=True
)
log.info("Create a list of nodes that contain a pgsql app pod")
nodes_set = set()
for pod in pgsql_pod_objs["items"]:
log.info(
f"pod {pod['metadata']['name']} located on "
f"node {pod['spec']['nodeName']}"
)
nodes_set.add(pod["spec"]["nodeName"])
return list(nodes_set)
def get_pgbench_running_nodes(self):
"""
get nodes that contains pgbench pods
Returns:
list: List of pgbench running nodes
"""
pgbench_nodes = [
get_pod_node(pgbench_pod).name for pgbench_pod in self.get_pgbench_pods()
]
return list(set(pgbench_nodes))
def filter_pgbench_nodes_from_nodeslist(self, nodes_list):
"""
Filter pgbench nodes from the given nodes list
Args:
nodes_list (list): List of nodes to be filtered
Returns:
list: List of pgbench not running nodes from the given nodes list
"""
log.info("Get pgbench running nodes")
pgbench_nodes = self.get_pgbench_running_nodes()
log.info("Select a node where pgbench is not running from the nodes list")
log.info(f"nodes list: {nodes_list}")
log.info(f"pgbench running nodes list: {pgbench_nodes}")
filtered_nodes_list = list(set(nodes_list) - set(pgbench_nodes))
log.info(f"pgbench is not running on nodes: {filtered_nodes_list}")
return filtered_nodes_list
def respin_pgsql_app_pod(self):
"""
Respin the pgsql app pod
Returns:
pod status
"""
app_pod_list = get_operator_pods(constants.PGSQL_APP_LABEL, BMO_NAME)
app_pod = app_pod_list[random.randint(0, len(app_pod_list) - 1)]
log.info(f"respin pod {app_pod.name}")
app_pod.delete(wait=True, force=False)
wait_for_resource_state(
resource=app_pod, state=constants.STATUS_RUNNING, timeout=300
)
def get_pgbech_pod_status_table(self, pgbench_pods):
"""
Get pgbench pod data and print results on a table
Args:
            pgbench_pods (list): List of pgbench pods
"""
pgbench_pod_table = PrettyTable()
pgbench_pod_table.field_names = [
"pod_name",
"scaling_factor",
"num_clients",
"num_threads",
"trans_client",
"actually_trans",
"latency_avg",
"lat_stddev",
"tps_incl",
"tps_excl",
]
for pgbench_pod in pgbench_pods:
output = run_cmd(f"oc logs {pgbench_pod.name} -n {BMO_NAME}")
pg_output = utils.parse_pgsql_logs(output)
for pod_output in pg_output:
for pod in pod_output.values():
pgbench_pod_table.add_row(
[
pgbench_pod.name,
pod["scaling_factor"],
pod["num_clients"],
pod["num_threads"],
pod["number_of_transactions_per_client"],
pod["number_of_transactions_actually_processed"],
pod["latency_avg"],
pod["lat_stddev"],
pod["tps_incl"],
pod["tps_excl"],
]
)
log.info(f"\n{pgbench_pod_table}\n")
def export_pgoutput_to_googlesheet(self, pg_output, sheet_name, sheet_index):
"""
Collect pgbench output to google spreadsheet
Args:
pg_output (list): pgbench outputs in list
sheet_name (str): Name of the sheet
sheet_index (int): Index of sheet
"""
# Collect data and export to Google doc spreadsheet
g_sheet = GoogleSpreadSheetAPI(sheet_name=sheet_name, sheet_index=sheet_index)
log.info("Exporting pgoutput data to google spreadsheet")
for pgbench_pod in range(len(pg_output)):
for run in range(len(pg_output[pgbench_pod][0])):
run_id = list(pg_output[pgbench_pod][0][run].keys())[0]
lat_avg = pg_output[pgbench_pod][0][run][run_id]["latency_avg"]
lat_stddev = pg_output[pgbench_pod][0][run][run_id]["lat_stddev"]
                tps_incl = pg_output[pgbench_pod][0][run][run_id]["tps_incl"]
tps_excl = pg_output[pgbench_pod][0][run][run_id]["tps_excl"]
g_sheet.insert_row(
[
f"Pgbench-pod{pg_output[pgbench_pod][1]}-run-{run_id}",
int(lat_avg),
int(lat_stddev),
int(tps_incl),
int(tps_excl),
],
2,
)
g_sheet.insert_row(
["", "latency_avg", "lat_stddev", "lat_stddev", "tps_excl"], 2
)
# Capturing versions(OCP, OCS and Ceph) and test run name
g_sheet.insert_row(
[
f"ocp_version:{utils.get_cluster_version()}",
f"ocs_build_number:{utils.get_ocs_build_number()}",
f"ceph_version:{utils.get_ceph_version()}",
f"test_run_name:{utils.get_testrun_name()}",
],
2,
)
def cleanup(self):
"""
Clean up
"""
log.info("Deleting postgres pods and configuration")
if self.pgsql_is_setup:
self.pgsql_sset.delete()
self.pgsql_cmap.delete()
self.pgsql_service.delete()
log.info("Deleting pgbench pods")
pods_obj = self.get_pgbench_pods()
for pod in pods_obj:
pod.delete()
pod.ocp.wait_for_delete(pod.name)
log.info("Deleting benchmark operator configuration")
BenchmarkOperator.cleanup(self)
def attach_pgsql_pod_to_claim_pvc(
self, pvc_objs, postgres_name, run_benchmark=True, pgbench_name=None
):
"""
Attaches pgsql pod to created claim PVC
Args:
pvc_objs (list): List of PVC objs which needs to attached to pod
postgres_name (str): Name of the postgres pod
run_benchmark (bool): On true, runs pgbench benchmark on postgres pod
pgbench_name (str): Name of pgbench benchmark
Returns:
pgsql_obj_list (list): List of pod objs created
"""
pgsql_obj_list = []
for pvc_obj in pvc_objs:
try:
pgsql_sset = templating.load_yaml(constants.PGSQL_STATEFULSET_YAML)
del pgsql_sset["spec"]["volumeClaimTemplates"]
pgsql_sset["metadata"]["name"] = (
f"{postgres_name}" + f"{pvc_objs.index(pvc_obj)}"
)
pgsql_sset["spec"]["template"]["spec"]["containers"][0]["volumeMounts"][
0
]["name"] = pvc_obj.name
pgsql_sset["spec"]["template"]["spec"]["volumes"] = [
{
"name": f"{pvc_obj.name}",
"persistentVolumeClaim": {"claimName": f"{pvc_obj.name}"},
}
]
pgsql_sset = OCS(**pgsql_sset)
pgsql_sset.create()
pgsql_obj_list.append(pgsql_sset)
self.wait_for_postgres_status(
status=constants.STATUS_RUNNING, timeout=300
)
if run_benchmark:
pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
pg_data["metadata"]["name"] = (
f"{pgbench_name}" + f"{pvc_objs.index(pvc_obj)}"
if pgbench_name
else create_unique_resource_name("benchmark", "pgbench")
)
pg_data["spec"]["workload"]["args"]["databases"][0]["host"] = (
f"{postgres_name}"
+ f"{pvc_objs.index(pvc_obj)}-0"
+ ".postgres"
)
pg_obj = OCS(**pg_data)
pg_obj.create()
pgsql_obj_list.append(pg_obj)
wait_time = 120
log.info(f"Wait {wait_time} seconds before mounting pod")
time.sleep(wait_time)
except (CommandFailed, CalledProcessError) as cf:
log.error("Failed during creation of postgres pod")
raise cf
if run_benchmark:
log.info("Checking all pgbench benchmark reached Completed state")
self.wait_for_pgbench_status(
status=constants.STATUS_COMPLETED, timeout=1800
)
return pgsql_obj_list
def get_postgres_used_file_space(self, pod_obj_list):
"""
Get the used file space on a mount point
Args:
pod_obj_list (POD): List of pod objects
Returns:
list: List of pod object
"""
# Get the used file space on a mount point
for pod_obj in pod_obj_list:
filepath = get_file_path(pod_obj, "pgdata")
filespace = pod_obj.exec_cmd_on_pod(
command=f"du -sh {filepath}", out_yaml_format=False
)
filespace = filespace.split()[0]
pod_obj.filespace = filespace
return pod_obj_list
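# Hedged usage sketch (illustrative only; assumes a live OCS cluster and the
# benchmark-operator namespace deployed by BenchmarkOperator):
#
#   pgsql = Postgresql()
#   pgsql.setup_postgresql(replicas=3)
#   pgbench_pods = pgsql.create_pgbench_benchmark(replicas=3, clients=4,
#                                                 transactions=600)
#   pgsql.wait_for_pgbench_status(status=constants.STATUS_COMPLETED)
#   pgsql.validate_pgbench_run(pgsql.get_pgbench_pods())
#   pgsql.delete_pgbench_pods(pgbench_pods)
#   pgsql.cleanup()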
|
xknx/io/const.py | iligiddi/xknx | 179 | 11070645 | """KNX Constants used within io."""
DEFAULT_MCAST_GRP = "224.0.23.12"
DEFAULT_MCAST_PORT = 3671
CONNECTION_ALIVE_TIME = 120
CONNECTIONSTATE_REQUEST_TIMEOUT = 10
HEARTBEAT_RATE = CONNECTION_ALIVE_TIME - (CONNECTIONSTATE_REQUEST_TIMEOUT * 5)
|
checkov/common/checks_infra/solvers/attribute_solvers/base_attribute_solver.py | vangundy-jason-pfg/checkov | 4,013 | 11070653 | import concurrent.futures
import re
from typing import List, Tuple, Dict, Any, Optional, Pattern
from networkx import DiGraph
from checkov.common.graph.checks_infra.enums import SolverType
from checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver
from concurrent.futures import ThreadPoolExecutor
WILDCARD_PATTERN = re.compile(r"(\S+[.][*][.]*)+")
class BaseAttributeSolver(BaseSolver):
operator = ""
def __init__(self, resource_types: List[str], attribute: Optional[str], value: Any) -> None:
super().__init__(SolverType.ATTRIBUTE)
self.resource_types = resource_types
self.attribute = attribute
self.value = value
def run(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        executor = ThreadPoolExecutor()
jobs = []
passed_vertices: List[Dict[str, Any]] = []
failed_vertices: List[Dict[str, Any]] = []
for _, data in graph_connector.nodes(data=True):
            jobs.append(executor.submit(self._process_node, data, passed_vertices, failed_vertices))
concurrent.futures.wait(jobs)
return passed_vertices, failed_vertices
def get_operation(self, vertex: Dict[str, Any]) -> bool:
if self.attribute and re.match(WILDCARD_PATTERN, self.attribute):
attribute_patterns = self.get_attribute_patterns(self.attribute)
attribute_matches = [
attr
for attr in vertex
if any(re.match(attribute_pattern, attr) for attribute_pattern in attribute_patterns)
]
if attribute_matches:
return self.resource_type_pred(vertex, self.resource_types) and any(
self._get_operation(vertex=vertex, attribute=attr) for attr in attribute_matches
)
return self.resource_type_pred(vertex, self.resource_types) and self._get_operation(
vertex=vertex, attribute=self.attribute
)
def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:
raise NotImplementedError
def _process_node(
        self, data: Dict[str, Any], passed_vertices: List[Dict[str, Any]], failed_vertices: List[Dict[str, Any]]
) -> None:
if not self.resource_type_pred(data, self.resource_types):
return
if self.get_operation(vertex=data):
            passed_vertices.append(data)
else:
failed_vertices.append(data)
@staticmethod
def get_attribute_patterns(attribute: str) -> Tuple[Pattern[str], Pattern[str]]:
index_pattern = r"[\d]+"
split_by_dots = attribute.split(".")
pattern_parts = []
pattern_parts_without_index = []
for attr_part in split_by_dots:
if attr_part == "*":
pattern_parts.append(index_pattern)
else:
attr_part_pattern = f"({attr_part})"
pattern_parts.append(attr_part_pattern)
pattern_parts_without_index.append(attr_part_pattern)
pattern = "[.]".join(pattern_parts)
pattern_with_index = re.compile(pattern)
pattern = "[.]".join(pattern_parts_without_index)
pattern_without_index = re.compile(pattern)
return pattern_with_index, pattern_without_index
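# Hedged sketch of a concrete solver (illustrative only; the real operator
# implementations live in the sibling attribute-solver modules):
class ExampleEqualsAttributeSolver(BaseAttributeSolver):
    operator = "equals"
    def _get_operation(self, vertex: Dict[str, Any], attribute: Optional[str]) -> bool:
        # A vertex passes when the configured attribute equals the expected value.
        return vertex.get(attribute) == self.value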
|
code/test/AiSpeech/lib/aispeech/api_aispeech.py | FACEGOOD/Audio2BlendshapeWeights | 266 | 11070668 | <filename>code/test/AiSpeech/lib/aispeech/api_aispeech.py
# -*- coding: utf-8 -*-
# !/usr/bin/env python36
"""
tgshg/aispeech/api_aispeech.py
:copyright:facegood © 2019 by the tang.
url: https://help.tgenie.cn/#/ba_token
"""
import requests
import hashlib
import time
import json
class AiSpeech(object):
def __init__(self,productId,publicKey,secretKey,productIdChat=None,token = None,expireTimeSecs=5):
self.productId = productId
self.publicKey = publicKey
self.secretKey = secretKey
if productIdChat is None:
self.productIdChat = productId
else:
self.productIdChat = productIdChat
self.token = token
self.expireTime = None
        # Refresh the token this many seconds before it expires
self.expireTimeSecs = expireTimeSecs
def update_token(self,url = None):
if url is None:
url = "https://api.talkinggenie.com/api/v2/public/authToken"
headers = {'Content-Type': 'application/json;charset=UTF-8'}
requests_body = {}
timeStamp = str(int(time.time()*1000)) # 1565589691183
sign = hashlib.md5((self.publicKey+self.productId+timeStamp+self.secretKey).encode('utf-8')).hexdigest()
requests_body['productId'] = self.productId
requests_body['publicKey'] = self.publicKey
requests_body['sign'] = sign
requests_body['timeStamp']=timeStamp
r_token = requests.post(url, headers = headers, data=json.dumps(requests_body))
if r_token.status_code != 200:
print("error requests post url:",url,"\nheaders:",headers,"\nrequest body:",requests_body,"\ncode:",r_token.status_code)
r_data = json.loads(r_token.text)
if r_data['code'] == '200':
self.token = r_data['result']['token']
self.expireTime = int(r_data['result']['expireTime'])
return self.expireTime
else:
print("ERROR:",r_data)
return False
def chat(self,url = None,text=None):
if url is None:
url = "https://api.talkinggenie.com/api/v1/ba"
        # Refresh the token if it has expired or is about to expire
if self.expireTime < int((time.time()+self.expireTimeSecs)*1000):
self.update_token()
headers = {}
headers['Content-Type'] = 'application/json; charset=utf-8'
headers['X-AISPEECH-TOKEN']= self.token
headers['X-AISPEECH-PRODUCT-ID'] = self.productId
requests_body = {}
query = {
"type":"text",
"text":text
}
context= {
"session":"1",
"recordId":"100"
}
dialog={
"productId":self.productIdChat
}
output = {
"type":"text"
}
requests_body['query'] = query
requests_body['dialog'] = dialog
requests_body['context'] = context
requests_body['output'] = output
# print("text chat***********:",json.dumps(requests_body))
r_chat = requests.post(url, headers = headers, data=json.dumps(requests_body))
if r_chat.status_code != 200:
print("error requests post url:",url,"\nheaders:",headers,"\nrequest body:",requests_body,"\ncode:",r.status_code)
return False
r_data = json.loads(r_chat.text)
try:
if r_data["status"] == 200:
question = r_data['result']['query']
answer = r_data['result']['answer']
if question == text:
return answer
except Exception as error:
print(error)
print("ERROR:query is not this answer\n","query is:",text,"request is:",r_data)
return False
def tts(self,url = None,text=None,speaker="zsmeif"):
if url is None:
url = "https://api.talkinggenie.com/api/v1/ba/tts"
        # Refresh the token if it has expired or is about to expire
if self.expireTime < int((time.time()+self.expireTimeSecs)*1000):
self.update_token()
headers = {}
headers['Content-Type'] = 'application/json;charset=UTF-8'
headers['X-AISPEECH-TOKEN']= self.token
headers['X-AISPEECH-PRODUCT-ID'] = self.productId
requests_body = {}
tts = {
"speed": 1.1,
"volume": 100,
"voiceId": speaker,
"enableRealTimeFeedback": False,
"text": text
}
audio = {
"audioType": "wav",
"sampleRate": "16000",
"sampleBytes": 2
}
requests_body['tts'] = tts
requests_body['audio'] = audio
requests_body['type'] = "tts"
r_tts = requests.post(url, headers = headers, data=json.dumps(requests_body))
if r_tts.status_code == 200:
return r_tts.content
else:
print("ERROR:tts is failed\n","text is:",text)
def dm_tts(self,url = None,text=None,speaker="zsmeif"):
if url is None:
url = "https://api.talkinggenie.com/api/v1/ba"
headers = {}
headers['Content-Type'] = 'application/json;charset=UTF-8'
headers['X-AISPEECH-TOKEN']= self.token
headers['X-AISPEECH-PRODUCT-ID'] = self.productId
requests_body = {
"query":{
"type":"text",
"text":text
},
"tts":{
"speed": 1.1,
"volume": 100,
"voiceId": speaker
},
"dialog":{
"productId":self.productIdChat
},
"output":{
"audio": {
"audioType": "wav",
"channel":1,
"sampleRate": "16000",
"sampleBytes": 2
},
"type": "tts"
}
}
r_dm_tts = requests.post(url, headers = headers, data=json.dumps(requests_body))
# print(r_dm_tts)
return r_dm_tts
# if r_dm_tts.status_code == 200:
# return r_dm_tts.content
# else:
# print("ERROR:tts is failed\n","text is:",text)
if __name__ == "__main__":
productId = "914008290"
publicKey = "<KEY>"
secretkey ="<KEY>"
productIdChat = "914008349"
ai = AiSpeech(productId,publicKey,secretkey,productIdChat)
ai.update_token()
print(ai.token)
dm_tts = ai.dm_tts(text = "你是谁?")
dm_tts.status_code
dm_tts
chat_text = ai.chat(text="你是谁?")
b_wav_data = ai.tts(text = chat_text)
text_self = "您好!我是子书美,来自数字虚拟世界。我可以告诉你你想知道的一切,想与我面对面聊天吗?12月5日到8日来第十届中国国际新媒体短片节找我!"
b_wav_data = ai.tts(text = text_self,speaker = "lchuam")
import wave
wav_path = "G:/test.wav"
f = open(wav_path,"wb")
f.write(b_wav_data)
f.close()
signal = b_wav_data[44:]
|
apps/user/serializers.py | PyCN/BlogBackendProject | 335 | 11070669 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2017/12/7 11:49 AM
# @Author : LennonChin
# @Email : <EMAIL>
# @File : serializers.py
# @Software: PyCharm
import re
from datetime import datetime, timedelta
from rest_framework import serializers
from .models import GuestProfile, EmailVerifyRecord
from base.const import REGEX_EMAIL
class GuestSerializer(serializers.ModelSerializer):
is_blogger = serializers.SerializerMethodField()
def get_is_blogger(self, guest):
if guest.email:
return guest.email == '243316474@<EMAIL>'
else:
return False
class Meta:
model = GuestProfile
fields = ('id', 'nick_name', 'avatar', 'is_blogger')
class EmailSerializer(serializers.Serializer):
nick_name = serializers.CharField(max_length=50, min_length=1, required=True, label='昵称')
email = serializers.EmailField(required=True, label='邮箱')
def validate_email(self, email):
"""
        Validate the email address
:param email:
:return:
"""
        # Check that the email address is well formed
if not re.match(REGEX_EMAIL, email):
raise serializers.ValidationError("邮箱格式错误")
        # Throttle how often verification emails can be requested
        thirty_seconds_ago = datetime.now() - timedelta(hours=0, minutes=0, seconds=30)
        if EmailVerifyRecord.objects.filter(send_time__gt=thirty_seconds_ago, email=email):
raise serializers.ValidationError("请求发送过于频繁,请间隔30秒后重试")
return email
class EmailVerifySerializer(serializers.Serializer):
nick_name = serializers.CharField(max_length=50, min_length=1, required=True, label='昵称')
email = serializers.EmailField(required=True, label='邮箱')
code = serializers.CharField(max_length=4, min_length=4, required=False, label='验证码')
def validate_email(self, email):
"""
        Validate the email address
:param email:
:return:
"""
        # Check that the email address is well formed
if not re.match(REGEX_EMAIL, email):
raise serializers.ValidationError("邮箱格式错误")
return email
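# Hedged usage sketch (illustrative only; the request payload and the
# `guest_instance` object are assumptions):
#
#   serializer = EmailVerifySerializer(data={"nick_name": "guest",
#                                            "email": "guest@example.com",
#                                            "code": "1234"})
#   serializer.is_valid(raise_exception=True)
#   guest_data = GuestSerializer(guest_instance).data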
|
geopyspark/tests/geotrellis/tiled_layer_tests/focal_test.py | geotrellis/geotrellis-python | 182 | 11070676 | <reponame>geotrellis/geotrellis-python
import numpy as np
import os
import pytest
import unittest
from geopyspark.geotrellis import SpatialKey, Extent, Tile, SpatialPartitionStrategy, HashPartitionStrategy
from geopyspark.geotrellis.layer import TiledRasterLayer
from geopyspark.tests.base_test_class import BaseTestClass
from geopyspark.geotrellis.constants import LayerType, Operation, Neighborhood
from geopyspark.geotrellis.neighborhood import Square, Annulus, Wedge, Circle, Nesw
class FocalTest(BaseTestClass):
cells = np.array([[
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 0.0]]])
tile = Tile.from_numpy_array(cells, -1.0)
layer = [(SpatialKey(0, 0), tile),
(SpatialKey(1, 0), tile),
(SpatialKey(0, 1), tile),
(SpatialKey(1, 1), tile)]
rdd = BaseTestClass.pysc.parallelize(layer)
extent = {'xmin': 0.0, 'ymin': 0.0, 'xmax': 33.0, 'ymax': 33.0}
layout = {'layoutCols': 2, 'layoutRows': 2, 'tileCols': 5, 'tileRows': 5}
metadata = {'cellType': 'float32ud-1.0',
'extent': extent,
'crs': '+proj=longlat +datum=WGS84 +no_defs ',
'bounds': {
'minKey': {'col': 0, 'row': 0},
'maxKey': {'col': 1, 'row': 1}},
'layoutDefinition': {
'extent': extent,
'tileLayout': {'tileCols': 5, 'tileRows': 5, 'layoutCols': 2, 'layoutRows': 2}}}
raster_rdd = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, metadata)
@pytest.fixture(autouse=True)
def tearDown(self):
yield
BaseTestClass.pysc._gateway.close()
def test_focal_sum_square(self):
result = self.raster_rdd.focal(
operation=Operation.SUM,
neighborhood=Neighborhood.SQUARE,
param_1=1.0,
partition_strategy=HashPartitionStrategy())
self.assertTrue(result.to_numpy_rdd().first()[1].cells[0][1][0] >= 6)
def test_focal_sum_wedge(self):
neighborhood = Wedge(radius=1.0, start_angle=0.0, end_angle=180.0)
self.assertEqual(str(neighborhood), repr(neighborhood))
result = self.raster_rdd.focal(
operation=Operation.SUM,
neighborhood=neighborhood,
partition_strategy=HashPartitionStrategy(2))
self.assertTrue(result.to_numpy_rdd().first()[1].cells[0][1][0] >= 3)
def test_focal_sum_circle(self):
neighborhood = Circle(radius=1.0)
self.assertEqual(str(neighborhood), repr(neighborhood))
result = self.raster_rdd.focal(
operation=Operation.SUM,
neighborhood=neighborhood,
partition_strategy=SpatialPartitionStrategy(2))
self.assertTrue(result.to_numpy_rdd().first()[1].cells[0][1][0] >= 4)
def test_focal_sum_nesw(self):
neighborhood = Nesw(extent=1.0)
self.assertEqual(str(neighborhood), repr(neighborhood))
result = self.raster_rdd.focal(
operation=Operation.SUM,
neighborhood=neighborhood,
partition_strategy=SpatialPartitionStrategy())
self.assertTrue(result.to_numpy_rdd().first()[1].cells[0][1][0] >= 4)
def test_focal_sum_annulus(self):
neighborhood = Annulus(inner_radius=0.5, outer_radius=1.5)
self.assertEqual(str(neighborhood), repr(neighborhood))
result = self.raster_rdd.focal(
operation=Operation.SUM,
neighborhood=neighborhood)
self.assertTrue(result.to_numpy_rdd().first()[1].cells[0][1][0] >= 5.0)
def test_square(self):
neighborhood = Square(extent=1.0)
self.assertEqual(str(neighborhood), repr(neighborhood))
result = self.raster_rdd.focal(
operation=Operation.SUM,
neighborhood=neighborhood)
self.assertTrue(result.to_numpy_rdd().first()[1].cells[0][1][0] >= 6.0)
def test_focal_sum_int(self):
result = self.raster_rdd.focal(
operation=Operation.SUM,
neighborhood=Neighborhood.SQUARE,
param_1=1)
self.assertTrue(result.to_numpy_rdd().first()[1].cells[0][1][0] >= 6)
def test_focal_sum_square_with_square(self):
square = Square(extent=1.0)
result = self.raster_rdd.focal(
operation=Operation.SUM,
neighborhood=square)
self.assertTrue(result.to_numpy_rdd().first()[1].cells[0][1][0] >= 6)
def test_focal_min(self):
result = self.raster_rdd.focal(operation=Operation.MIN, neighborhood=Neighborhood.ANNULUS,
param_1=2.0, param_2=1.0)
self.assertEqual(result.to_numpy_rdd().first()[1].cells[0][0][0], -1)
def test_focal_min_annulus(self):
annulus = Annulus(inner_radius=2.0, outer_radius=1.0)
result = self.raster_rdd.focal(operation=Operation.MIN, neighborhood=annulus)
self.assertEqual(result.to_numpy_rdd().first()[1].cells[0][0][0], -1)
def test_focal_min_int(self):
result = self.raster_rdd.focal(operation=Operation.MIN, neighborhood=Neighborhood.ANNULUS,
param_1=2, param_2=1)
self.assertEqual(result.to_numpy_rdd().first()[1].cells[0][0][0], -1)
def test_tobler(self):
result = self.raster_rdd.tobler()
if __name__ == "__main__":
unittest.main()
BaseTestClass.pysc.stop()
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2020_09_01/models/_models.py | rsdoherty/azure-sdk-for-python | 2,728 | 11070691 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class Alias(msrest.serialization.Model):
"""The alias type.
Variables are only populated by the server, and will be ignored when sending a request.
:param name: The alias name.
:type name: str
:param paths: The paths for an alias.
:type paths: list[~azure.mgmt.resource.policy.v2020_09_01.models.AliasPath]
:param type: The type of the alias. Possible values include: "NotSpecified", "PlainText",
"Mask".
:type type: str or ~azure.mgmt.resource.policy.v2020_09_01.models.AliasType
:param default_path: The default path for an alias.
:type default_path: str
:param default_pattern: The default pattern for an alias.
:type default_pattern: ~azure.mgmt.resource.policy.v2020_09_01.models.AliasPattern
:ivar default_metadata: The default alias path metadata. Applies to the default path and to any
alias path that doesn't have metadata.
:vartype default_metadata: ~azure.mgmt.resource.policy.v2020_09_01.models.AliasPathMetadata
"""
_validation = {
'default_metadata': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'paths': {'key': 'paths', 'type': '[AliasPath]'},
'type': {'key': 'type', 'type': 'str'},
'default_path': {'key': 'defaultPath', 'type': 'str'},
'default_pattern': {'key': 'defaultPattern', 'type': 'AliasPattern'},
'default_metadata': {'key': 'defaultMetadata', 'type': 'AliasPathMetadata'},
}
def __init__(
self,
**kwargs
):
super(Alias, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.paths = kwargs.get('paths', None)
self.type = kwargs.get('type', None)
self.default_path = kwargs.get('default_path', None)
self.default_pattern = kwargs.get('default_pattern', None)
self.default_metadata = None
class AliasPath(msrest.serialization.Model):
"""The type of the paths for alias.
Variables are only populated by the server, and will be ignored when sending a request.
:param path: The path of an alias.
:type path: str
:param api_versions: The API versions.
:type api_versions: list[str]
:param pattern: The pattern for an alias path.
:type pattern: ~azure.mgmt.resource.policy.v2020_09_01.models.AliasPattern
:ivar metadata: The metadata of the alias path. If missing, fall back to the default metadata
of the alias.
:vartype metadata: ~azure.mgmt.resource.policy.v2020_09_01.models.AliasPathMetadata
"""
_validation = {
'metadata': {'readonly': True},
}
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'api_versions': {'key': 'apiVersions', 'type': '[str]'},
'pattern': {'key': 'pattern', 'type': 'AliasPattern'},
'metadata': {'key': 'metadata', 'type': 'AliasPathMetadata'},
}
def __init__(
self,
**kwargs
):
super(AliasPath, self).__init__(**kwargs)
self.path = kwargs.get('path', None)
self.api_versions = kwargs.get('api_versions', None)
self.pattern = kwargs.get('pattern', None)
self.metadata = None
class AliasPathMetadata(msrest.serialization.Model):
"""AliasPathMetadata.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The type of the token that the alias path is referring to. Possible values include:
"NotSpecified", "Any", "String", "Object", "Array", "Integer", "Number", "Boolean".
:vartype type: str or ~azure.mgmt.resource.policy.v2020_09_01.models.AliasPathTokenType
:ivar attributes: The attributes of the token that the alias path is referring to. Possible
values include: "None", "Modifiable".
:vartype attributes: str or ~azure.mgmt.resource.policy.v2020_09_01.models.AliasPathAttributes
"""
_validation = {
'type': {'readonly': True},
'attributes': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AliasPathMetadata, self).__init__(**kwargs)
self.type = None
self.attributes = None
class AliasPattern(msrest.serialization.Model):
"""The type of the pattern for an alias path.
:param phrase: The alias pattern phrase.
:type phrase: str
:param variable: The alias pattern variable.
:type variable: str
:param type: The type of alias pattern. Possible values include: "NotSpecified", "Extract".
:type type: str or ~azure.mgmt.resource.policy.v2020_09_01.models.AliasPatternType
"""
_attribute_map = {
'phrase': {'key': 'phrase', 'type': 'str'},
'variable': {'key': 'variable', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AliasPattern, self).__init__(**kwargs)
self.phrase = kwargs.get('phrase', None)
self.variable = kwargs.get('variable', None)
self.type = kwargs.get('type', None)
class DataEffect(msrest.serialization.Model):
"""The data effect definition.
:param name: The data effect name.
:type name: str
:param details_schema: The data effect details schema.
:type details_schema: any
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'details_schema': {'key': 'detailsSchema', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(DataEffect, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.details_schema = kwargs.get('details_schema', None)
class DataManifestCustomResourceFunctionDefinition(msrest.serialization.Model):
"""The custom resource function definition.
:param name: The function name as it will appear in the policy rule. eg - 'vault'.
:type name: str
:param fully_qualified_resource_type: The fully qualified control plane resource type that this
function represents. eg - 'Microsoft.KeyVault/vaults'.
:type fully_qualified_resource_type: str
:param default_properties: The top-level properties that can be selected on the function's
output. eg - [ "name", "location" ] if vault().name and vault().location are supported.
:type default_properties: list[str]
:param allow_custom_properties: A value indicating whether the custom properties within the
property bag are allowed. Needs api-version to be specified in the policy rule eg -
vault('2019-06-01').
:type allow_custom_properties: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'fully_qualified_resource_type': {'key': 'fullyQualifiedResourceType', 'type': 'str'},
'default_properties': {'key': 'defaultProperties', 'type': '[str]'},
'allow_custom_properties': {'key': 'allowCustomProperties', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(DataManifestCustomResourceFunctionDefinition, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.fully_qualified_resource_type = kwargs.get('fully_qualified_resource_type', None)
self.default_properties = kwargs.get('default_properties', None)
self.allow_custom_properties = kwargs.get('allow_custom_properties', None)
class DataPolicyManifest(msrest.serialization.Model):
"""The data policy manifest.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ID of the data policy manifest.
:vartype id: str
:ivar name: The name of the data policy manifest (it's the same as the Policy Mode).
:vartype name: str
:ivar type: The type of the resource (Microsoft.Authorization/dataPolicyManifests).
:vartype type: str
:param namespaces: The list of namespaces for the data policy manifest.
:type namespaces: list[str]
:param policy_mode: The policy mode of the data policy manifest.
:type policy_mode: str
:param is_built_in_only: A value indicating whether policy mode is allowed only in built-in
definitions.
:type is_built_in_only: bool
:param resource_type_aliases: An array of resource type aliases.
:type resource_type_aliases:
list[~azure.mgmt.resource.policy.v2020_09_01.models.ResourceTypeAliases]
:param effects: The effect definition.
:type effects: list[~azure.mgmt.resource.policy.v2020_09_01.models.DataEffect]
:param field_values: The non-alias field accessor values that can be used in the policy rule.
:type field_values: list[str]
:param standard: The standard resource functions (subscription and/or resourceGroup).
:type standard: list[str]
:param custom: An array of data manifest custom resource definition.
:type custom:
list[~azure.mgmt.resource.policy.v2020_09_01.models.DataManifestCustomResourceFunctionDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'namespaces': {'key': 'properties.namespaces', 'type': '[str]'},
'policy_mode': {'key': 'properties.policyMode', 'type': 'str'},
'is_built_in_only': {'key': 'properties.isBuiltInOnly', 'type': 'bool'},
'resource_type_aliases': {'key': 'properties.resourceTypeAliases', 'type': '[ResourceTypeAliases]'},
'effects': {'key': 'properties.effects', 'type': '[DataEffect]'},
'field_values': {'key': 'properties.fieldValues', 'type': '[str]'},
'standard': {'key': 'properties.resourceFunctions.standard', 'type': '[str]'},
'custom': {'key': 'properties.resourceFunctions.custom', 'type': '[DataManifestCustomResourceFunctionDefinition]'},
}
def __init__(
self,
**kwargs
):
super(DataPolicyManifest, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.namespaces = kwargs.get('namespaces', None)
self.policy_mode = kwargs.get('policy_mode', None)
self.is_built_in_only = kwargs.get('is_built_in_only', None)
self.resource_type_aliases = kwargs.get('resource_type_aliases', None)
self.effects = kwargs.get('effects', None)
self.field_values = kwargs.get('field_values', None)
self.standard = kwargs.get('standard', None)
self.custom = kwargs.get('custom', None)
class DataPolicyManifestListResult(msrest.serialization.Model):
"""List of data policy manifests.
:param value: An array of data policy manifests.
:type value: list[~azure.mgmt.resource.policy.v2020_09_01.models.DataPolicyManifest]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[DataPolicyManifest]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataPolicyManifestListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.resource.policy.v2020_09_01.models.ErrorResponse]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.resource.policy.v2020_09_01.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorResponse]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class Identity(msrest.serialization.Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal ID of the resource identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of the resource identity.
:vartype tenant_id: str
:param type: The identity type. This is the only required field when adding a system assigned
identity to a resource. Possible values include: "SystemAssigned", "None".
:type type: str or ~azure.mgmt.resource.policy.v2020_09_01.models.ResourceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Identity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs.get('type', None)
class NonComplianceMessage(msrest.serialization.Model):
"""A message that describes why a resource is non-compliant with the policy. This is shown in 'deny' error messages and on resource's non-compliant compliance results.
All required parameters must be populated in order to send to Azure.
:param message: Required. A message that describes why a resource is non-compliant with the
policy. This is shown in 'deny' error messages and on resource's non-compliant compliance
results.
:type message: str
:param policy_definition_reference_id: The policy definition reference ID within a policy set
definition the message is intended for. This is only applicable if the policy assignment
assigns a policy set definition. If this is not provided the message applies to all policies
assigned by this policy assignment.
:type policy_definition_reference_id: str
"""
_validation = {
'message': {'required': True},
}
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'policy_definition_reference_id': {'key': 'policyDefinitionReferenceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NonComplianceMessage, self).__init__(**kwargs)
self.message = kwargs['message']
self.policy_definition_reference_id = kwargs.get('policy_definition_reference_id', None)
class ParameterDefinitionsValue(msrest.serialization.Model):
"""The definition of a parameter that can be provided to the policy.
:param type: The data type of the parameter. Possible values include: "String", "Array",
"Object", "Boolean", "Integer", "Float", "DateTime".
:type type: str or ~azure.mgmt.resource.policy.v2020_09_01.models.ParameterType
:param allowed_values: The allowed values for the parameter.
:type allowed_values: list[any]
:param default_value: The default value for the parameter if no value is provided.
:type default_value: any
:param metadata: General metadata for the parameter.
:type metadata:
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterDefinitionsValueMetadata
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'allowed_values': {'key': 'allowedValues', 'type': '[object]'},
'default_value': {'key': 'defaultValue', 'type': 'object'},
'metadata': {'key': 'metadata', 'type': 'ParameterDefinitionsValueMetadata'},
}
def __init__(
self,
**kwargs
):
super(ParameterDefinitionsValue, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.allowed_values = kwargs.get('allowed_values', None)
self.default_value = kwargs.get('default_value', None)
self.metadata = kwargs.get('metadata', None)
class ParameterDefinitionsValueMetadata(msrest.serialization.Model):
"""General metadata for the parameter.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, any]
:param display_name: The display name for the parameter.
:type display_name: str
:param description: The description of the parameter.
:type description: str
:param strong_type: Used when assigning the policy definition through the portal. Provides a
context aware list of values for the user to choose from.
:type strong_type: str
:param assign_permissions: Set to true to have Azure portal create role assignments on the
resource ID or resource scope value of this parameter during policy assignment. This property
is useful in case you wish to assign permissions outside the assignment scope.
:type assign_permissions: bool
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'strong_type': {'key': 'strongType', 'type': 'str'},
'assign_permissions': {'key': 'assignPermissions', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ParameterDefinitionsValueMetadata, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.display_name = kwargs.get('display_name', None)
self.description = kwargs.get('description', None)
self.strong_type = kwargs.get('strong_type', None)
self.assign_permissions = kwargs.get('assign_permissions', None)
class ParameterValuesValue(msrest.serialization.Model):
"""The value of a parameter.
:param value: The value of the parameter.
:type value: any
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ParameterValuesValue, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PolicyAssignment(msrest.serialization.Model):
"""The policy assignment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ID of the policy assignment.
:vartype id: str
:ivar type: The type of the policy assignment.
:vartype type: str
:ivar name: The name of the policy assignment.
:vartype name: str
:param location: The location of the policy assignment. Only required when utilizing managed
identity.
:type location: str
:param identity: The managed identity associated with the policy assignment.
:type identity: ~azure.mgmt.resource.policy.v2020_09_01.models.Identity
:param display_name: The display name of the policy assignment.
:type display_name: str
:param policy_definition_id: The ID of the policy definition or policy set definition being
assigned.
:type policy_definition_id: str
:ivar scope: The scope for the policy assignment.
:vartype scope: str
:param not_scopes: The policy's excluded scopes.
:type not_scopes: list[str]
:param parameters: The parameter values for the assigned policy rule. The keys are the
parameter names.
:type parameters: dict[str,
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterValuesValue]
:param description: This message will be part of response in case of policy violation.
:type description: str
:param metadata: The policy assignment metadata. Metadata is an open ended object and is
typically a collection of key value pairs.
:type metadata: any
:param enforcement_mode: The policy assignment enforcement mode. Possible values are Default
and DoNotEnforce. Possible values include: "Default", "DoNotEnforce". Default value: "Default".
:type enforcement_mode: str or ~azure.mgmt.resource.policy.v2020_09_01.models.EnforcementMode
:param non_compliance_messages: The messages that describe why a resource is non-compliant with
the policy.
:type non_compliance_messages:
list[~azure.mgmt.resource.policy.v2020_09_01.models.NonComplianceMessage]
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
'scope': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'policy_definition_id': {'key': 'properties.policyDefinitionId', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'not_scopes': {'key': 'properties.notScopes', 'type': '[str]'},
'parameters': {'key': 'properties.parameters', 'type': '{ParameterValuesValue}'},
'description': {'key': 'properties.description', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'enforcement_mode': {'key': 'properties.enforcementMode', 'type': 'str'},
'non_compliance_messages': {'key': 'properties.nonComplianceMessages', 'type': '[NonComplianceMessage]'},
}
def __init__(
self,
**kwargs
):
super(PolicyAssignment, self).__init__(**kwargs)
self.id = None
self.type = None
self.name = None
self.location = kwargs.get('location', None)
self.identity = kwargs.get('identity', None)
self.display_name = kwargs.get('display_name', None)
self.policy_definition_id = kwargs.get('policy_definition_id', None)
self.scope = None
self.not_scopes = kwargs.get('not_scopes', None)
self.parameters = kwargs.get('parameters', None)
self.description = kwargs.get('description', None)
self.metadata = kwargs.get('metadata', None)
self.enforcement_mode = kwargs.get('enforcement_mode', "Default")
self.non_compliance_messages = kwargs.get('non_compliance_messages', None)
class PolicyAssignmentListResult(msrest.serialization.Model):
"""List of policy assignments.
:param value: An array of policy assignments.
:type value: list[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyAssignment]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PolicyAssignment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PolicyAssignmentListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PolicyDefinition(msrest.serialization.Model):
"""The policy definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ID of the policy definition.
:vartype id: str
:ivar name: The name of the policy definition.
:vartype name: str
:ivar type: The type of the resource (Microsoft.Authorization/policyDefinitions).
:vartype type: str
:param policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn,
Custom, and Static. Possible values include: "NotSpecified", "BuiltIn", "Custom", "Static".
:type policy_type: str or ~azure.mgmt.resource.policy.v2020_09_01.models.PolicyType
:param mode: The policy definition mode. Some examples are All, Indexed,
Microsoft.KeyVault.Data.
:type mode: str
:param display_name: The display name of the policy definition.
:type display_name: str
:param description: The policy definition description.
:type description: str
:param policy_rule: The policy rule.
:type policy_rule: any
:param metadata: The policy definition metadata. Metadata is an open ended object and is
typically a collection of key value pairs.
:type metadata: any
:param parameters: The parameter definitions for parameters used in the policy rule. The keys
are the parameter names.
:type parameters: dict[str,
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterDefinitionsValue]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'policy_type': {'key': 'properties.policyType', 'type': 'str'},
'mode': {'key': 'properties.mode', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'policy_rule': {'key': 'properties.policyRule', 'type': 'object'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'parameters': {'key': 'properties.parameters', 'type': '{ParameterDefinitionsValue}'},
}
def __init__(
self,
**kwargs
):
super(PolicyDefinition, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.policy_type = kwargs.get('policy_type', None)
self.mode = kwargs.get('mode', "Indexed")
self.display_name = kwargs.get('display_name', None)
self.description = kwargs.get('description', None)
self.policy_rule = kwargs.get('policy_rule', None)
self.metadata = kwargs.get('metadata', None)
self.parameters = kwargs.get('parameters', None)
class PolicyDefinitionGroup(msrest.serialization.Model):
"""The policy definition group.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the group.
:type name: str
:param display_name: The group's display name.
:type display_name: str
:param category: The group's category.
:type category: str
:param description: The group's description.
:type description: str
:param additional_metadata_id: A resource ID of a resource that contains additional metadata
about the group.
:type additional_metadata_id: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'additional_metadata_id': {'key': 'additionalMetadataId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PolicyDefinitionGroup, self).__init__(**kwargs)
self.name = kwargs['name']
self.display_name = kwargs.get('display_name', None)
self.category = kwargs.get('category', None)
self.description = kwargs.get('description', None)
self.additional_metadata_id = kwargs.get('additional_metadata_id', None)
class PolicyDefinitionListResult(msrest.serialization.Model):
"""List of policy definitions.
:param value: An array of policy definitions.
:type value: list[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyDefinition]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PolicyDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PolicyDefinitionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PolicyDefinitionReference(msrest.serialization.Model):
"""The policy definition reference.
All required parameters must be populated in order to send to Azure.
:param policy_definition_id: Required. The ID of the policy definition or policy set
definition.
:type policy_definition_id: str
:param parameters: The parameter values for the referenced policy rule. The keys are the
parameter names.
:type parameters: dict[str,
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterValuesValue]
:param policy_definition_reference_id: A unique id (within the policy set definition) for this
policy definition reference.
:type policy_definition_reference_id: str
:param group_names: The name of the groups that this policy definition reference belongs to.
:type group_names: list[str]
"""
_validation = {
'policy_definition_id': {'required': True},
}
_attribute_map = {
'policy_definition_id': {'key': 'policyDefinitionId', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{ParameterValuesValue}'},
'policy_definition_reference_id': {'key': 'policyDefinitionReferenceId', 'type': 'str'},
'group_names': {'key': 'groupNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PolicyDefinitionReference, self).__init__(**kwargs)
self.policy_definition_id = kwargs['policy_definition_id']
self.parameters = kwargs.get('parameters', None)
self.policy_definition_reference_id = kwargs.get('policy_definition_reference_id', None)
self.group_names = kwargs.get('group_names', None)
class PolicyExemption(msrest.serialization.Model):
"""The policy exemption.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.resource.policy.v2020_09_01.models.SystemData
:ivar id: The ID of the policy exemption.
:vartype id: str
:ivar name: The name of the policy exemption.
:vartype name: str
:ivar type: The type of the resource (Microsoft.Authorization/policyExemptions).
:vartype type: str
:param policy_assignment_id: Required. The ID of the policy assignment that is being exempted.
:type policy_assignment_id: str
:param policy_definition_reference_ids: The policy definition reference ID list when the
associated policy assignment is an assignment of a policy set definition.
:type policy_definition_reference_ids: list[str]
:param exemption_category: Required. The policy exemption category. Possible values are Waiver
and Mitigated. Possible values include: "Waiver", "Mitigated".
:type exemption_category: str or
~azure.mgmt.resource.policy.v2020_09_01.models.ExemptionCategory
:param expires_on: The expiration date and time (in UTC ISO 8601 format yyyy-MM-ddTHH:mm:ssZ)
of the policy exemption.
:type expires_on: ~datetime.datetime
:param display_name: The display name of the policy exemption.
:type display_name: str
:param description: The description of the policy exemption.
:type description: str
:param metadata: The policy exemption metadata. Metadata is an open ended object and is
typically a collection of key value pairs.
:type metadata: any
"""
_validation = {
'system_data': {'readonly': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'policy_assignment_id': {'required': True},
'exemption_category': {'required': True},
}
_attribute_map = {
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'policy_assignment_id': {'key': 'properties.policyAssignmentId', 'type': 'str'},
'policy_definition_reference_ids': {'key': 'properties.policyDefinitionReferenceIds', 'type': '[str]'},
'exemption_category': {'key': 'properties.exemptionCategory', 'type': 'str'},
'expires_on': {'key': 'properties.expiresOn', 'type': 'iso-8601'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(PolicyExemption, self).__init__(**kwargs)
self.system_data = None
self.id = None
self.name = None
self.type = None
self.policy_assignment_id = kwargs['policy_assignment_id']
self.policy_definition_reference_ids = kwargs.get('policy_definition_reference_ids', None)
self.exemption_category = kwargs['exemption_category']
self.expires_on = kwargs.get('expires_on', None)
self.display_name = kwargs.get('display_name', None)
self.description = kwargs.get('description', None)
self.metadata = kwargs.get('metadata', None)
class PolicyExemptionListResult(msrest.serialization.Model):
"""List of policy exemptions.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: An array of policy exemptions.
:type value: list[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyExemption]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PolicyExemption]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PolicyExemptionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = None
class PolicySetDefinition(msrest.serialization.Model):
"""The policy set definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ID of the policy set definition.
:vartype id: str
:ivar name: The name of the policy set definition.
:vartype name: str
:ivar type: The type of the resource (Microsoft.Authorization/policySetDefinitions).
:vartype type: str
:param policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn,
Custom, and Static. Possible values include: "NotSpecified", "BuiltIn", "Custom", "Static".
:type policy_type: str or ~azure.mgmt.resource.policy.v2020_09_01.models.PolicyType
:param display_name: The display name of the policy set definition.
:type display_name: str
:param description: The policy set definition description.
:type description: str
:param metadata: The policy set definition metadata. Metadata is an open ended object and is
typically a collection of key value pairs.
:type metadata: any
:param parameters: The policy set definition parameters that can be used in policy definition
references.
:type parameters: dict[str,
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterDefinitionsValue]
:param policy_definitions: An array of policy definition references.
:type policy_definitions:
list[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyDefinitionReference]
:param policy_definition_groups: The metadata describing groups of policy definition references
within the policy set definition.
:type policy_definition_groups:
list[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyDefinitionGroup]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'policy_type': {'key': 'properties.policyType', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'parameters': {'key': 'properties.parameters', 'type': '{ParameterDefinitionsValue}'},
'policy_definitions': {'key': 'properties.policyDefinitions', 'type': '[PolicyDefinitionReference]'},
'policy_definition_groups': {'key': 'properties.policyDefinitionGroups', 'type': '[PolicyDefinitionGroup]'},
}
def __init__(
self,
**kwargs
):
super(PolicySetDefinition, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.policy_type = kwargs.get('policy_type', None)
self.display_name = kwargs.get('display_name', None)
self.description = kwargs.get('description', None)
self.metadata = kwargs.get('metadata', None)
self.parameters = kwargs.get('parameters', None)
self.policy_definitions = kwargs.get('policy_definitions', None)
self.policy_definition_groups = kwargs.get('policy_definition_groups', None)
class PolicySetDefinitionListResult(msrest.serialization.Model):
"""List of policy set definitions.
:param value: An array of policy set definitions.
:type value: list[~azure.mgmt.resource.policy.v2020_09_01.models.PolicySetDefinition]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PolicySetDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PolicySetDefinitionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class ResourceTypeAliases(msrest.serialization.Model):
"""The resource type aliases definition.
:param resource_type: The resource type name.
:type resource_type: str
:param aliases: The aliases for property names.
:type aliases: list[~azure.mgmt.resource.policy.v2020_09_01.models.Alias]
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'aliases': {'key': 'aliases', 'type': '[Alias]'},
}
def __init__(
self,
**kwargs
):
super(ResourceTypeAliases, self).__init__(**kwargs)
self.resource_type = kwargs.get('resource_type', None)
self.aliases = kwargs.get('aliases', None)
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~azure.mgmt.resource.policy.v2020_09_01.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
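

# Illustrative sketch (not part of the generated SDK models above): these msrest
# models are populated through **kwargs; optional fields fall back to None via
# kwargs.get(), while required fields such as NonComplianceMessage.message use
# kwargs['...'] and raise KeyError when omitted. All literal values below are
# hypothetical placeholders.
def _example_policy_assignment():
    message = NonComplianceMessage(message='Resource is not compliant.')
    return PolicyAssignment(
        display_name='Audit VM SKUs',  # hypothetical assignment name
        policy_definition_id='/providers/Microsoft.Authorization/policyDefinitions/example',
        parameters={'allowedSkus': ParameterValuesValue(value=['Standard_D2s_v3'])},
        enforcement_mode='DoNotEnforce',
        non_compliance_messages=[message],
    )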
|
readthedocs/embed/v3/urls.py | mehrdad-khojastefar/readthedocs.org | 2,092 | 11070743 | from django.conf.urls import url
from .views import EmbedAPI
urlpatterns = [
url(r'', EmbedAPI.as_view(), name='embed_api_v3'),
]
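

# Hedged usage sketch (assumed project-level wiring, not taken from this module):
# the same view could also be mounted under an explicit prefix in a project
# URLconf. The '^api/v3/embed/' prefix and the pattern name are assumptions for
# illustration only.
example_project_urlpatterns = [
    url(r'^api/v3/embed/', EmbedAPI.as_view(), name='embed_api_v3_example'),
]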
|
test/streamparse/cli/test_list.py | Pandinosaurus/streamparse | 1,050 | 11070751 | import argparse
import unittest
from streamparse.cli.list import subparser_hook
def test_subparser_hook():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
subparser_hook(subparsers)
subcommands = parser._optionals._actions[1].choices.keys()
assert "list" in subcommands
|
capirca/lib/nsxv.py | google-admin/capirca | 604 | 11070758 | # Copyright 2015 The Capirca Project Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Nsxv generator."""
import datetime
import re
import xml
from absl import logging
from capirca.lib import aclgenerator
from capirca.lib import nacaddr
import six
_ACTION_TABLE = {
'accept': 'allow',
'deny': 'deny',
'reject': 'reject',
'reject-with-tcp-rst': 'reject', # tcp rst not supported
}
_XML_TABLE = {
'actionStart': '<action>',
'actionEnd': '</action>',
'srcIpv4Start': '<source><type>Ipv4Address</type><value>',
'srcIpv4End': '</value></source>',
'destIpv4Start': '<destination><type>Ipv4Address</type><value>',
'destIpv4End': '</value></destination>',
'protocolStart': '<protocol>',
'protocolEnd': '</protocol>',
'serviceStart': '<service>',
'serviceEnd': '</service>',
'appliedToStart': '<appliedTo><type>SecurityGroup</type><value>',
'appliedToEnd': '</value></appliedTo>',
'srcPortStart': '<sourcePort>',
'srcPortEnd': '</sourcePort>',
'destPortStart': '<destinationPort>',
'destPortEnd': '</destinationPort>',
'icmpTypeStart': '<subProtocol>',
'icmpTypeEnd': '</subProtocol>',
'logTrue': '<loggingEnabled>true</loggingEnabled>',
'logFalse': '<loggingEnabled>false</loggingEnabled>',
'sectionStart': '<section>',
'sectionEnd': '</section>',
'nameStart': '<name>',
'nameEnd': '</name>',
'srcIpv6Start': '<source><type>Ipv6Address</type><value>',
'srcIpv6End': '</value></source>',
'destIpv6Start': '<destination><type>Ipv6Address</type><value>',
'destIpv6End': '</value></destination>',
'noteStart': '<notes>',
'noteEnd': '</notes>',
}
_NSXV_SUPPORTED_KEYWORDS = [
'name',
'action',
'comment',
'destination_address',
'destination_address_exclude',
'destination_port',
'expiration',
'icmp_type',
'protocol',
'source_address',
'source_address_exclude',
'source_port',
'logging'
]
# generic error class
class Error(Exception):
"""Generic error class."""
pass
class UnsupportedNsxvAccessListError(Error):
"""Raised when we're give a non named access list."""
pass
class NsxvAclTermError(Error):
"""Raised when there is a problem in a nsxv access list."""
pass
class NsxvDuplicateTermError(Error):
"""Raised when there is a duplicate."""
pass
class Term(aclgenerator.Term):
"""Creates a single ACL Term for Nsxv."""
def __init__(self, term, filter_type, applied_to=None, af=4):
self.term = term
# Our caller should have already verified the address family.
assert af in (4, 6)
self.af = af
self.filter_type = filter_type
self.applied_to = applied_to
def __str__(self):
"""Convert term to a rule string.
Returns:
A rule as a string.
Raises:
NsxvAclTermError: When unknown icmp-types are specified
"""
# Verify platform specific terms. Skip whole term if platform does not
# match.
if self.term.platform:
if 'nsxv' not in self.term.platform:
return ''
if self.term.platform_exclude:
if 'nsxv' in self.term.platform_exclude:
return ''
ret_str = ['']
# Don't render icmpv6 protocol terms under inet, or icmp under inet6
if ((self.af == 6 and 'icmp' in self.term.protocol) or
(self.af == 4 and 'icmpv6' in self.term.protocol)):
logging.debug(self.NO_AF_LOG_PROTO.substitute(term=self.term.name,
proto=self.term.protocol,
af=self.filter_type))
return ''
# Term verbatim is not supported
if self.term.verbatim:
raise NsxvAclTermError(
'Verbatim are not implemented in standard ACLs')
# Term option is not supported
if self.term.option:
for opt in [str(single_option) for single_option in self.term.option]:
if((opt.find('tcp-established') == 0)
or (opt.find('established') == 0)):
return ''
else:
raise NsxvAclTermError(
'Option are not implemented in standard ACLs')
# check for keywords Nsxv does not support
term_keywords = self.term.__dict__
unsupported_keywords = []
for key in term_keywords:
if term_keywords[key]:
# translated is obj attribute not keyword
if ('translated' not in key) and (key not in _NSXV_SUPPORTED_KEYWORDS):
unsupported_keywords.append(key)
if unsupported_keywords:
logging.warning('WARNING: The keywords %s in Term %s are not supported '
'in Nsxv ', unsupported_keywords, self.term.name)
name = '%s%s%s' % (_XML_TABLE.get('nameStart'), self.term.name,
_XML_TABLE.get('nameEnd'))
notes = ''
if self.term.comment:
for comment in self.term.comment:
notes = '%s%s' %(notes, comment)
notes = '%s%s%s' % (_XML_TABLE.get('noteStart'), notes,
_XML_TABLE.get('noteEnd'))
# protocol
protocol = None
if self.term.protocol:
protocol = list(map(self.PROTO_MAP.get, self.term.protocol,
self.term.protocol))
# icmp-types
icmp_types = ['']
if self.term.icmp_type:
icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type,
self.term.protocol,
self.af)
# for mixed filter type get both IPV4address and IPv6Address
af_list = []
if self.filter_type == 'mixed':
af_list = [4, 6]
else:
af_list = [self.af]
source_address = None
destination_address = None
source_addr = []
destination_addr = []
source_v4_addr = []
source_v6_addr = []
dest_v4_addr = []
dest_v6_addr = []
for af in af_list:
# source address
if self.term.source_address:
source_address = self.term.GetAddressOfVersion('source_address', af)
source_address_exclude = self.term.GetAddressOfVersion(
'source_address_exclude', af)
if source_address_exclude:
source_address = nacaddr.ExcludeAddrs(
source_address,
source_address_exclude)
if source_address:
if af == 4:
source_v4_addr = source_address
else:
source_v6_addr = source_address
source_addr = source_v4_addr + source_v6_addr
# destination address
if self.term.destination_address:
destination_address = self.term.GetAddressOfVersion(
'destination_address', af)
destination_address_exclude = self.term.GetAddressOfVersion(
'destination_address_exclude', af)
if destination_address_exclude:
destination_address = nacaddr.ExcludeAddrs(
destination_address,
destination_address_exclude)
if destination_address:
if af == 4:
dest_v4_addr = destination_address
else:
dest_v6_addr = destination_address
destination_addr = dest_v4_addr + dest_v6_addr
# Check for mismatch IP for source and destination address for mixed filter
if self.filter_type == 'mixed':
if source_addr and destination_addr:
if source_v4_addr and not dest_v4_addr:
source_addr = source_v6_addr
elif source_v6_addr and not dest_v6_addr:
source_addr = source_v4_addr
elif dest_v4_addr and not source_v4_addr:
destination_addr = dest_v6_addr
elif dest_v6_addr and not source_v6_addr:
destination_addr = dest_v4_addr
if not source_addr or not destination_addr:
logging.warning('Term %s will not be rendered as it has IPv4/IPv6 '
'mismatch for source/destination for mixed address '
'family.', self.term.name)
return ''
# ports
source_port = None
destination_port = None
if self.term.source_port:
source_port = self.term.source_port
if self.term.destination_port:
destination_port = self.term.destination_port
# logging
log = 'false'
if self.term.logging:
log = 'true'
sources = ''
if source_addr:
sources = '<sources excluded="false">'
for saddr in source_addr:
# inet4
if isinstance(saddr, nacaddr.IPv4):
if saddr.num_addresses > 1:
saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv4Start'),
saddr.with_prefixlen,
_XML_TABLE.get('srcIpv4End'),)
else:
saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv4Start'),
saddr.network_address,
_XML_TABLE.get('srcIpv4End'))
sources = '%s%s' %(sources, saddr)
# inet6
if isinstance(saddr, nacaddr.IPv6):
if saddr.num_addresses > 1:
saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv6Start'),
saddr.with_prefixlen,
_XML_TABLE.get('srcIpv6End'),)
else:
saddr = '%s%s%s' % (
_XML_TABLE.get('srcIpv6Start'),
saddr.network_address, _XML_TABLE.get('srcIpv6End'))
sources = '%s%s' %(sources, saddr)
sources = '%s%s' %(sources, '</sources>')
destinations = ''
if destination_addr:
destinations = '<destinations excluded="false">'
for daddr in destination_addr:
# inet4
if isinstance(daddr, nacaddr.IPv4):
if daddr.num_addresses > 1:
daddr = '%s%s%s' % (_XML_TABLE.get('destIpv4Start'),
daddr.with_prefixlen,
_XML_TABLE.get('destIpv4End'),)
else:
daddr = '%s%s%s' % (_XML_TABLE.get('destIpv4Start'),
daddr.network_address,
_XML_TABLE.get('destIpv4End'))
destinations = '%s%s' %(destinations, daddr)
# inet6
if isinstance(daddr, nacaddr.IPv6):
if daddr.num_addresses > 1:
daddr = '%s%s%s' % (_XML_TABLE.get('destIpv6Start'),
daddr.with_prefixlen,
_XML_TABLE.get('destIpv6End'),)
else:
daddr = '%s%s%s' % (_XML_TABLE.get('destIpv6Start'),
daddr.network_address,
_XML_TABLE.get('destIpv6End'))
destinations = '%s%s' %(destinations, daddr)
destinations = '%s%s' %(destinations, '</destinations>')
services = []
if protocol:
services.append('<services>')
for proto in protocol:
if proto != 'any':
services.append(self._ServiceToString(proto,
source_port,
destination_port,
icmp_types))
services.append('</services>')
service = ''
for s in services:
service = '%s%s' % (service, s)
# applied_to
applied_to_list = ''
if self.applied_to:
applied_to_list = '<appliedToList>'
applied_to_element = '%s%s%s' % (_XML_TABLE.get('appliedToStart'),
self.applied_to,
_XML_TABLE.get('appliedToEnd'))
applied_to_list = '%s%s' %(applied_to_list, applied_to_element)
applied_to_list = '%s%s' %(applied_to_list, '</appliedToList>')
# action
action = '%s%s%s' % (_XML_TABLE.get('actionStart'),
_ACTION_TABLE.get(str(self.term.action[0])),
_XML_TABLE.get('actionEnd'))
ret_lines = []
ret_lines.append('<rule logged="%s">%s%s%s%s%s%s%s</rule>' %
(log, name, action, sources, destinations, service,
applied_to_list, notes))
# remove any trailing spaces and replace multiple spaces with singles
stripped_ret_lines = [re.sub(r'\s+', ' ', x).rstrip() for x in ret_lines]
ret_str.extend(stripped_ret_lines)
return ''.join(ret_str)
def _ServiceToString(self, proto, sports, dports, icmp_types):
"""Converts service to string.
Args:
      proto: str, protocol
sports: str list or none, the source port
dports: str list or none, the destination port
icmp_types: icmp-type numeric specification (if any)
Returns:
Service definition.
"""
service = ''
# for icmp and icmpv6
if proto == 1 or proto == 58:
# handle icmp protocol
for icmp_type in icmp_types:
icmp_service = '%s%s%s%s' % (_XML_TABLE.get('serviceStart'),
_XML_TABLE.get('protocolStart'), proto,
_XML_TABLE.get('protocolEnd'))
# handle icmp types
if icmp_type:
icmp_type = '%s%s%s' %(_XML_TABLE.get('icmpTypeStart'),
str(icmp_type),
_XML_TABLE.get('icmpTypeEnd'))
icmp_service = '%s%s' % (icmp_service, icmp_type)
icmp_service = '%s%s' % (icmp_service, _XML_TABLE.get('serviceEnd'))
service = '%s%s' % (service, icmp_service)
else:
# handle other protocols
service = '%s%s%s%s' % (_XML_TABLE.get('serviceStart'),
_XML_TABLE.get('protocolStart'), proto,
_XML_TABLE.get('protocolEnd'))
# handle source ports
if sports:
str_sport = []
for sport in sports:
if sport[0] != sport[1]:
str_sport.append('%s-%s' % (sport[0], sport[1]))
else:
str_sport.append('%s' % (sport[0]))
service = '%s%s%s%s' % (service, _XML_TABLE.get('srcPortStart'),
', '.join(str_sport),
_XML_TABLE.get('srcPortEnd'))
# handle destination ports
if dports:
str_dport = []
for dport in dports:
if dport[0] != dport[1]:
str_dport.append('%s-%s' % (dport[0], dport[1]))
else:
str_dport.append('%s' % (dport[0]))
service = '%s%s%s%s' % (service, _XML_TABLE.get('destPortStart'),
', '.join(str_dport),
_XML_TABLE.get('destPortEnd'))
service = '%s%s' % (service, _XML_TABLE.get('serviceEnd'))
return service
class Nsxv(aclgenerator.ACLGenerator):
"""Nsxv rendering class.
This class takes a policy object and renders the output into a syntax
which is understood by nsxv policy.
Attributes:
pol: policy.Policy object
Raises:
UnsupportedNsxvAccessListError: Raised when we're give a non named access
list.
"""
_PLATFORM = 'nsxv'
_DEFAULT_PROTOCOL = 'ip'
SUFFIX = '.nsx'
_OPTIONAL_SUPPORTED_KEYWORDS = set(['expiration',
'logging',
])
_FILTER_OPTIONS_DICT = {}
def _BuildTokens(self):
"""Build supported tokens for platform.
Returns:
tuple containing both supported tokens and sub tokens
"""
supported_tokens, supported_sub_tokens = super()._BuildTokens()
supported_tokens |= {'logging'}
supported_sub_tokens.update({'action': {'accept', 'deny', 'reject',
'reject-with-tcp-rst'}})
del supported_sub_tokens['option']
return supported_tokens, supported_sub_tokens
def _TranslatePolicy(self, pol, exp_info):
self.nsxv_policies = []
current_date = datetime.datetime.utcnow().date()
exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
for header, terms in pol.filters:
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)
if len(filter_options) >= 2:
filter_name = filter_options[1]
# get filter type, section id and applied To
self._ParseFilterOptions(filter_options)
filter_type = self._FILTER_OPTIONS_DICT['filter_type']
applied_to = self._FILTER_OPTIONS_DICT['applied_to']
term_names = set()
new_terms = []
for term in terms:
# Check for duplicate terms
if term.name in term_names:
raise NsxvDuplicateTermError('There are multiple terms named: %s' %
term.name)
term_names.add(term.name)
if term.expiration:
if term.expiration <= exp_info_date:
logging.info('INFO: Term %s in policy %s expires '
'in less than two weeks.', term.name, filter_name)
if term.expiration <= current_date:
logging.warning('WARNING: Term %s in policy %s is expired and '
'will not be rendered.', term.name, filter_name)
continue
# Get the mapped action value
# If there is no mapped action value term is not rendered
mapped_action = _ACTION_TABLE.get(str(term.action[0]))
if not mapped_action:
logging.warning('WARNING: Action %s in Term %s is not valid and '
'will not be rendered.', term.action, term.name)
continue
term.name = self.FixTermLength(term.name)
if filter_type == 'inet':
af = 'inet'
term = self.FixHighPorts(term, af=af)
if not term:
continue
new_terms.append(Term(term, filter_type, applied_to, 4))
if filter_type == 'inet6':
af = 'inet6'
term = self.FixHighPorts(term, af=af)
if not term:
continue
new_terms.append(Term(term, filter_type, applied_to, 6))
if filter_type == 'mixed':
if 'icmpv6' not in term.protocol:
inet_term = self.FixHighPorts(term, 'inet')
if not inet_term:
continue
new_terms.append(Term(inet_term, filter_type, applied_to, 4))
else:
inet6_term = self.FixHighPorts(term, 'inet6')
if not inet6_term:
continue
new_terms.append(Term(inet6_term, filter_type, applied_to, 6))
self.nsxv_policies.append((header, filter_name, [filter_type],
new_terms))
def _ParseFilterOptions(self, filter_options):
"""Parses the target in header for filter type, section_id and applied_to.
Args:
filter_options: list of remaining target options
Returns:
A dictionary that contains fields necessary to create the firewall
rule.
Raises:
UnsupportedNsxvAccessListError: Raised when we're give a non named access
list.
"""
# check for filter type
if not 2 <= len(filter_options) <= 5:
raise UnsupportedNsxvAccessListError(
'Invalid Number of options specified: %d. Required options '
'are: filter type and section name. Platform: %s' % (
len(filter_options), self._PLATFORM))
# mandatory section_name
section_name = filter_options[0]
# mandatory
filter_type = filter_options[1]
# a mixed filter outputs both ipv4 and ipv6 acls in the same output file
good_filters = ['inet', 'inet6', 'mixed']
# check if filter type is renderable
if filter_type not in good_filters:
raise UnsupportedNsxvAccessListError(
'Access list type %s not supported by %s (good types: %s)' % (
filter_type, self._PLATFORM, str(good_filters)))
section_id = 0
applied_to = None
filter_opt_len = len(filter_options)
if filter_opt_len > 2:
for index in range(2, filter_opt_len):
if index == 2 and filter_options[2] != 'securitygroup':
section_id = filter_options[2]
continue
if filter_options[index] == 'securitygroup':
if index + 1 <= filter_opt_len - 1:
applied_to = filter_options[index + 1]
break
else:
raise UnsupportedNsxvAccessListError(
'Security Group Id is not provided for %s' % (self._PLATFORM))
self._FILTER_OPTIONS_DICT['section_name'] = section_name
self._FILTER_OPTIONS_DICT['filter_type'] = filter_type
self._FILTER_OPTIONS_DICT['section_id'] = section_id
self._FILTER_OPTIONS_DICT['applied_to'] = applied_to
def __str__(self):
"""Render the output of the Nsxv policy."""
target_header = []
target = []
# add the p4 tags
target.append('<!--')
target.extend(aclgenerator.AddRepositoryTags('\n'))
target.append('\n')
target.append('-->')
for (_, _, _, terms) in self.nsxv_policies:
section_name = six.ensure_str(self._FILTER_OPTIONS_DICT['section_name'])
# check section id value
section_id = self._FILTER_OPTIONS_DICT['section_id']
if not section_id or section_id == 0:
logging.warning('WARNING: Section-id is 0. A new Section is created '
'for %s. If there is any existing section, it '
'will remain unreferenced and should be removed '
'manually.', section_name)
target.append('<section name="%s">' % (section_name.strip(' \t\n\r')))
else:
target.append('<section id="%s" name="%s">' %
(section_id, section_name.strip(' \t\n\r')))
# now add the terms
for term in terms:
term_str = str(term)
if term_str:
target.append(term_str)
# ensure that the header is always first
target = target_header + target
target.append('%s' % (_XML_TABLE.get('sectionEnd')))
target.append('\n')
target_as_xml = xml.dom.minidom.parseString(''.join(target))
# TODO(robankeny) utf encoding with refactoring after migration to py3
return target_as_xml.toprettyxml(indent=' ')
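

# Hedged usage sketch (assumed caller code, not part of this generator): capirca
# generators are normally handed a parsed policy plus an expiry-warning window in
# weeks, then rendered with str(). The definition directory and policy path are
# placeholders.
def _example_render_nsxv(policy_path='./policies/sample.pol', def_dir='./def'):
  from capirca.lib import naming
  from capirca.lib import policy as policy_lib
  definitions = naming.Naming(def_dir)
  with open(policy_path) as handle:
    pol = policy_lib.ParsePolicy(handle.read(), definitions, optimize=True)
  return str(Nsxv(pol, exp_info=2))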
|
src/robotide/application/pluginloader.py | guojiajiaok/RIDE | 775 | 11070798 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import importlib.util
import inspect
import os
from ..context import LOG
from ..pluginapi import Plugin
from .pluginconnector import PluginFactory
class PluginLoader(object):
def __init__(self, application, load_dirs, standard_classes):
self._load_errors = []
self.plugins = [PluginFactory(application, cls) for cls in standard_classes + self._find_classes(load_dirs)]
if self._load_errors:
LOG.error('\n\n'.join(self._load_errors))
def enable_plugins(self):
for p in self.plugins:
p.enable_on_startup()
def _find_classes(self, load_dirs):
classes = []
for path in self._find_python_files(load_dirs):
for cls in self._import_classes(path):
if self._is_plugin_class(path, cls):
classes.append(cls)
return classes
def _is_plugin_class(self, path, cls):
try:
return issubclass(cls, Plugin) and cls is not Plugin
except Exception as err:
msg = "Finding classes from module '%s' failed: %s"
self._load_errors.append(msg % (path, err))
def _find_python_files(self, load_dirs):
files = []
for path in load_dirs:
if not os.path.exists(path):
continue
for filename in os.listdir(path):
full_path = os.path.join(path, filename)
if filename[0].isalpha() and \
os.path.splitext(filename)[1].lower() == ".py":
files.append(full_path)
elif os.path.isdir(full_path):
files.extend(self._find_python_files([full_path]))
return files
def _import_classes(self, path):
dirpath, filename = os.path.split(path)
modulename = os.path.splitext(filename)[0]
spec = importlib.util.spec_from_file_location(modulename, path)
if spec is None:
return []
try:
m_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(m_module)
except Exception as err:
self._load_errors.append("Importing plugin module '%s' failed:\n%s"
% (path, err))
return []
return [cls for _, cls in
inspect.getmembers(m_module, predicate=inspect.isclass)]
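

# Hedged usage sketch (assumed wiring, not part of RIDE itself): the loader takes
# the application object, the plugin directories to scan and any built-in plugin
# classes, then enables everything it found. The directory below is a
# hypothetical placeholder.
def _example_load_plugins(application):
    loader = PluginLoader(application,
                          load_dirs=[os.path.expanduser('~/.robotframework/ride/plugins')],
                          standard_classes=[])
    loader.enable_plugins()
    return loader.plugins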
|
test/programytest/clients/test_config.py | RonKhondji/program-y | 345 | 11070868 | import unittest
from programy.clients.config import ClientConfigurationData
from programy.clients.events.console.config import ConsoleConfiguration
from programy.config.file.yaml_file import YamlConfigurationFile
from programytest.config.bot.test_bot import BotConfigurationTests
from programytest.utils.email.test_config import EmailConfigurationTests
from programytest.triggers.test_config import TriggersConfigurationTests
from programytest.clients.ping.test_config import PingResponderConfigurationTests
from programytest.storage.test_config import StorageConfigurationTests
from programytest.scheduling.test_config import SchedulerConfigurationTests
class ClientConfigurationDataTests(unittest.TestCase):
def test_with_data_single_bot(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
console:
prompt: ">>>"
renderer: programy.clients.render.text.TextRenderer
scheduler:
name: Scheduler1
debug_level: 0
add_listeners: True
remove_all_jobs: True
bot_selector: programy.clients.botfactory.DefaultBotSelector
bots:
bot1:
prompt: ">>>"
initial_question: Hi, how can I help you today?
initial_question_srai: YINITIALQUESTION
default_response: Sorry, I don't have an answer for that!
default_response_srai: YDEFAULTRESPONSE
empty_string: YEMPTY
exit_response: So long, and thanks for the fish!
exit_response_srai: YEXITRESPONSE
override_properties: true
max_question_recursion: 1000
max_question_timeout: 60
max_search_depth: 100
max_search_timeout: 60
spelling:
load: true
classname: programy.spelling.norvig.NorvigSpellingChecker
alphabet: 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
check_before: true
check_and_retry: true
splitter:
classname: programy.dialog.splitter.regex.RegexSentenceSplitter
joiner:
classname: programy.dialog.joiner.SentenceJoiner
conversations:
save: true
load: false
max_histories: 100
restore_last_topic: false
initial_topic: TOPIC1
empty_on_start: false
from_translator:
classname: programy.nlp.translate.textblob_translator.TextBlobTranslator
from: fr
to: en
to_translator:
classname: programy.nlp.translate.textblob_translator.TextBlobTranslator
from: en
to: fr
sentiment:
classname: programy.nlp.sentiment.textblob_sentiment.TextBlobSentimentAnalyser
scores: programy.nlp.sentiment.scores.SentimentScores
brain_selector: programy.bot.DefaultBrainSelector
brains:
brain1:
# Overrides
overrides:
allow_system_aiml: true
allow_learn_aiml: true
allow_learnf_aiml: true
# Defaults
defaults:
default_get: unknown
default_property: unknown
default_map: unknown
learnf-path: file
# Binary
binaries:
save_binary: true
load_binary: true
load_aiml_on_binary_fail: true
# Braintree
braintree:
create: true
security:
authentication:
classname: programy.security.authenticate.passthrough.BasicPassThroughAuthenticationService
denied_srai: AUTHENTICATION_FAILED
authorisation:
classname: programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService
denied_srai: AUTHORISATION_FAILED
usergroups:
storage: file
dynamic:
variables:
gettime: programy.dynamic.variables.datetime.GetTime
sets:
numeric: programy.dynamic.sets.numeric.IsNumeric
roman: programy.dynamic.sets.roman.IsRomanNumeral
maps:
romantodec: programy.dynamic.maps.roman.MapRomanToDecimal
dectoroman: programy.dynamic.maps.roman.MapDecimalToRoman
""", ConsoleConfiguration(), ".")
client_config = ClientConfigurationData("console")
client_config.load_configuration(yaml, ".")
self.assertEqual(1, len(client_config.configurations))
self.assertEqual("programy.clients.botfactory.DefaultBotSelector", client_config.bot_selector)
self.assertIsNotNone(client_config.scheduler)
self.assertEqual("Scheduler1", client_config.scheduler.name)
self.assertEqual(0, client_config.scheduler.debug_level)
self.assertTrue(client_config.scheduler.add_listeners)
self.assertTrue(client_config.scheduler.remove_all_jobs)
self.assertEqual("programy.clients.render.text.TextRenderer", client_config.renderer)
def test_with_data_multiple_bots(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
console:
prompt: ">>>"
renderer: programy.clients.render.text.TextRenderer
scheduler:
name: Scheduler1
debug_level: 0
add_listeners: True
remove_all_jobs: True
bot_selector: programy.clients.botfactory.DefaultBotSelector
bots:
bot1:
prompt: ">>>"
initial_question: Hi, how can I help you today?
initial_question_srai: YINITIALQUESTION
default_response: Sorry, I don't have an answer for that!
default_response_srai: YDEFAULTRESPONSE
empty_string: YEMPTY
exit_response: So long, and thanks for the fish!
exit_response_srai: YEXITRESPONSE
override_properties: true
max_question_recursion: 1000
max_question_timeout: 60
max_search_depth: 100
max_search_timeout: 60
spelling:
load: true
classname: programy.spelling.norvig.NorvigSpellingChecker
alphabet: 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
check_before: true
check_and_retry: true
splitter:
classname: programy.dialog.splitter.regex.RegexSentenceSplitter
joiner:
classname: programy.dialog.joiner.SentenceJoiner
conversations:
save: true
load: false
max_histories: 100
restore_last_topic: false
initial_topic: TOPIC1
empty_on_start: false
from_translator:
classname: programy.nlp.translate.textblob_translator.TextBlobTranslator
from: fr
to: en
to_translator:
classname: programy.nlp.translate.textblob_translator.TextBlobTranslator
from: en
to: fr
sentiment:
classname: programy.nlp.sentiment.textblob_sentiment.TextBlobSentimentAnalyser
scores: programy.nlp.sentiment.scores.SentimentScores
brain_selector: programy.bot.DefaultBrainSelector
brains:
brain1:
# Overrides
overrides:
allow_system_aiml: true
allow_learn_aiml: true
allow_learnf_aiml: true
# Defaults
defaults:
default_get: unknown
default_property: unknown
default_map: unknown
learnf-path: file
# Binary
binaries:
save_binary: true
load_binary: true
load_aiml_on_binary_fail: true
# Braintree
braintree:
create: true
security:
authentication:
classname: programy.security.authenticate.passthrough.BasicPassThroughAuthenticationService
denied_srai: AUTHENTICATION_FAILED
authorisation:
classname: programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService
denied_srai: AUTHORISATION_FAILED
usergroups:
storage: file
dynamic:
variables:
gettime: programy.dynamic.variables.datetime.GetTime
sets:
numeric: programy.dynamic.sets.numeric.IsNumeric
roman: programy.dynamic.sets.roman.IsRomanNumeral
maps:
romantodec: programy.dynamic.maps.roman.MapRomanToDecimal
dectoroman: programy.dynamic.maps.roman.MapDecimalToRoman
bot2:
prompt: ">>>"
initial_question: Hi, how can I help you today?
initial_question_srai: YINITIALQUESTION
default_response: Sorry, I don't have an answer for that!
default_response_srai: YDEFAULTRESPONSE
empty_string: YEMPTY
exit_response: So long, and thanks for the fish!
exit_response_srai: YEXITRESPONSE
override_properties: true
max_question_recursion: 1000
max_question_timeout: 60
max_search_depth: 100
max_search_timeout: 60
spelling:
load: true
classname: programy.spelling.norvig.NorvigSpellingChecker
alphabet: 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
check_before: true
check_and_retry: true
splitter:
classname: programy.dialog.splitter.regex.RegexSentenceSplitter
joiner:
classname: programy.dialog.joiner.SentenceJoiner
conversations:
save: true
load: false
max_histories: 100
restore_last_topic: false
initial_topic: TOPIC1
empty_on_start: false
from_translator:
classname: programy.nlp.translate.textblob_translator.TextBlobTranslator
from: fr
to: en
to_translator:
classname: programy.nlp.translate.textblob_translator.TextBlobTranslator
from: en
to: fr
sentiment:
classname: programy.nlp.sentiment.textblob_sentiment.TextBlobSentimentAnalyser
scores: programy.nlp.sentiment.scores.SentimentScores
brain_selector: programy.bot.DefaultBrainSelector
brains:
brain1:
# Overrides
overrides:
allow_system_aiml: true
allow_learn_aiml: true
allow_learnf_aiml: true
# Defaults
defaults:
default_get: unknown
default_property: unknown
default_map: unknown
learnf-path: file
# Binary
binaries:
save_binary: true
load_binary: true
load_aiml_on_binary_fail: true
# Braintree
braintree:
create: true
security:
authentication:
classname: programy.security.authenticate.passthrough.BasicPassThroughAuthenticationService
denied_srai: AUTHENTICATION_FAILED
authorisation:
classname: programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService
denied_srai: AUTHORISATION_FAILED
usergroups:
storage: file
dynamic:
variables:
gettime: programy.dynamic.variables.datetime.GetTime
sets:
numeric: programy.dynamic.sets.numeric.IsNumeric
roman: programy.dynamic.sets.roman.IsRomanNumeral
maps:
romantodec: programy.dynamic.maps.roman.MapRomanToDecimal
dectoroman: programy.dynamic.maps.roman.MapDecimalToRoman
""", ConsoleConfiguration(), ".")
client_config = ClientConfigurationData("console")
client_config.load_configuration(yaml, ".")
self.assertEqual(2, len(client_config.configurations))
self.assertEqual("programy.clients.botfactory.DefaultBotSelector", client_config.bot_selector)
self.assertIsNotNone(client_config.scheduler)
self.assertEqual("Scheduler1", client_config.scheduler.name)
self.assertEqual(0, client_config.scheduler.debug_level)
self.assertTrue(client_config.scheduler.add_listeners)
self.assertTrue(client_config.scheduler.remove_all_jobs)
self.assertEqual("programy.clients.render.text.TextRenderer", client_config.renderer)
def test_without_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
console:
""", ConsoleConfiguration(), ".")
client_config = ClientConfigurationData("console")
client_config.load_configuration(yaml, ".")
self.assertIsNotNone(client_config.bot_selector)
self.assertIsNotNone(client_config.scheduler)
self.assertEqual(None, client_config.scheduler.name)
self.assertEqual(0, client_config.scheduler.debug_level)
self.assertFalse(client_config.scheduler.add_listeners)
self.assertFalse(client_config.scheduler.remove_all_jobs)
self.assertIsNotNone(client_config.renderer)
def test_with_no_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
other:
""", ConsoleConfiguration(), ".")
client_config = ClientConfigurationData("console")
client_config.load_configuration(yaml, ".")
self.assertIsNotNone(client_config.bot_selector)
self.assertIsNotNone(client_config.scheduler)
self.assertEqual(None, client_config.scheduler.name)
self.assertEqual(0, client_config.scheduler.debug_level)
self.assertFalse(client_config.scheduler.add_listeners)
self.assertFalse(client_config.scheduler.remove_all_jobs)
self.assertIsNotNone(client_config.renderer)
def test_defaults(self):
client_config = ClientConfigurationData("console")
data = {}
client_config.to_yaml(data, True)
ClientConfigurationDataTests.assert_defaults(self, data)
@staticmethod
def assert_defaults(test, data):
test.assertEqual(data['description'], 'ProgramY AIML2.0 Client')
test.assertEqual(data['renderer'], "programy.clients.render.text.TextRenderer")
test.assertTrue('scheduler' in data)
SchedulerConfigurationTests.assert_defaults(test, data['scheduler'])
test.assertTrue('email' in data)
EmailConfigurationTests.assert_defaults(test, data['email'])
test.assertTrue('triggers' in data)
TriggersConfigurationTests.assert_defaults(test, data['triggers'])
test.assertTrue('responder' in data)
PingResponderConfigurationTests.assert_defaults(test, data['responder'])
test.assertTrue('storage' in data)
StorageConfigurationTests.assert_defaults(test, data['storage'])
test.assertTrue('bots' in data)
test.assertTrue('bot' in data['bots'])
BotConfigurationTests.assert_defaults(test, data['bots']['bot'])
test.assertEqual(data['bot_selector'], "programy.clients.botfactory.DefaultBotSelector")
|
AppServer/google/net/proto/RawMessage.py | loftwah/appscale | 790 | 11070896 | <filename>AppServer/google/net/proto/RawMessage.py
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is the Python counterpart to the RawMessage class defined in rawmessage.h.
To use this, put the following line in your .proto file:
python from google.net.proto.RawMessage import RawMessage
"""
__pychecker__ = 'no-callinit no-argsused'
from google.net.proto import ProtocolBuffer
class RawMessage(ProtocolBuffer.ProtocolMessage):
"""
This is a special subclass of ProtocolMessage that doesn't interpret its data
in any way. Instead, it just stores it in a string.
See rawmessage.h for more details.
"""
def __init__(self, initial=None):
self.__contents = ''
if initial is not None:
self.MergeFromString(initial)
def contents(self):
return self.__contents
def set_contents(self, contents):
self.__contents = contents
def Clear(self):
self.__contents = ''
def IsInitialized(self, debug_strs=None):
return 1
def __str__(self, prefix="", printElemNumber=0):
return prefix + self.DebugFormatString(self.__contents)
def OutputUnchecked(self, e):
e.putRawString(self.__contents)
def OutputPartial(self, e):
return self.OutputUnchecked(e)
def TryMerge(self, d):
self.__contents = d.getRawString()
def MergeFrom(self, pb):
assert pb is not self
if pb.__class__ != self.__class__:
return 0
self.__contents = pb.__contents
return 1
def Equals(self, pb):
return self.__contents == pb.__contents
def __eq__(self, other):
return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
def __ne__(self, other):
return not (self == other)
def ByteSize(self):
return len(self.__contents)
def ByteSizePartial(self):
return self.ByteSize()
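# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# RawMessage stores the wire bytes verbatim, so contents round-trip unchanged and two
# messages compare equal exactly when their byte strings match:
#
#   msg = RawMessage()
#   msg.set_contents('\x08\x96\x01')      # any opaque byte string
#   assert msg.contents() == '\x08\x96\x01'
#   assert msg.ByteSize() == 3
#   copy = RawMessage()
#   copy.MergeFrom(msg)
#   assert copy == msg and copy.Equals(msg)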
|
magenta/common/concurrency_test.py | sandutsar/magenta | 16,143 | 11070917 | # Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for concurrency."""
import threading
import time
from magenta.common import concurrency
import tensorflow.compat.v1 as tf
class ConcurrencyTest(tf.test.TestCase):
def testSleeper_SleepUntil(self):
# Burn in.
for _ in range(10):
concurrency.Sleeper().sleep(.01)
future_time = time.time() + 0.5
concurrency.Sleeper().sleep_until(future_time)
self.assertAlmostEqual(time.time(), future_time, delta=0.005)
def testSleeper_Sleep(self):
# Burn in.
for _ in range(10):
concurrency.Sleeper().sleep(.01)
def sleep_test_thread(duration):
start_time = time.time()
concurrency.Sleeper().sleep(duration)
self.assertAlmostEqual(time.time(), start_time + duration, delta=0.005)
threads = [threading.Thread(target=sleep_test_thread, args=[i * 0.1])
for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
tf.test.main()
|
dash_docs/chapters/reference/index.py | wesleyacheng/dash-docs | 379 | 11070928 | import dash
import dash_html_components as html
import dash_core_components as dcc
import textwrap
import re
import inspect
def convert_docstring_to_markdown(docstring):
if not docstring:
return '\n\(No docstring available)\n'
lines = docstring.split('\n')
# For some reason the second block of lines is indented
docstring = lines[0] + textwrap.dedent('\n'.join(lines[1:]))
# Replace ':param <variable>' with `<variable>`
docstring = re.sub(r'\:param (\w*)\:', r'**`\1`**\n\n', docstring)
docstring = re.sub(r'\:type (\w*)\:', r'\ntype:', docstring)
# Remove leading ': from rst Example
docstring = docstring.replace(':Example:', 'Example:')
docstring = docstring.replace(
':return:', '\n\nreturns:'
)
docstring = docstring.replace('``', '`')
return docstring
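# Illustrative note (added; not part of the original file): the substitutions above map
# Sphinx-style fields to Markdown, e.g. ":param layout:" becomes "**`layout`**" followed
# by a blank line, ":return:" becomes "returns:", and ":Example:" becomes "Example:".
#
#   convert_docstring_to_markdown(":param layout: The layout of the app.")
#   # -> '**`layout`**\n\n The layout of the app.'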
app = dash.Dash(__name__)
SKIP = ['dash', 'json', 'dedent']
def doc_signature(obj, method, prefix):
try:
signature = str(inspect.signature(getattr(obj, method)))
except:
signature = ''
try:
name = method.__name__
except:
name = str(method)
if not prefix:
try:
prefix = obj.__name__
except:
prefix = str(obj)
prefix_signature = prefix
return html.Div(
[
html.H2(
html.Code("{}.{}".format(prefix, name)),
className="docs-article",
style={'overflowX': 'auto'}
),
html.Pre(
className="docs-article",
children=html.Code(
"{}.{}{}".format(
prefix_signature,
name,
signature
.replace(",", ",\n ")
.replace("(", "(\n ")
.replace(")", "\n)")
.replace(" self,\n", "")
.replace("\n self\n", ""),
)
),
),
]
)
PUBLIC_API = [
# app,
# dash.Dash,
# dash.resources,
dict(obj=dash, prefix='', skip=[
'fingerprint',
], preamble=dcc.Markdown(
'''
# The `dash` module
```
import dash
```
'''
)),
dict(obj=app, prefix='app', skip=[
'css',
'dependencies',
'dispatch',
'exceptions',
'logger',
'registered_paths',
'renderer',
'resources',
'routes',
'scripts',
'serve_component_suites',
'serve_layout',
'serve_reload_hash',
'validation_layout'
], preamble=dcc.Markdown(
'''
# The `app` Object
```
import dash
app = dash.Dash(__name__)
```
'''
), override=dict(
server=dcc.Markdown(
'''
The Flask server associated with this app.
Often used in conjunction with `gunicorn` when running the app
in production with multiple workers:
`app.py`
```
app = dash.Dash(__name__)
# expose the flask variable in the file
server = app.server
```
`Procfile`
```
gunicorn app:server
```
'''
),
title=dcc.Markdown(
'''
Configures the document.title (the text that appears in a browser tab).
Default is "Dash".
This is now configurable in the `dash.Dash(title='...')` constructor
instead of as a property of `app`. We have kept this property
in the `app` object for backwards compatibility.
'''
)
)),
dict(obj=dash.dependencies, prefix='', skip=[], preamble=dcc.Markdown(
'''
# The `dash.dependencies` module
The classes in `dash.dependencies` are all used in the `app.callback`
signature.
'''
)),
dict(obj=dash.exceptions, prefix='', skip=[], preamble=dcc.Markdown(
'''
# The `dash.exceptions` module
Dash will raise exceptions under certain scenarios.
Dash will always use a special exception class that can be caught to
handle this particular scenario.
These exception classes are in this module.
'''
), global_override='')
]
def public_methods(obj):
methods = []
for method in dir(obj):
if (obj and
not method.startswith('_') and
method not in SKIP and
'dash' in str(obj) and
'class' not in method and
'development' not in method):
methods.append(method)
return methods
def create_docstrings():
docstring = []
for docitem in PUBLIC_API:
docstring.append(docitem['preamble']),
for method in public_methods(docitem['obj']):
if method not in docitem['skip']:
docstring.append(doc_signature(docitem['obj'], method, docitem['prefix']))
if 'override' in docitem and method in docitem['override']:
docstring.append(docitem['override'][method])
elif 'global_override' in docitem:
docstring.append(docitem['global_override'])
else:
docstring.append(dcc.Markdown(convert_docstring_to_markdown(getattr(docitem['obj'], method).__doc__)))
docstring.append(html.Hr())
return docstring
layout = html.Div([
dcc.Markdown(
'''
# API Reference
This page displays the docstrings for the public methods of the
`dash` module including the `app` object.
Curious about the implementation details?
[Browse the Dash source code](https://github.com/plotly/dash).
'''
),
html.Div(create_docstrings()),
])
|
examples/run_bidafplus/run_bidafplus_squadv2.py | ishine/SMRCToolkit | 1,238 | 11070929 | <reponame>ishine/SMRCToolkit
# coding: utf-8
from sogou_mrc.data.vocabulary import Vocabulary
from sogou_mrc.data.batch_generator import BatchGenerator
from sogou_mrc.dataset.squadv2 import SquadV2Reader, SquadV2Evaluator
from sogou_mrc.model.bidafplus_squad2 import BiDAFPlus
import tensorflow as tf
import logging
import json
import time
import numpy as np
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
tf.logging.set_verbosity(tf.logging.ERROR)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
train_file = "train-v2.0.json"
dev_file = "dev-v2.0.json"
t0 = time.time()
reader = SquadV2Reader()
train_data = reader.read(train_file)
eval_data = reader.read(dev_file)
evaluator = SquadV2Evaluator(dev_file)
cost = time.time() - t0
logging.info("seg cost=%.3f" % cost)
t0 = time.time()
vocab = Vocabulary(do_lowercase=True)
vocab.build_vocab(train_data + eval_data, min_word_count=3, min_char_count=10)
word_embedding = vocab.make_word_embedding("glove.840B.300d.txt", init_scale=0.05)
cost = time.time() - t0
logging.info("make vocab cost=%.3f" % cost)
train_batch_generator = BatchGenerator(vocab, train_data, batch_size=16, training=True, additional_fields=["abstractive_answer_mask"])
eval_batch_generator = BatchGenerator(vocab, eval_data, batch_size=16, training=False, additional_fields=["abstractive_answer_mask"])
use_elmo=True
save_path="squad2_elmo"
if use_elmo:
model = BiDAFPlus(vocab, pretrained_word_embedding=word_embedding, abstractive_answer=[""], use_elmo=True, elmo_local_path="path_to_elmo")
else:
model = BiDAFPlus(vocab, pretrained_word_embedding=word_embedding, abstractive_answer=[""])
model.compile(tf.train.AdadeltaOptimizer, 1.0)
model.train_and_evaluate(train_batch_generator, eval_batch_generator, evaluator, epochs=40, eposides=2, save_dir=save_path)
|
official/utils/flags/_base.py | 873040/Abhishek | 357 | 11070934 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flags which will be nearly universal across models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from official.utils.flags._conventions import help_wrap
from official.utils.logs import hooks_helper
def define_base(data_dir=True, model_dir=True, clean=False, train_epochs=False,
epochs_between_evals=False, stop_threshold=False,
batch_size=True, num_gpu=False, hooks=False, export_dir=False,
distribution_strategy=False, run_eagerly=False):
"""Register base flags.
Args:
data_dir: Create a flag for specifying the input data directory.
model_dir: Create a flag for specifying the model file directory.
clean: Create a flag for removing the model_dir.
train_epochs: Create a flag to specify the number of training epochs.
epochs_between_evals: Create a flag to specify the frequency of testing.
stop_threshold: Create a flag to specify a threshold accuracy or other
eval metric which should trigger the end of training.
batch_size: Create a flag to specify the batch size.
num_gpu: Create a flag to specify the number of GPUs used.
hooks: Create a flag to specify hooks for logging.
export_dir: Create a flag to specify where a SavedModel should be exported.
distribution_strategy: Create a flag to specify which Distribution Strategy
to use.
run_eagerly: Create a flag to specify to run eagerly op by op.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
if data_dir:
flags.DEFINE_string(
name="data_dir", short_name="dd", default="/tmp",
help=help_wrap("The location of the input data."))
key_flags.append("data_dir")
if model_dir:
flags.DEFINE_string(
name="model_dir", short_name="md", default="/tmp",
help=help_wrap("The location of the model checkpoint files."))
key_flags.append("model_dir")
if clean:
flags.DEFINE_boolean(
name="clean", default=False,
help=help_wrap("If set, model_dir will be removed if it exists."))
key_flags.append("clean")
if train_epochs:
flags.DEFINE_integer(
name="train_epochs", short_name="te", default=1,
help=help_wrap("The number of epochs used to train."))
key_flags.append("train_epochs")
if epochs_between_evals:
flags.DEFINE_integer(
name="epochs_between_evals", short_name="ebe", default=1,
help=help_wrap("The number of training epochs to run between "
"evaluations."))
key_flags.append("epochs_between_evals")
if stop_threshold:
flags.DEFINE_float(
name="stop_threshold", short_name="st",
default=None,
help=help_wrap("If passed, training will stop at the earlier of "
"train_epochs and when the evaluation metric is "
"greater than or equal to stop_threshold."))
if batch_size:
flags.DEFINE_integer(
name="batch_size", short_name="bs", default=32,
help=help_wrap("Batch size for training and evaluation. When using "
"multiple gpus, this is the global batch size for "
"all devices. For example, if the batch size is 32 "
"and there are 4 GPUs, each GPU will get 8 examples on "
"each step."))
key_flags.append("batch_size")
if num_gpu:
flags.DEFINE_integer(
name="num_gpus", short_name="ng",
default=1,
help=help_wrap(
"How many GPUs to use at each worker with the "
"DistributionStrategies API. The default is 1."))
if run_eagerly:
flags.DEFINE_boolean(
name="run_eagerly", default=False,
help="Run the model op by op without building a model function.")
if hooks:
# Construct a pretty summary of hooks.
hook_list_str = (
u"\ufeff Hook:\n" + u"\n".join([u"\ufeff {}".format(key) for key
in hooks_helper.HOOKS]))
flags.DEFINE_list(
name="hooks", short_name="hk", default="LoggingTensorHook",
help=help_wrap(
u"A list of (case insensitive) strings to specify the names of "
u"training hooks.\n{}\n\ufeff Example: `--hooks ProfilerHook,"
u"ExamplesPerSecondHook`\n See official.utils.logs.hooks_helper "
u"for details.".format(hook_list_str))
)
key_flags.append("hooks")
if export_dir:
flags.DEFINE_string(
name="export_dir", short_name="ed", default=None,
help=help_wrap("If set, a SavedModel serialization of the model will "
"be exported to this directory at the end of training. "
"See the README for more details and relevant links.")
)
key_flags.append("export_dir")
if distribution_strategy:
flags.DEFINE_string(
name="distribution_strategy", short_name="ds", default="mirrored",
help=help_wrap("The Distribution Strategy to use for training. "
"Accepted values are 'off', 'one_device', "
"'mirrored', 'parameter_server', 'collective', "
"case insensitive. 'off' means not to use "
"Distribution Strategy; 'default' means to choose "
"from `MirroredStrategy` or `OneDeviceStrategy` "
"according to the number of GPUs.")
)
return key_flags
def get_num_gpus(flags_obj):
"""Treat num_gpus=-1 as 'use all'."""
if flags_obj.num_gpus != -1:
return flags_obj.num_gpus
from tensorflow.python.client import device_lib # pylint: disable=g-import-not-at-top
local_device_protos = device_lib.list_local_devices()
return sum([1 for d in local_device_protos if d.device_type == "GPU"])
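# --- Illustrative usage sketch (added; not part of the original module). ---
# A model's main module typically registers these flags once at startup and then reads
# the parsed values from absl's FLAGS singleton, e.g.:
#
#   from absl import app as absl_app
#   from absl import flags as absl_flags
#   from official.utils.flags import _base
#
#   def main(_):
#     flags_obj = absl_flags.FLAGS
#     print(flags_obj.data_dir, flags_obj.batch_size, _base.get_num_gpus(flags_obj))
#
#   if __name__ == "__main__":
#     _base.define_base(num_gpu=True)
#     absl_app.run(main)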
|
chrome/test/pyautolib/history_info.py | nagineni/chromium-crosswalk | 231 | 11070986 | <filename>chrome/test/pyautolib/history_info.py
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""History: python representation for history.
Obtain one of these from PyUITestSuite::GetHistoryInfo() call.
Example:
class MyTest(pyauto.PyUITest):
def testBasic(self):
url = 'http://www.google.com/'
self.NavigateToURL(url)
history = self.GetHistoryInfo()
self.assertEqual(1, len(history))
self.assertEqual(url, history[0]['url'])
See more tests in chrome/test/functional/history.py.
"""
import simplejson as json
from pyauto_errors import JSONInterfaceError
class HistoryInfo(object):
"""Represent info about browsing history.
The info is represented as a list of history items containing url, title,
time, etc.
"""
def __init__(self, history_dict):
"""Initialize a HistoryInfo from a string of json.
Args:
json_string: a dictionary as returned by the IPC command 'GetHistoryInfo'.
A typical dict representing history info looks like:
{'history': [
{'url': 'http://www.google.com/',
'title': 'Google',
...,
...,
}, ] }
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
# JSON string prepared in GetHistoryInfo() in automation_provider.cc
self.historydict = history_dict
def History(self):
"""Get history list.
History is ordered latest first, that is in the same order as
chrome://history/ would list.
Example:
[ { u'snippet': u'',
u'starred': False,
u'time': 1271781612,
u'title': u'Google News',
u'url': u'http://news.google.com/'},
{ u'snippet': u'',
u'starred': True,
u'time': 1271781602,
u'title': u'Google',
u'url': u'http://www.google.com/'}]
The snippet attribute will be empty in most cases. If GetHistoryInfo() is
provided a non-empty search_text arg, the snippet attribute will contain the
snippet as it would be visible when searching for that text in the
chrome://history/ UI.
Returns:
[item1, item2, ...]
"""
return self.historydict.get('history', [])
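# Illustrative sketch (added; not part of the original file): outside of a pyauto run the
# class can be exercised directly with a dict shaped like the automation response:
#
#   info = HistoryInfo({'history': [{'url': 'http://www.google.com/',
#                                    'title': 'Google',
#                                    'time': 1271781602}]})
#   assert info.History()[0]['url'] == 'http://www.google.com/'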
|
setup.py | timmahrt/praatIO | 208 | 11070987 | <gh_stars>100-1000
#!/usr/bin/env python
# encoding: utf-8
"""
Created on Aug 29, 2014
@author: tmahrt
"""
from setuptools import setup
import io
setup(
name="praatio",
python_requires=">3.6.0",
version="5.0.0",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/timmahrt/praatIO",
package_dir={"praatio": "praatio"},
packages=["praatio", "praatio.utilities", "praatio.data_classes"],
package_data={
"praatio": [
"praatScripts/*.praat",
]
},
install_requires=[
"typing_extensions",
],
license="LICENSE",
description=(
"A library for working with praat, textgrids, "
"time aligned audio transcripts, and audio files."
),
long_description=io.open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
)
|
examples/units_and_coordinates/AIA_limb_STEREO.py | Octaves0911/sunpy | 628 | 11070992 | <filename>examples/units_and_coordinates/AIA_limb_STEREO.py
"""
===========================================
Drawing the AIA limb on a STEREO EUVI image
===========================================
In this example we use a STEREO-A and an SDO image to demonstrate how to
overplot the limb as seen by AIA on an EUVI-A image. Then we overplot the AIA
coordinate grid on the STEREO image.
"""
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.coordinates import SkyCoord
import sunpy.coordinates.wcs_utils
import sunpy.map
from sunpy.net import Fido
from sunpy.net import attrs as a
###############################################################################
# The first step is to download some data. We download an image from STEREO-A
# and an image from SDO, which are separated in longitude.
stereo = (a.Source('STEREO_A') &
a.Instrument("EUVI") &
a.Time('2021-01-01 00:06', '2021-01-01 00:07'))
aia = (a.Instrument.aia &
a.Sample(24 * u.hour) &
a.Time('2021-01-01 00:06', '2021-01-02 00:06'))
wave = a.Wavelength(30 * u.nm, 31 * u.nm)
result = Fido.search(wave, aia | stereo)
###############################################################################
# Let's inspect the result and download the files.
print(result)
downloaded_files = Fido.fetch(result)
print(downloaded_files)
##############################################################################
# Let's create a dictionary with the two maps, which we crop to full disk.
maps = {m.detector: m.submap(SkyCoord([-1100, 1100], [-1100, 1100],
unit=u.arcsec, frame=m.coordinate_frame))
for m in sunpy.map.Map(downloaded_files)}
maps['AIA'].plot_settings['vmin'] = 0 # set the minimum plotted pixel value
##############################################################################
# Now, let's plot both maps, and we draw the limb as seen by AIA onto the
# EUVI image. We remove the part of the limb that is hidden because it is on
# the far side of the Sun from STEREO's point of view.
fig = plt.figure(figsize=(10, 4))
ax1 = fig.add_subplot(1, 2, 1, projection=maps['AIA'])
maps['AIA'].plot(axes=ax1)
maps['AIA'].draw_limb()
ax2 = fig.add_subplot(1, 2, 2, projection=maps['EUVI'])
maps['EUVI'].plot(axes=ax2)
visible, hidden = maps['AIA'].draw_limb()
hidden.remove()
##############################################################################
# Let's plot the helioprojective coordinate grid as seen by SDO on the STEREO
# image in a cropped view. Note that only those grid lines that intersect the
# edge of the plot will have corresponding ticks and tick labels.
fig = plt.figure()
ax = plt.subplot(projection=maps['EUVI'])
maps['EUVI'].plot()
# Crop the view using pixel coordinates
ax.set_xlim(500, 1300)
ax.set_ylim(100, 900)
# Shrink the plot slightly and move the title up to make room for new labels.
ax.set_position([0.1, 0.1, 0.8, 0.7])
ax.set_title(ax.get_title(), pad=45)
# Change the default grid labels and line properties.
stereo_x, stereo_y = ax.coords
stereo_x.set_axislabel("Helioprojective Longitude (STEREO A) [arcsec]")
stereo_y.set_axislabel("Helioprojective Latitude (STEREO A) [arcsec]")
ax.coords.grid(color='white', linewidth=1)
# Add a new coordinate overlay in the SDO frame.
overlay = ax.get_coords_overlay(maps['AIA'].coordinate_frame)
overlay.grid()
# Configure the grid:
x, y = overlay
# Wrap the longitude at 180 deg rather than the default 360.
x.set_coord_type('longitude', 180.)
# Set the tick spacing
x.set_ticks(spacing=250*u.arcsec)
y.set_ticks(spacing=250*u.arcsec)
# Set the ticks to be on the top and left axes.
x.set_ticks_position('tr')
y.set_ticks_position('tr')
# Change the defaults to arcseconds
x.set_major_formatter('s.s')
y.set_major_formatter('s.s')
# Add axes labels
x.set_axislabel("Helioprojective Longitude (SDO) [arcsec]")
y.set_axislabel("Helioprojective Latitude (SDO) [arcsec]")
plt.show()
|
src/oci/data_integration/models/oracle_adwc_write_attribute.py | Manny27nyc/oci-python-sdk | 249 | 11070994 | <filename>src/oci/data_integration/models/oracle_adwc_write_attribute.py<gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .abstract_write_attribute import AbstractWriteAttribute
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class OracleAdwcWriteAttribute(AbstractWriteAttribute):
"""
Properties to configure writing to Oracle Autonomous Data Warehouse Cloud.
"""
def __init__(self, **kwargs):
"""
Initializes a new OracleAdwcWriteAttribute object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.OracleAdwcWriteAttribute.model_type` attribute
of this class is ``ORACLEADWCWRITEATTRIBUTE`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param model_type:
The value to assign to the model_type property of this OracleAdwcWriteAttribute.
Allowed values for this property are: "ORACLEWRITEATTRIBUTE", "ORACLEATPWRITEATTRIBUTE", "ORACLEADWCWRITEATTRIBUTE", "OBJECTSTORAGEWRITEATTRIBUTE", "ORACLE_WRITE_ATTRIBUTE", "ORACLE_ATP_WRITE_ATTRIBUTE", "ORACLE_ADWC_WRITE_ATTRIBUTE", "OBJECT_STORAGE_WRITE_ATTRIBUTE"
:type model_type: str
:param bucket_name:
The value to assign to the bucket_name property of this OracleAdwcWriteAttribute.
:type bucket_name: str
:param staging_file_name:
The value to assign to the staging_file_name property of this OracleAdwcWriteAttribute.
:type staging_file_name: str
:param staging_data_asset:
The value to assign to the staging_data_asset property of this OracleAdwcWriteAttribute.
:type staging_data_asset: oci.data_integration.models.DataAsset
:param staging_connection:
The value to assign to the staging_connection property of this OracleAdwcWriteAttribute.
:type staging_connection: oci.data_integration.models.Connection
"""
self.swagger_types = {
'model_type': 'str',
'bucket_name': 'str',
'staging_file_name': 'str',
'staging_data_asset': 'DataAsset',
'staging_connection': 'Connection'
}
self.attribute_map = {
'model_type': 'modelType',
'bucket_name': 'bucketName',
'staging_file_name': 'stagingFileName',
'staging_data_asset': 'stagingDataAsset',
'staging_connection': 'stagingConnection'
}
self._model_type = None
self._bucket_name = None
self._staging_file_name = None
self._staging_data_asset = None
self._staging_connection = None
self._model_type = 'ORACLEADWCWRITEATTRIBUTE'
@property
def bucket_name(self):
"""
Gets the bucket_name of this OracleAdwcWriteAttribute.
The bucket name for the attribute.
:return: The bucket_name of this OracleAdwcWriteAttribute.
:rtype: str
"""
return self._bucket_name
@bucket_name.setter
def bucket_name(self, bucket_name):
"""
Sets the bucket_name of this OracleAdwcWriteAttribute.
The bucket name for the attribute.
:param bucket_name: The bucket_name of this OracleAdwcWriteAttribute.
:type: str
"""
self._bucket_name = bucket_name
@property
def staging_file_name(self):
"""
Gets the staging_file_name of this OracleAdwcWriteAttribute.
The file name for the attribute.
:return: The staging_file_name of this OracleAdwcWriteAttribute.
:rtype: str
"""
return self._staging_file_name
@staging_file_name.setter
def staging_file_name(self, staging_file_name):
"""
Sets the staging_file_name of this OracleAdwcWriteAttribute.
The file name for the attribute.
:param staging_file_name: The staging_file_name of this OracleAdwcWriteAttribute.
:type: str
"""
self._staging_file_name = staging_file_name
@property
def staging_data_asset(self):
"""
Gets the staging_data_asset of this OracleAdwcWriteAttribute.
:return: The staging_data_asset of this OracleAdwcWriteAttribute.
:rtype: oci.data_integration.models.DataAsset
"""
return self._staging_data_asset
@staging_data_asset.setter
def staging_data_asset(self, staging_data_asset):
"""
Sets the staging_data_asset of this OracleAdwcWriteAttribute.
:param staging_data_asset: The staging_data_asset of this OracleAdwcWriteAttribute.
:type: oci.data_integration.models.DataAsset
"""
self._staging_data_asset = staging_data_asset
@property
def staging_connection(self):
"""
Gets the staging_connection of this OracleAdwcWriteAttribute.
:return: The staging_connection of this OracleAdwcWriteAttribute.
:rtype: oci.data_integration.models.Connection
"""
return self._staging_connection
@staging_connection.setter
def staging_connection(self, staging_connection):
"""
Sets the staging_connection of this OracleAdwcWriteAttribute.
:param staging_connection: The staging_connection of this OracleAdwcWriteAttribute.
:type: oci.data_integration.models.Connection
"""
self._staging_connection = staging_connection
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
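# Illustrative usage sketch (added; not part of the generated module; the bucket and file
# names below are made up). The SDK normally builds this model from API responses, but the
# @init_model_state_from_kwargs decorator also allows direct keyword construction:
#
#   attr = OracleAdwcWriteAttribute(bucket_name='staging-bucket',
#                                   staging_file_name='load_001.csv')
#   assert attr.model_type == 'ORACLEADWCWRITEATTRIBUTE'
#   assert attr.bucket_name == 'staging-bucket'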
|
snlds/examples/tensorboard_utils.py | deepneuralmachine/google-research | 23,901 | 11070998 | <reponame>deepneuralmachine/google-research<filename>snlds/examples/tensorboard_utils.py<gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for plotting in Tensorboard."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import
import numpy as np
import tensorflow as tf
colors = [
(0.21568627450980393, 0.47058823529411764, 0.7490196078431373), # blue
(0.8980392156862745, 0.0, 0.0), # red
(0.996078431372549, 0.7019607843137254, 0.03137254901960784), # amber
(0.4823529411764706, 0.6980392156862745, 0.4549019607843137), # faded green
(0.5098039215686274, 0.37254901960784315, 0.5294117647058824), # purple
(0.5490196078431373, 0.0, 0.058823529411764705), # crimson
(0.6588235294117647, 0.6431372549019608, 0.5843137254901961)] # greyish
def plot_to_image(figure):
"""Converts the matplotlib figure to a PNG image."""
# The function is adapted from
# github.com/tensorflow/tensorboard/blob/master/docs/image_summaries.ipynb
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format="png")
# Closing the figure prevents it from being displayed directly.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# tf.summary.image requires 4-D inputs. [num_samples, height, weight, color].
image = tf.expand_dims(image, 0)
return image
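# Illustrative usage sketch (added; not part of the original module; the log directory is
# a made-up path). A matplotlib figure can be logged to TensorBoard by converting it with
# plot_to_image and writing an image summary:
#
#   writer = tf.summary.create_file_writer("/tmp/snlds_logs")
#   fig = plt.figure()
#   plt.plot([0., 1., 4., 9.])
#   with writer.as_default():
#     tf.summary.image("example_plot", plot_to_image(fig), step=0)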
def show_lorenz_attractor_3d(fig_size, inputs, reconstructed_inputs,
fig_title=None):
"""Compare reconstructed lorenz attractor.
Args:
fig_size: 2-tuple of floats for figure dimension (width, height) in inches.
inputs: a `2-D` numpy array, with shape [num_steps, 3]. At each timestep,
it records the [x, y, z] position of lorenz attractor.
reconstructed_inputs: a `2-D` numpy array, with the same shape as inputs,
recording the reconstructed lorenz attractor trajectories.
fig_title: Optional. A string to set title of figure.
Returns:
fig: the Matplotlib figure handle.
"""
assert inputs.ndim == 2
assert reconstructed_inputs.shape == inputs.shape
fig = plt.figure(figsize=fig_size)
if fig_title:
plt.title(fig_title)
ax = fig.add_subplot(1, 2, 1, projection="3d")
ax.plot(inputs[:, 0], inputs[:, 1], inputs[:, 2],
lw=1, color="k", alpha=0.5)
for i in range(3):
ax.set_xlabel("$x_{}$".format(i+1), fontsize=12, labelpad=3)
ax = fig.add_subplot(1, 2, 2, projection="3d")
ax.plot(reconstructed_inputs[:, 0],
reconstructed_inputs[:, 1],
reconstructed_inputs[:, 2],
lw=1, color="k", alpha=0.5)
for i in range(3):
ax.set_xlabel("$x_{}$".format(i+1), fontsize=12, labelpad=3)
return fig
def show_lorenz_segmentation(fig_size, inputs, segmentation, fig_title=None):
"""Show discrete state segmentation on input data along each dimension.
Args:
fig_size: 2-tuple of floats for figure dimension (width, height) in inches.
inputs: a `2-D` numpy array, with shape [num_steps, 3]. At each timestep,
it records the [x, y, z] position of lorenz attractor.
segmentation: a 1-D numpy array, with shape [num_steps], recoding the
most likely states at each time steps.
fig_title: Optional. A string to set title of figure.
Returns:
fig: the Matplotlib figure handle.
"""
fig = plt.figure(figsize=fig_size)
if fig_title:
plt.title(fig_title)
inputs = np.squeeze(inputs)
s_seq = np.squeeze(segmentation)
z_cps = np.concatenate(([0], np.where(np.diff(s_seq))[0]+1, [s_seq.size]))
for i in range(3):
ax = fig.add_subplot(3, 1, i+1)
for start, stop in zip(z_cps[:-1], z_cps[1:]):
stop = min(s_seq.size, stop+1)
ax.plot(np.arange(start, stop),
inputs[start:stop, i],
lw=1,
color=colors[s_seq[start]])
ax.set_ylabel("$x_{}(t)$".format(i+1))
if i < 2:
ax.set_xticklabels([])
return fig
def show_discrete_states(fig_size, discrete_states_lk, segmentation,
fig_title=None):
"""Show likelihoods of discrete states s[t] and segmentation.
Args:
fig_size: 2-tuple of floats for figure dimension (width, height) in inches.
discrete_states_lk: a 2-D numpy array, with shape [num_steps, num_states],
recording the likelihood of each discrete states.
segmentation: a 1-D numpy array, with shape [num_steps], recoding the
most likely states at each time steps.
fig_title: Optional. A string to set title of figure.
Returns:
fig: the Matplotlib figure handle.
"""
fig = plt.figure(figsize=fig_size)
if fig_title:
plt.title(fig_title)
ax = fig.add_subplot(1, 1, 1)
s_seq = np.squeeze(segmentation)
turning_loc = np.concatenate(
([0], np.where(np.diff(s_seq))[0]+1, [s_seq.size]))
for i in range(discrete_states_lk.shape[-1]):
ax.plot(np.reshape(discrete_states_lk[Ellipsis, i], [-1]))
for tl in turning_loc:
ax.axvline(tl, color="k", linewidth=2., linestyle="-.")
ax.set_ylim(-0.1, 1.1)
return fig
def show_hidden_states(fig_size, zt, segmentation, fig_title=None):
"""Show z[t] as series of line plots.
Args:
fig_size: 2-tuple of floats for figure dimension (width, height) in inches.
zt: a 2-D numpy array, with shape [num_steps, num_hidden_states],
recording the values of continuous hidden states z[t].
segmentation: a 1-D numpy array, with shape [num_steps], recoding the
most likely states at each time steps.
fig_title: Optional. A string to set title of figure.
Returns:
fig: the Matplotlib figure handle.
"""
fig = plt.figure(figsize=fig_size)
if fig_title:
plt.title(fig_title)
ax = fig.add_subplot(1, 1, 1)
s_seq = np.squeeze(segmentation)
turning_loc = np.concatenate(
([0], np.where(np.diff(s_seq))[0]+1, [s_seq.size]))
for i in range(zt.shape[-1]):
ax.plot(zt[:, i])
for tl in turning_loc:
ax.axvline(tl, color="k", linewidth=2., linestyle="-.")
return fig
|
aws_inventory/store.py | UA-SOlsen/aws-inventory | 594 | 11071001 | """Data persistence for responses and any exceptions while invoking operations."""
import datetime
import json
import logging
import pickle
import string
import sys
import time
import uuid
import botocore
import config
import version
LOGGER = logging.getLogger(__name__)
class ResponseEncoder(json.JSONEncoder):
"""Encode responses from operations in order to serialize to JSON."""
def default(self, o):
if isinstance(o, datetime.datetime):
return o.isoformat()
return super(ResponseEncoder, self).default(o)
class ResultStore(object):
"""Storage and serialization for responses and exceptions."""
def __init__(self, profile):
self.profile = profile
self._response_store = {} # {svc: {region: {svc_op: response}}}
self._exception_store = {} # {svc: {svc_op: {region: exception}}}
self.run_date = time.strftime('%Y-%m-%d %H:%M:%S %Z')
self.commandline = ' '.join(sys.argv)
self.version = version.__version__
def add_response(self, service, region, svc_op, resp):
"""Add a response to the store for a given service for an operation in a region. Replace
existing values.
:param str service: service name
:param str region: region name
:param str svc_op: service operation name
:param dict resp: response from invoking an API
"""
svc_store = self._response_store.setdefault(service, {})
svc_store.setdefault(region, {})[svc_op] = resp
def add_exception(self, service, region, svc_op, exc):
"""Add an exception to the store for a given service for an operation in a region. Replace
existing values.
:param str service: service name
:param str region: region name
:param str svc_op: service operation name
        :param Exception exc: exception raised while invoking an API
"""
svc_store = self._exception_store.setdefault(service, {})
svc_store.setdefault(svc_op, {})[region] = str(exc)
def has_exceptions(self, service, svc_op):
"""Check whether a service operation has any exceptions.
:param str service: service name
:param str svc_op: service operation name
:rtype: bool
:return: whether there are exceptions
"""
try:
return len(self._exception_store[service][svc_op]) > 0
except KeyError:
return False
def get_response_store(self):
"""Serialize response store to JSON.
:rtype: str
:return: serialized response store in JSON format
"""
LOGGER.debug('Building the response store.')
return json.dumps(self._response_store, cls=ResponseEncoder)
def dump_response_store(self, fp):
"""Pickle the response store.
:param file fp: file to write to
"""
LOGGER.debug('Writing the response store to file "%s".', fp.name)
pickle.dump(self._response_store, fp)
def dump_exception_store(self, fp):
"""Pickle the exception store.
:param file fp: file to write to
"""
LOGGER.debug('Writing the exception store to file "%s".', fp.name)
pickle.dump(self._exception_store, fp)
def generate_data_file(self, fp):
"""Generate the data file for consumption by the data GUI.
:param file fp: file to write to
"""
# format of data file for jsTree
#[
# {
# "text" : "Root node",
# "children" : [
# { "text" : "Child node 1" },
# { "text" : "Child node 2" }
# ]
# }
#]
def build_children(obj):
children = []
if isinstance(obj, dict):
for key, val in obj.items():
child = build_children(val)
if isinstance(child, (dict, list, tuple)) and child:
children.append({'text': key, 'children': child})
else:
# leaf node
try:
children.append({'text': u'{} = {}'.format(key, val)})
except UnicodeDecodeError:
# key or value is probably binary. For example, CloudTrail API ListPublicKeys
children.append({'text': u'{} = {!r}'.format(key, val)})
elif isinstance(obj, (list, tuple)):
for i, val in enumerate(obj):
child = build_children(val)
if isinstance(child, (dict, list, tuple)) and child:
children.append({'text': '[{:d}]'.format(i), 'children': child})
else:
# leaf node
children.append({'text': child})
else:
return obj
return children
LOGGER.debug('Building the GUI data model.')
data = build_children({'[inventory]': self._response_store})
# assign types to nodes so jsTree can handle them appropriately
data[0]['type'] = 'root'
data[0]['state'] = {'opened': True}
for service in data[0]['children']:
service['type'] = 'service'
service['state'] = {'opened': True}
for region in service['children']:
region['type'] = 'region'
region['state'] = {'opened': True}
num_hidden_operations = 0
for operation in region['children']:
operation['type'] = 'operation'
# add count of non empty response to operation name
try:
num_non_empty_responses = 0
for response in operation['children']:
try:
if response['text'] == 'ResponseMetadata':
response['type'] = 'response_metadata'
continue # ignore metadata nodes in count
num_non_empty_responses += 1 if response['children'] else 0
except KeyError:
# an empty response
pass
if num_non_empty_responses:
operation['text'] += ' ({:d})'.format(num_non_empty_responses)
else:
num_hidden_operations += 1
operation['state'] = {"hidden": True}
except KeyError:
# no response
pass
region['a_attr'] = {'title': '{:d} hidden operations'.format(num_hidden_operations)}
out_obj = {'run_date': self.run_date,
'commandline': self.commandline,
'version': self.version,
'botocore_version': botocore.__version__,
'responses': data}
LOGGER.debug('Writing the GUI data model to file "%s".', fp.name)
json.dump(out_obj, fp, cls=ResponseEncoder)
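# --- Illustrative usage sketch (added; not part of the original module; the service,
# region, operation and file names below are arbitrary examples). ---
#
#   store = ResultStore('default')
#   store.add_response('ec2', 'us-east-1', 'DescribeInstances', {'Reservations': []})
#   store.add_exception('s3', 'us-east-1', 'ListBuckets', Exception('AccessDenied'))
#   with open('responses.pkl', 'wb') as fp:
#       store.dump_response_store(fp)
#   with open('gui_data.json', 'w') as fp:
#       store.generate_data_file(fp)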
|
pysteps/tests/test_tracking_tdating.py | leabeusch/pysteps | 285 | 11071020 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pysteps.tracking.tdating import dating
from pysteps.utils import to_reflectivity
from pysteps.tests.helpers import get_precipitation_fields
arg_names = ("source", "dry_input")
arg_values = [
("mch", False),
("mch", False),
("mch", True),
]
@pytest.mark.parametrize(arg_names, arg_values)
def test_tracking_tdating_dating(source, dry_input):
pytest.importorskip("skimage")
pandas = pytest.importorskip("pandas")
if not dry_input:
input, metadata = get_precipitation_fields(0, 2, True, True, 4000, source)
input, __ = to_reflectivity(input, metadata)
else:
input = np.zeros((3, 50, 50))
metadata = {"timestamps": ["00", "01", "02"]}
timelist = metadata["timestamps"]
output = dating(input, timelist, mintrack=1)
# Check output format
assert isinstance(output, tuple)
assert len(output) == 3
assert isinstance(output[0], list)
assert isinstance(output[1], list)
assert isinstance(output[2], list)
assert len(output[1]) == input.shape[0]
assert len(output[2]) == input.shape[0]
assert isinstance(output[1][0], pandas.DataFrame)
assert isinstance(output[2][0], np.ndarray)
assert output[1][0].shape[1] == 9
assert output[2][0].shape == input.shape[1:]
if not dry_input:
assert len(output[0]) > 0
assert isinstance(output[0][0], pandas.DataFrame)
assert output[0][0].shape[1] == 9
else:
assert len(output[0]) == 0
assert output[1][0].shape[0] == 0
assert output[2][0].sum() == 0
|
functions/source/sitewisemonitorfunction/jmespath/__init__.py | jieatelement/quickstart-aws-industrial-machine-connectivity | 1,738 | 11071026 | <filename>functions/source/sitewisemonitorfunction/jmespath/__init__.py<gh_stars>1000+
from jmespath import parser
from jmespath.visitor import Options
__version__ = '0.9.5'
def compile(expression):
return parser.Parser().parse(expression)
def search(expression, data, options=None):
return parser.Parser().parse(expression).search(data, options=options)
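# Illustrative usage (added; not part of the original file): both entry points evaluate a
# JMESPath expression against plain Python data structures, e.g.
#
#   >>> search('foo.bar', {'foo': {'bar': 'baz'}})
#   'baz'
#   >>> compile('foo.bar').search({'foo': {'bar': 'baz'}})
#   'baz'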
|
tests/generators/merkle/main.py | sifraitech/eth2.0-specs | 497 | 11071049 | <reponame>sifraitech/eth2.0-specs
from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
if __name__ == "__main__":
altair_mods = {key: 'eth2spec.test.altair.merkle.test_' + key for key in [
'single_proof',
]}
bellatrix_mods = altair_mods
all_mods = {
ALTAIR: altair_mods,
BELLATRIX: bellatrix_mods,
}
run_state_test_generators(runner_name="merkle", all_mods=all_mods)
|
google/colab/_pip.py | figufema/TesteClone | 1,521 | 11071050 | <gh_stars>1000+
"""Routines for extracting information about pip installed packages.
The intent is to provide users a useful warning if they !pip install a package
that is already loaded in sys.modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import re
import site
import sys
from IPython.core.display import _display_mimetype
__all__ = ["is_pip_install_command", "print_previous_import_warning"]
_COLAB_DATA_MIMETYPE = "application/vnd.colab-display-data+json"
def is_pip_install_command(cmd, *args, **kwargs): # pylint: disable=unused-argument
"""Check if cmd represents a pip command."""
# Check if the command starts with pip/pip2/pip3 and a space.
# This won't trigger on every pip invocation, but will catch the most common.
return re.match(r"^pip[23]?\s+install", cmd.strip())
def _extract_installed_packages(pip_output):
"""Extract the list of successfully installed packages from pip output."""
regex = re.compile("^Successfully installed (.*)$", re.MULTILINE)
results = regex.findall(pip_output)
return itertools.chain(*map(str.split, results))
def _get_distinfo_path(distname, paths):
"""Find the filesystem path to a package's distribution info.
Distribution names must be treated as case-insensitive, with '-' and '_'
characters treated as equivalent
(See https://www.python.org/dev/peps/pep-0426/#name).
Args:
distname: distribution name.
paths: list of directory path to search
Returns:
path: (string or None) the valid filesystem path to the distribution.
"""
paths = [p for p in paths if os.path.exists(p)]
if not paths:
return None
# Python packages can be installed as wheels or as eggs. Account for both
# (see https://packaging.python.org/discussions/wheel-vs-egg/)
distinfo = ["{}.dist-info".format(distname), "{}.egg-info".format(distname)]
def normalize_dist(dist):
return dist.lower().replace("_", "-")
distinfo = [normalize_dist(info) for info in distinfo]
for path in paths:
path_map = {normalize_dist(f): f for f in os.listdir(path)}
for info in distinfo:
if info in path_map:
joined = os.path.join(path, path_map[info])
if os.path.isdir(joined):
return joined
return None
def _extract_toplevel_packages(pip_output):
"""Extract the list of toplevel packages associated with a pip install."""
# Account for default installations and --user installations (most common).
# Note: we should possibly also account for --root, --prefix, & -t/--target.
sitepackages = site.getsitepackages() + [site.getusersitepackages()]
for package in _extract_installed_packages(pip_output):
infodir = _get_distinfo_path(package, sitepackages)
if not infodir:
continue
toplevel = os.path.join(infodir, "top_level.txt")
if not os.path.exists(toplevel):
continue
for line in open(toplevel):
line = line.strip()
if line:
yield line
def _previously_imported_packages(pip_output):
"""List all previously imported packages from a pip install."""
installed = set(_extract_toplevel_packages(pip_output))
return sorted(installed.intersection(set(sys.modules)))
def print_previous_import_warning(output):
"""Prints a warning about previously imported packages."""
packages = _previously_imported_packages(output)
if packages:
# display a list of packages using the colab-display-data mimetype, which
# will be printed as a warning + restart button by the Colab frontend.
_display_mimetype(
_COLAB_DATA_MIMETYPE, ({
"pip_warning": {
"packages": packages,
}
},),
raw=True)
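# --- Illustrative behaviour sketch (added; not part of the original module). ---
# is_pip_install_command() only matches the common "pip install ..." spellings:
#
#   assert is_pip_install_command('pip install pandas')
#   assert is_pip_install_command('pip3 install -U numpy')
#   assert not is_pip_install_command('pip freeze')
#
# and _extract_installed_packages() pulls package tokens out of pip's success line:
#
#   list(_extract_installed_packages('Successfully installed foo-1.0 bar-2.1'))
#   # -> ['foo-1.0', 'bar-2.1']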
|
solo/methods/swav.py | xwyzsn/solo-learn | 693 | 11071051 | <reponame>xwyzsn/solo-learn
# Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
from typing import Any, Dict, List, Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from solo.losses.swav import swav_loss_func
from solo.methods.base import BaseMethod
from solo.utils.sinkhorn_knopp import SinkhornKnopp
class SwAV(BaseMethod):
def __init__(
self,
proj_output_dim: int,
proj_hidden_dim: int,
num_prototypes: int,
sk_iters: int,
sk_epsilon: float,
temperature: float,
queue_size: int,
epoch_queue_starts: int,
freeze_prototypes_epochs: int,
**kwargs,
):
"""Implements SwAV (https://arxiv.org/abs/2006.09882).
Args:
proj_output_dim (int): number of dimensions of the projected features.
proj_hidden_dim (int): number of neurons in the hidden layers of the projector.
num_prototypes (int): number of prototypes.
sk_iters (int): number of iterations for the sinkhorn-knopp algorithm.
sk_epsilon (float): weight for the entropy regularization term.
temperature (float): temperature for the softmax normalization.
queue_size (int): number of samples to hold in the queue.
            epoch_queue_starts (int): epoch at which the queue starts being used.
freeze_prototypes_epochs (int): number of epochs during which the prototypes are frozen.
"""
super().__init__(**kwargs)
self.proj_output_dim = proj_output_dim
self.sk_iters = sk_iters
self.sk_epsilon = sk_epsilon
self.temperature = temperature
self.queue_size = queue_size
self.epoch_queue_starts = epoch_queue_starts
self.freeze_prototypes_epochs = freeze_prototypes_epochs
# projector
self.projector = nn.Sequential(
nn.Linear(self.features_dim, proj_hidden_dim),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_output_dim),
)
# prototypes
self.prototypes = nn.utils.weight_norm(
nn.Linear(proj_output_dim, num_prototypes, bias=False)
)
@staticmethod
def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parent_parser = super(SwAV, SwAV).add_model_specific_args(parent_parser)
parser = parent_parser.add_argument_group("swav")
# projector
parser.add_argument("--proj_output_dim", type=int, default=128)
parser.add_argument("--proj_hidden_dim", type=int, default=2048)
# queue settings
parser.add_argument("--queue_size", default=3840, type=int)
# parameters
parser.add_argument("--temperature", type=float, default=0.1)
parser.add_argument("--num_prototypes", type=int, default=3000)
parser.add_argument("--sk_epsilon", type=float, default=0.05)
parser.add_argument("--sk_iters", type=int, default=3)
parser.add_argument("--freeze_prototypes_epochs", type=int, default=1)
parser.add_argument("--epoch_queue_starts", type=int, default=15)
return parent_parser
@property
def learnable_params(self) -> List[dict]:
"""Adds projector and prototypes parameters to the parent's learnable parameters.
Returns:
List[dict]: list of learnable parameters.
"""
extra_learnable_params = [
{"params": self.projector.parameters()},
{"params": self.prototypes.parameters()},
]
return super().learnable_params + extra_learnable_params
def on_train_start(self):
"""Gets the world size and sets it in the sinkhorn and the queue."""
# sinkhorn-knopp needs the world size
world_size = self.trainer.world_size if self.trainer else 1
self.sk = SinkhornKnopp(self.sk_iters, self.sk_epsilon, world_size)
# queue also needs the world size
if self.queue_size > 0:
self.register_buffer(
"queue",
torch.zeros(
2,
self.queue_size // world_size,
self.proj_output_dim,
device=self.device,
),
)
def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
"""Performs the forward pass of the backbone, the projector and the prototypes.
Args:
X (torch.Tensor): a batch of images in the tensor format.
Returns:
Dict[str, Any]:
a dict containing the outputs of the parent,
the projected features and the logits.
"""
out = super().forward(X, *args, **kwargs)
z = self.projector(out["feats"])
z = F.normalize(z)
p = self.prototypes(z)
return {**out, "z": z, "p": p}
@torch.no_grad()
def get_assignments(self, preds: List[torch.Tensor]) -> List[torch.Tensor]:
"""Computes cluster assignments from logits, optionally using a queue.
Args:
preds (List[torch.Tensor]): a batch of logits.
Returns:
List[torch.Tensor]: assignments for each sample in the batch.
"""
bs = preds[0].size(0)
assignments = []
for i, p in enumerate(preds):
# optionally use the queue
if self.queue_size > 0 and self.current_epoch >= self.epoch_queue_starts:
p_queue = self.prototypes(self.queue[i]) # type: ignore
p = torch.cat((p, p_queue))
# compute assignments with sinkhorn-knopp
assignments.append(self.sk(p)[:bs])
return assignments
def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
"""Training step for SwAV reusing BaseMethod training step.
Args:
batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
[X] is a list of size num_crops containing batches of images.
batch_idx (int): index of the batch.
Returns:
torch.Tensor: total loss composed of SwAV loss and classification loss.
"""
out = super().training_step(batch, batch_idx)
class_loss = out["loss"]
feats1, feats2 = out["feats"]
z1 = F.normalize(self.projector(feats1))
z2 = F.normalize(self.projector(feats2))
p1 = self.prototypes(z1)
p2 = self.prototypes(z2)
# ------- swav loss -------
preds = [p1, p2]
assignments = self.get_assignments(preds)
swav_loss = swav_loss_func(preds, assignments, self.temperature)
# ------- update queue -------
if self.queue_size > 0:
z = torch.stack((z1, z2))
self.queue[:, z.size(1) :] = self.queue[:, : -z.size(1)].clone()
self.queue[:, : z.size(1)] = z.detach()
self.log("train_swav_loss", swav_loss, on_epoch=True, sync_dist=True)
return swav_loss + class_loss
def on_after_backward(self):
"""Zeroes the gradients of the prototypes."""
if self.current_epoch < self.freeze_prototypes_epochs:
for p in self.prototypes.parameters():
p.grad = None
|
scripts/color.py | vfinotti/vhdl-extras | 131 | 11071059 | <gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Color formatting
'''
from __future__ import print_function, division
try:
import colorama
colorama.init()
from colorama import Fore, Back, Style
except ImportError:
def note(t): return t
def success(t): return t
def warn(t): return t
def error(t): return t
else:
import os
_no_color = os.getenv('NO_COLOR', 'false')
_no_color = True if _no_color.lower() in ['1', 'true', 't', 'y', 'yes'] else False
def stdout_redirected():
return os.fstat(0) != os.fstat(1)
_redir_stdout = stdout_redirected()
def colorize(t, code):
if _no_color or _redir_stdout:
return t
return ''.join([code, t, Style.RESET_ALL])
def note(t):
return colorize(t, Fore.BLUE)
def success(t):
return colorize(t, Fore.GREEN)
def warn(t):
return colorize(t, Fore.YELLOW + Style.BRIGHT)
def error(t):
return colorize(t, Fore.RED + Style.BRIGHT)
if __name__ == '__main__':
print('Colorized text:\n')
print('note("foobar") : ' + note('foobar'))
print('success("foobar") : ' + success('foobar'))
print('warn("foobar") : ' + warn('foobar'))
print('error("foobar") : ' + error('foobar'))
#import os
#print('redir?', os.fstat(0) == os.fstat(1))
|
app/tests/challenges_tests/test_models.py | kaczmarj/grand-challenge.org | 101 | 11071090 | <gh_stars>100-1000
import pytest
from actstream.actions import is_following
from actstream.models import Action
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import ProtectedError
from machina.apps.forum_conversation.models import Topic
from grandchallenge.challenges.models import Challenge
from grandchallenge.notifications.models import Notification
from tests.factories import ChallengeFactory, UserFactory
from tests.notifications_tests.factories import TopicFactory
@pytest.mark.django_db
def test_group_deletion():
challenge = ChallengeFactory()
participants_group = challenge.participants_group
admins_group = challenge.admins_group
assert participants_group
assert admins_group
challenge.page_set.all().delete()
challenge.phase_set.all().delete()
Challenge.objects.filter(pk__in=[challenge.pk]).delete()
with pytest.raises(ObjectDoesNotExist):
participants_group.refresh_from_db()
with pytest.raises(ObjectDoesNotExist):
admins_group.refresh_from_db()
@pytest.mark.django_db
@pytest.mark.parametrize("group", ["participants_group", "admins_group"])
def test_group_deletion_reverse(group):
challenge = ChallengeFactory()
participants_group = challenge.participants_group
admins_group = challenge.admins_group
assert participants_group
assert admins_group
with pytest.raises(ProtectedError):
getattr(challenge, group).delete()
@pytest.mark.django_db
def test_default_page_is_created():
c = ChallengeFactory()
assert c.page_set.count() == 1
@pytest.mark.django_db
@pytest.mark.parametrize("group", ("participant", "admin"))
def test_participants_follow_forum(group):
u = UserFactory()
c = ChallengeFactory()
add_method = getattr(c, f"add_{group}")
remove_method = getattr(c, f"remove_{group}")
add_method(user=u)
assert is_following(user=u, obj=c.forum)
remove_method(user=u)
assert is_following(user=u, obj=c.forum) is False
# No actions involving the forum should be created
for i in Action.objects.all():
assert c.forum != i.target
assert c.forum != i.action_object
assert c.forum != i.actor
@pytest.mark.django_db
@pytest.mark.parametrize("group", ("participant", "admin"))
def test_non_posters_notified(group):
p = UserFactory()
u = UserFactory()
c = ChallengeFactory()
c.add_admin(user=p)
add_method = getattr(c, f"add_{group}")
add_method(user=u)
# delete all notifications for easier testing below
Notification.objects.all().delete()
TopicFactory(forum=c.forum, poster=p, type=Topic.TOPIC_ANNOUNCE)
assert u.user_profile.has_unread_notifications is True
assert p.user_profile.has_unread_notifications is False
|
tools/build/v2/test/property_expansion.py | jmuskaan72/Boost | 198 | 11071116 | <reponame>jmuskaan72/Boost<filename>tools/build/v2/test/property_expansion.py
#!/usr/bin/python
# Copyright 2003 <NAME>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that a free property (<define>) declared inside a variant is expanded when building.
import BoostBuild
t = BoostBuild.Tester()
t.write("jamroot.jam", """
project ;
variant debug-AA : debug : <define>AA ;
alias all : hello ;
exe hello : hello.cpp ;
explicit hello ;
""")
t.write("hello.cpp", """
#ifdef AA
int main() {}
#endif
""")
t.run_build_system("debug-AA")
t.cleanup()
|
lib/PyAMF-0.6.1/pyamf/util/__init__.py | MiCHiLU/google_appengine_sdk | 790 | 11071145 | <reponame>MiCHiLU/google_appengine_sdk<filename>lib/PyAMF-0.6.1/pyamf/util/__init__.py<gh_stars>100-1000
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF Utilities.
@since: 0.1.0
"""
import calendar
import datetime
import inspect
import pyamf
from pyamf import python
try:
from cpyamf.util import BufferedByteStream
except ImportError:
from pyamf.util.pure import BufferedByteStream
#: On some Python versions retrieving a negative timestamp, like
#: C{datetime.datetime.utcfromtimestamp(-31536000.0)} is broken.
negative_timestamp_broken = False
def get_timestamp(d):
"""
Returns a UTC timestamp for a C{datetime.datetime} object.
@type d: C{datetime.datetime}
@return: UTC timestamp.
@rtype: C{float}
@see: Inspiration taken from the U{Intertwingly blog
<http://intertwingly.net/blog/2007/09/02/Dealing-With-Dates>}.
"""
if isinstance(d, datetime.date) and not isinstance(d, datetime.datetime):
d = datetime.datetime.combine(d, datetime.time(0, 0, 0, 0))
msec = str(d.microsecond).rjust(6).replace(' ', '0')
return float('%s.%s' % (calendar.timegm(d.utctimetuple()), msec))
def get_datetime(secs):
"""
Return a UTC date from a timestamp.
@type secs: C{long}
@param secs: Seconds since 1970.
@return: UTC timestamp.
@rtype: C{datetime.datetime}
"""
if negative_timestamp_broken and secs < 0:
return datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=secs)
return datetime.datetime.utcfromtimestamp(secs)
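# Illustrative round-trip between the two helpers above (doctest-style sketch):
#
#   >>> get_datetime(0)
#   datetime.datetime(1970, 1, 1, 0, 0)
#   >>> get_timestamp(datetime.datetime(1970, 1, 1))
#   0.0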
def get_properties(obj):
"""
Returns a list of properties for L{obj}
@since: 0.5
"""
if hasattr(obj, 'keys'):
return obj.keys()
elif hasattr(obj, '__dict__'):
return obj.__dict__.keys()
return []
def set_attrs(obj, attrs):
"""
Applies a collection of attributes C{attrs} to object C{obj} in the most
generic way possible.
@param obj: An instance implementing C{__setattr__}, or C{__setitem__}
@param attrs: A collection implementing the C{iteritems} function
@type attrs: Usually a dict
"""
o = setattr
if hasattr(obj, '__setitem__'):
o = type(obj).__setitem__
[o(obj, k, v) for k, v in attrs.iteritems()]
def get_class_alias(klass):
"""
Tries to find a suitable L{pyamf.ClassAlias} subclass for C{klass}.
"""
for k, v in pyamf.ALIAS_TYPES.iteritems():
for kl in v:
try:
if issubclass(klass, kl):
return k
except TypeError:
# not a class
if hasattr(kl, '__call__'):
if kl(klass) is True:
return k
def is_class_sealed(klass):
"""
Whether or not the supplied class can accept dynamic properties.
@rtype: C{bool}
@since: 0.5
"""
mro = inspect.getmro(klass)
new = False
if mro[-1] is object:
mro = mro[:-1]
new = True
for kls in mro:
if new and '__dict__' in kls.__dict__:
return False
if not hasattr(kls, '__slots__'):
return False
return True
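# Illustrative sketch (hypothetical classes): a new-style class whose whole MRO
# declares __slots__ and never gains a __dict__, e.g.
#
#   class Sealed(object):
#       __slots__ = ('x',)
#
# is reported as sealed (True), whereas ``class Open(object): pass``, which gets
# a per-instance __dict__, is not (False).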
def get_class_meta(klass):
"""
Returns a C{dict} containing meta data based on the supplied class, useful
for class aliasing.
@rtype: C{dict}
@since: 0.5
"""
if not isinstance(klass, python.class_types) or klass is object:
raise TypeError('klass must be a class object, got %r' % type(klass))
meta = {
'static_attrs': None,
'exclude_attrs': None,
'readonly_attrs': None,
'proxy_attrs': None,
'amf3': None,
'dynamic': None,
'alias': None,
'external': None,
'synonym_attrs': None
}
if not hasattr(klass, '__amf__'):
return meta
a = klass.__amf__
if type(a) is dict:
in_func = lambda x: x in a
get_func = a.__getitem__
else:
in_func = lambda x: hasattr(a, x)
get_func = lambda x: getattr(a, x)
for prop in ['alias', 'amf3', 'dynamic', 'external']:
if in_func(prop):
meta[prop] = get_func(prop)
for prop in ['static', 'exclude', 'readonly', 'proxy', 'synonym']:
if in_func(prop):
meta[prop + '_attrs'] = get_func(prop)
return meta
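# Illustrative sketch (hypothetical class, not part of this module):
#
#   class Foo(object):
#       class __amf__:
#           alias = 'foo.Foo'
#           static = ('bar',)
#
# yields meta['alias'] == 'foo.Foo' and meta['static_attrs'] == ('bar',), with
# the remaining keys left at None.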
def get_module(mod_name):
"""
Load and return a module based on C{mod_name}.
"""
    if mod_name == '':
raise ImportError('Unable to import empty module')
mod = __import__(mod_name)
components = mod_name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
try:
datetime.datetime.utcfromtimestamp(-31536000.0)
except ValueError:
negative_timestamp_broken = True
|
rssant/middleware/prometheus.py | zuzhi/rssant | 1,176 | 11071150 | <reponame>zuzhi/rssant<gh_stars>1000+
import time
from django.http import HttpRequest, HttpResponse
from prometheus_client import Histogram
from prometheus_client.exposition import choose_encoder, REGISTRY
def django_metrics_view(request: HttpRequest) -> HttpResponse:
registry = REGISTRY
accept = request.headers.get('Accept')
encoder, content_type = choose_encoder(accept)
if 'name[]' in request.GET:
        # use getlist() because restricted_registry expects an iterable of metric names
        name = request.GET.getlist('name[]')
registry = registry.restricted_registry(name)
output = encoder(registry)
return HttpResponse(content=output, content_type=content_type)
API_TIME = Histogram(
'rssant_api_time', 'api execute time', [
'path', 'method', 'status'
],
buckets=(
.010, .025, .050, .075, .100, .150, .250, .350, .500,
.750, 1.0, 1.5, 2.5, 5.0, 10.0, 15.0, 30.0, 60.0,
)
)
class RssantPrometheusMiddleware:
"""
https://github.com/korfuri/django-prometheus/blob/master/django_prometheus/middleware.py
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request: HttpRequest):
t_begin = time.monotonic()
response = self.get_response(request)
cost = time.monotonic() - t_begin
path_name = self._get_path_name(request)
status = str(response.status_code)
API_TIME.labels(path_name, request.method, status).observe(cost)
return response
def _get_path_name(self, request):
path_name = "<unnamed_path>"
if hasattr(request, "resolver_match"):
if request.resolver_match is not None:
# resolver_match.route requires django 2.2+
if request.resolver_match.route is not None:
path_name = request.resolver_match.route
return path_name
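# Wiring sketch (assumed Django project configuration, for illustration only):
#
#   MIDDLEWARE = [
#       'rssant.middleware.prometheus.RssantPrometheusMiddleware',
#       # ... the rest of the middleware stack ...
#   ]
#   urlpatterns = [path('metrics', django_metrics_view), ...]
#
# Every request is then observed into the rssant_api_time histogram, labeled by
# resolved route, HTTP method and status code; an optional `name[]` query
# parameter on /metrics restricts the exposition to the requested metric families.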
|
montreal_forced_aligner/multiprocessing/alignment.py | MontrealCorpusTools/Montreal-Forced-Aligner | 702 | 11071153 | <reponame>MontrealCorpusTools/Montreal-Forced-Aligner
import subprocess
import os
import re
import sys
import time
import traceback
from decimal import Decimal
import statistics
from collections import defaultdict
from multiprocessing import Lock
from .helper import make_path_safe, run_mp, run_non_mp, thirdparty_binary
from ..textgrid import parse_from_word, parse_from_word_no_cleanup, parse_from_phone, \
ctms_to_textgrids_non_mp, output_textgrid_writing_errors, generate_tiers, export_textgrid, construct_output_path
from ..exceptions import AlignmentError
import multiprocessing as mp
from ..multiprocessing.helper import Stopped
from queue import Empty
queue_polling_timeout = 1
def acc_stats_func(directory, iteration, job_name, feature_string):
log_path = os.path.join(directory, 'log', 'acc.{}.{}.log'.format(iteration, job_name))
model_path = os.path.join(directory, '{}.mdl'.format(iteration))
acc_path = os.path.join(directory, '{}.{}.acc'.format(iteration, job_name))
ali_path = os.path.join(directory, 'ali.{}'.format(job_name))
with open(log_path, 'w', encoding='utf8') as log_file:
acc_proc = subprocess.Popen([thirdparty_binary('gmm-acc-stats-ali'), model_path,
'{}'.format(feature_string), "ark:" + ali_path, acc_path],
stderr=log_file)
acc_proc.communicate()
def acc_stats(iteration, directory, split_directory, num_jobs, config):
"""
Multiprocessing function that computes stats for GMM training
See http://kaldi-asr.org/doc/gmm-acc-stats-ali_8cc.html for more details
on the Kaldi binary this runs.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/train_mono.sh
for the bash script this function was extracted from
Parameters
----------
iteration : int
Iteration to calculate stats for
directory : str
Directory of training (monophone, triphone, speaker-adapted triphone
training directories)
split_directory : str
Directory of training data split into the number of jobs
num_jobs : int
The number of processes to use in calculation
"""
jobs = [(directory, iteration, x,
config.feature_config.construct_feature_proc_string(split_directory, directory, x)
) for x in range(num_jobs)]
if config.use_mp:
run_mp(acc_stats_func, jobs, config.log_directory)
else:
run_non_mp(acc_stats_func, jobs, config.log_directory)
def compile_train_graphs_func(directory, lang_directory, split_directory, job_name, dictionary_names=None, debug=True):
tree_path = os.path.join(directory, 'tree')
mdl_path = os.path.join(directory, '0.mdl')
if not os.path.exists(mdl_path):
mdl_path = os.path.join(directory, 'final.mdl')
if dictionary_names is None:
log_path = os.path.join(directory, 'log', 'compile-graphs.{}.log'.format(job_name))
fst_scp_path = os.path.join(directory, 'fsts.{}.scp'.format(job_name))
fst_ark_path = os.path.join(directory, 'fsts.{}.ark'.format(job_name))
text_path = os.path.join(split_directory, 'text.{}.int'.format(job_name))
with open(log_path, 'w', encoding='utf8') as log_file:
proc = subprocess.Popen([thirdparty_binary('compile-train-graphs'),
'--read-disambig-syms={}'.format(
os.path.join(lang_directory, 'phones', 'disambig.int')),
tree_path, mdl_path,
os.path.join(lang_directory, 'L.fst'),
"ark:" + text_path, "ark,scp:{},{}".format(fst_ark_path, fst_scp_path)],
stderr=log_file)
proc.communicate()
else:
for name in dictionary_names:
log_path = os.path.join(directory, 'log', 'compile-graphs.{}.{}.log'.format(job_name, name))
fst_scp_path = os.path.join(directory, 'fsts.{}.{}.scp'.format(job_name, name))
fst_ark_path = os.path.join(directory, 'fsts.{}.{}.ark'.format(job_name, name))
text_path = os.path.join(split_directory, 'text.{}.{}.int'.format(job_name, name))
with open(log_path, 'w', encoding='utf8') as log_file:
proc = subprocess.Popen([thirdparty_binary('compile-train-graphs'),
'--read-disambig-syms={}'.format(
os.path.join(lang_directory, 'phones', 'disambig.int')),
tree_path, mdl_path,
os.path.join(lang_directory, name, 'dictionary', 'L.fst'),
"ark:" + text_path, "ark,scp:{},{}".format(fst_ark_path, fst_scp_path)],
stderr=log_file)
proc.communicate()
fst_scp_path = os.path.join(directory, 'fsts.{}.scp'.format(job_name))
lines = []
for name in dictionary_names:
with open(os.path.join(directory, 'fsts.{}.{}.scp'.format(job_name, name)), 'r', encoding='utf8') as inf:
for line in inf:
lines.append(line)
with open(fst_scp_path, 'w', encoding='utf8') as outf:
for line in sorted(lines):
outf.write(line)
def compile_train_graphs(directory, lang_directory, split_directory, num_jobs, aligner, debug=False):
"""
Multiprocessing function that compiles training graphs for utterances
See http://kaldi-asr.org/doc/compile-train-graphs_8cc.html for more details
on the Kaldi binary this function calls.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/train_mono.sh
for the bash script that this function was extracted from.
Parameters
----------
directory : str
Directory of training (monophone, triphone, speaker-adapted triphone
training directories)
lang_directory : str
Directory of the language model used
split_directory : str
Directory of training data split into the number of jobs
num_jobs : int
The number of processes to use
"""
aligner.logger.debug('Compiling training graphs...')
begin = time.time()
log_directory = os.path.join(directory, 'log')
os.makedirs(log_directory, exist_ok=True)
jobs = [(directory, lang_directory, split_directory, x, aligner.dictionaries_for_job(x), debug)
for x in range(num_jobs)]
if aligner.use_mp:
run_mp(compile_train_graphs_func, jobs, log_directory)
else:
run_non_mp(compile_train_graphs_func, jobs, log_directory)
aligner.logger.debug(f'Compiling training graphs took {time.time() - begin}')
def mono_align_equal_func(mono_directory, job_name, feature_string):
fst_path = os.path.join(mono_directory, 'fsts.{}.scp'.format(job_name))
mdl_path = os.path.join(mono_directory, '0.mdl')
log_path = os.path.join(mono_directory, 'log', 'align.0.{}.log'.format(job_name))
ali_path = os.path.join(mono_directory, 'ali.{}'.format(job_name))
acc_path = os.path.join(mono_directory, '0.{}.acc'.format(job_name))
with open(log_path, 'w', encoding='utf8') as log_file:
align_proc = subprocess.Popen([thirdparty_binary('align-equal-compiled'), "scp:" + fst_path,
'{}'.format(feature_string), 'ark:' + ali_path],
stderr=log_file)
align_proc.communicate()
stats_proc = subprocess.Popen([thirdparty_binary('gmm-acc-stats-ali'), '--binary=true',
mdl_path, '{}'.format(feature_string), 'ark:' + ali_path, acc_path],
stdin=align_proc.stdout, stderr=log_file)
stats_proc.communicate()
def mono_align_equal(mono_directory, split_directory, num_jobs, config):
"""
Multiprocessing function that creates equal alignments for base monophone training
See http://kaldi-asr.org/doc/align-equal-compiled_8cc.html for more details
on the Kaldi binary this function calls.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/train_mono.sh
for the bash script that this function was extracted from.
Parameters
----------
mono_directory : str
Directory of monophone training
split_directory : str
Directory of training data split into the number of jobs
num_jobs : int
The number of processes to use
"""
jobs = [(mono_directory, x,
config.feature_config.construct_feature_proc_string(split_directory, mono_directory, x),
)
for x in range(num_jobs)]
if config.use_mp:
run_mp(mono_align_equal_func, jobs, config.log_directory)
else:
run_non_mp(mono_align_equal_func, jobs, config.log_directory)
def align_func(directory, iteration, job_name, mdl, config, feature_string, output_directory, debug=False):
fst_path = os.path.join(directory, 'fsts.{}.scp'.format(job_name))
log_path = os.path.join(output_directory, 'log', 'align.{}.{}.log'.format(iteration, job_name))
ali_path = os.path.join(output_directory, 'ali.{}'.format(job_name))
score_path = os.path.join(output_directory, 'ali.{}.scores'.format(job_name))
with open(log_path, 'w', encoding='utf8') as log_file:
        log_file.write('DEBUG: {}\n'.format(debug))
if debug:
loglike_path = os.path.join(output_directory, 'ali.{}.loglikes'.format(job_name))
com = [thirdparty_binary('gmm-align-compiled'),
'--transition-scale={}'.format(config['transition_scale']),
'--acoustic-scale={}'.format(config['acoustic_scale']),
'--self-loop-scale={}'.format(config['self_loop_scale']),
'--beam={}'.format(config['beam']),
'--retry-beam={}'.format(config['retry_beam']),
'--careful=false',
'--write-per-frame-acoustic-loglikes=ark,t:{}'.format(loglike_path),
mdl,
"scp:" + fst_path, '{}'.format(feature_string), "ark:" + ali_path,
"ark,t:" + score_path]
else:
com = [thirdparty_binary('gmm-align-compiled'),
'--transition-scale={}'.format(config['transition_scale']),
'--acoustic-scale={}'.format(config['acoustic_scale']),
'--self-loop-scale={}'.format(config['self_loop_scale']),
'--beam={}'.format(config['beam']),
'--retry-beam={}'.format(config['retry_beam']),
'--careful=false',
mdl,
"scp:" + fst_path, '{}'.format(feature_string), "ark:" + ali_path]
align_proc = subprocess.Popen(com,
stderr=log_file)
align_proc.communicate()
def align(iteration, directory, split_directory, optional_silence, num_jobs, config,
output_directory=None, speaker_independent=False):
"""
Multiprocessing function that aligns based on the current model
See http://kaldi-asr.org/doc/gmm-align-compiled_8cc.html and
http://kaldi-asr.org/doc/gmm-boost-silence_8cc.html for more details
on the Kaldi binary this function calls.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/align_si.sh
for the bash script this function was based on.
Parameters
----------
iteration : int or str
Iteration to align
directory : str
Directory of training (monophone, triphone, speaker-adapted triphone
training directories)
split_directory : str
Directory of training data split into the number of jobs
optional_silence : str
Colon-separated list of silence phones to boost
num_jobs : int
The number of processes to use in calculation
config : :class:`~aligner.config.MonophoneConfig`, :class:`~aligner.config.TriphoneConfig` or :class:`~aligner.config.TriphoneFmllrConfig`
Configuration object for training
"""
begin = time.time()
if output_directory is None:
output_directory = directory
log_directory = os.path.join(output_directory, 'log')
align_model_path = os.path.join(directory, '{}.alimdl'.format(iteration))
if not speaker_independent or not os.path.exists(align_model_path):
align_model_path = os.path.join(directory, '{}.mdl'.format(iteration))
if config.boost_silence != 1.0:
mdl = "{} --boost={} {} {} - |".format(thirdparty_binary('gmm-boost-silence'),
config.boost_silence, optional_silence, make_path_safe(align_model_path))
else:
mdl = align_model_path
jobs = [(directory, iteration, x, mdl, config.align_options,
config.feature_config.construct_feature_proc_string(split_directory, directory, x),
output_directory, config.debug) for x in range(num_jobs)]
if config.use_mp:
run_mp(align_func, jobs, log_directory)
else:
run_non_mp(align_func, jobs, log_directory)
error_logs = []
for i in range(num_jobs):
log_path = os.path.join(output_directory, 'log', 'align.{}.{}.log'.format(iteration, i))
with open(log_path, 'r', encoding='utf8') as f:
for line in f:
if line.strip().startswith('ERROR'):
error_logs.append(log_path)
break
if error_logs:
message = 'There were {} job(s) with errors. For more information, please see the following logs:\n\n{}'
raise (AlignmentError(message.format(len(error_logs), '\n'.join(error_logs))))
config.logger.debug(f'Alignment round took {time.time() - begin}')
def compile_information_func(log_directory, split_directory, job_num):
align_path = os.path.join(log_directory, 'align.final.{}.log'.format(job_num))
log_like_pattern = re.compile(
r'^LOG .* Overall log-likelihood per frame is (?P<log_like>[-0-9.]+) over (?P<frames>\d+) frames.*$')
decode_error_pattern = re.compile(r'^WARNING .* Did not successfully decode file (?P<utt>.*?), .*$')
feature_pattern = re.compile(r'Segment (?P<utt>.*?) too short')
data = {'unaligned': [], 'too_short': [], 'log_like': 0, 'total_frames': 0}
with open(align_path, 'r', encoding='utf8') as f:
for line in f:
decode_error_match = re.match(decode_error_pattern, line)
if decode_error_match:
data['unaligned'].append(decode_error_match.group('utt'))
continue
log_like_match = re.match(log_like_pattern, line)
if log_like_match:
log_like = log_like_match.group('log_like')
frames = log_like_match.group('frames')
data['log_like'] = float(log_like)
data['total_frames'] = int(frames)
features_path = os.path.join(split_directory, 'log', 'make_mfcc.{}.log'.format(job_num))
with open(features_path, 'r', encoding='utf8') as f:
for line in f:
m = re.search(feature_pattern, line)
if m is not None:
                utt = m.group('utt')
data['too_short'].append(utt)
return data
def compile_information(model_directory, corpus, num_jobs, config):
compile_info_begin = time.time()
log_dir = os.path.join(model_directory, 'log')
manager = mp.Manager()
alignment_info = manager.dict()
jobs = [(log_dir, corpus.split_directory(), x)
for x in range(num_jobs)]
if config.use_mp:
run_mp(compile_information_func, jobs, log_dir, alignment_info)
else:
run_non_mp(compile_information_func, jobs, log_dir)
unaligned = {}
total_frames = sum(data['total_frames'] for data in alignment_info.values())
average_log_like = 0
for x, data in alignment_info.items():
if total_frames:
weight = data['total_frames'] / total_frames
average_log_like += data['log_like'] * weight
for u in data['unaligned']:
unaligned[u] = 'Beam too narrow'
for u in data['too_short']:
unaligned[u] = 'Segment too short'
if not total_frames:
corpus.logger.warning('No files were aligned, this likely indicates serious problems with the aligner.')
corpus.logger.debug(f'Compiling information took {time.time() - compile_info_begin}')
return unaligned, average_log_like
def compute_alignment_improvement_func(iteration, data_directory, model_directory, phones_dir, job_name,
frame_shift, reversed_phone_mapping, positions):
try:
text_int_path = os.path.join(data_directory, 'text.{}.int'.format(job_name))
log_path = os.path.join(model_directory, 'log', 'get_ctm.{}.{}.log'.format(iteration, job_name))
ali_path = os.path.join(model_directory, 'ali.{}'.format(job_name))
model_path = os.path.join(model_directory, '{}.mdl'.format(iteration))
phone_ctm_path = os.path.join(model_directory, 'phone.{}.{}.ctm'.format(iteration, job_name))
if os.path.exists(phone_ctm_path):
return
frame_shift = frame_shift / 1000
with open(log_path, 'w', encoding='utf8') as log_file:
lin_proc = subprocess.Popen([thirdparty_binary('linear-to-nbest'), "ark:" + ali_path,
"ark:" + text_int_path,
'', '', 'ark:-'],
stdout=subprocess.PIPE, stderr=log_file)
det_proc = subprocess.Popen([thirdparty_binary('lattice-determinize-pruned'),
'ark:-', 'ark:-'],
stdin=lin_proc.stdout, stderr=log_file,
stdout=subprocess.PIPE)
align_proc = subprocess.Popen([thirdparty_binary('lattice-align-words'),
os.path.join(phones_dir, 'word_boundary.int'), model_path,
'ark:-', 'ark:-'],
stdin=det_proc.stdout, stderr=log_file,
stdout=subprocess.PIPE)
phone_proc = subprocess.Popen([thirdparty_binary('lattice-to-phone-lattice'), model_path,
'ark:-', "ark:-"],
stdin=align_proc.stdout,
stdout=subprocess.PIPE,
stderr=log_file)
nbest_proc = subprocess.Popen([thirdparty_binary('nbest-to-ctm'),
'--frame-shift={}'.format(frame_shift),
"ark:-", phone_ctm_path],
stdin=phone_proc.stdout,
stderr=log_file)
nbest_proc.communicate()
mapping = reversed_phone_mapping
actual_lines = []
with open(phone_ctm_path, 'r', encoding='utf8') as f:
for line in f:
line = line.strip()
if line == '':
continue
line = line.split(' ')
utt = line[0]
begin = Decimal(line[2])
duration = Decimal(line[3])
end = begin + duration
label = line[4]
try:
label = mapping[int(label)]
except KeyError:
pass
for p in positions:
if label.endswith(p):
label = label[:-1 * len(p)]
actual_lines.append([utt, begin, end, label])
with open(phone_ctm_path, 'w', encoding='utf8') as f:
for line in actual_lines:
f.write('{}\n'.format(' '.join(map(str, line))))
except Exception as e:
raise (Exception(str(e)))
def parse_iteration_alignments(directory, iteration, num_jobs):
data = {}
for j in range(num_jobs):
phone_ctm_path = os.path.join(directory, 'phone.{}.{}.ctm'.format(iteration, j))
with open(phone_ctm_path, 'r', encoding='utf8') as f:
for line in f:
line = line.strip()
if line == '':
continue
line = line.split(' ')
utt = line[0]
begin = Decimal(line[1])
end = Decimal(line[2])
label = line[3]
if utt not in data:
data[utt] = []
data[utt].append([begin, end, label])
return data
def compare_alignments(alignments_one, alignments_two, frame_shift):
utterances_aligned_diff = len(alignments_two) - len(alignments_one)
utts_one = set(alignments_one.keys())
utts_two = set(alignments_two.keys())
common_utts = utts_one.intersection(utts_two)
differences = []
for u in common_utts:
end = alignments_one[u][-1][1]
t = Decimal('0.0')
one_alignment = alignments_one[u]
two_alignment = alignments_two[u]
difference = 0
while t < end:
one_label = None
two_label = None
for b, e, l in one_alignment:
if t < b:
continue
if t >= e:
break
one_label = l
for b, e, l in two_alignment:
if t < b:
continue
if t >= e:
break
two_label = l
if one_label != two_label:
difference += frame_shift
t += frame_shift
difference /= end
differences.append(difference)
if differences:
mean_difference = statistics.mean(differences)
else:
mean_difference = 'N/A'
return utterances_aligned_diff, mean_difference
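# Worked example (illustrative numbers): with frame_shift = 0.01 s, an utterance
# of 200 frames (2.0 s) whose two alignments disagree on the phone label for 20
# frames contributes a per-utterance difference of 20 * 0.01 / 2.0 = 0.1; the
# function returns the change in the number of aligned utterances and the mean
# of these per-utterance differences.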
def compute_alignment_improvement(iteration, config, model_directory, num_jobs):
jobs = [(iteration, config.data_directory, model_directory, config.dictionary.phones_dir, x,
config.feature_config.frame_shift, config.dictionary.reversed_phone_mapping, config.dictionary.positions)
for x in range(num_jobs)]
if config.use_mp:
run_mp(compute_alignment_improvement_func, jobs, config.log_directory)
else:
run_non_mp(compute_alignment_improvement_func, jobs, config.log_directory)
alignment_diff_path = os.path.join(model_directory, 'train_change.csv')
if iteration == 0 or iteration not in config.realignment_iterations:
return
ind = config.realignment_iterations.index(iteration)
if ind != 0:
previous_iteration = config.realignment_iterations[ind - 1]
else:
previous_iteration = 0
try:
previous_alignments = parse_iteration_alignments(model_directory, previous_iteration, num_jobs)
except FileNotFoundError:
return
current_alignments = parse_iteration_alignments(model_directory, iteration, num_jobs)
utterance_aligned_diff, mean_difference = compare_alignments(previous_alignments, current_alignments,
config.feature_config.frame_shift)
if not os.path.exists(alignment_diff_path):
with open(alignment_diff_path, 'w', encoding='utf8') as f:
f.write('iteration,number_aligned,number_previously_aligned,'
'difference_in_utts_aligned,mean_boundary_change\n')
if iteration in config.realignment_iterations:
with open(alignment_diff_path, 'a', encoding='utf8') as f:
f.write('{},{},{},{},{}\n'.format(iteration, len(current_alignments),
len(previous_alignments), utterance_aligned_diff, mean_difference))
if not config.debug:
for j in range(num_jobs):
phone_ctm_path = os.path.join(model_directory, 'phone.{}.{}.ctm'.format(previous_iteration, j))
os.remove(phone_ctm_path)
def ali_to_ctm_func(model_directory, word_path, split_directory, job_name, frame_shift, word_mode=True):
text_int_path = os.path.join(split_directory, 'text.{}.int'.format(job_name))
ali_path = os.path.join(model_directory, 'ali.{}'.format(job_name))
model_path = os.path.join(model_directory, 'final.mdl')
if word_mode:
ctm_path = os.path.join(model_directory, 'word_ctm.{}'.format(job_name))
log_path = os.path.join(model_directory, 'log', 'get_word_ctm_.{}.log'.format(job_name))
else:
ctm_path = os.path.join(model_directory, 'phone_ctm.{}'.format(job_name))
log_path = os.path.join(model_directory, 'log', 'get_phone_ctm_.{}.log'.format(job_name))
if os.path.exists(ctm_path):
return
with open(log_path, 'w', encoding='utf8') as log_file:
lin_proc = subprocess.Popen([thirdparty_binary('linear-to-nbest'), "ark:" + ali_path,
"ark:" + text_int_path,
'', '', 'ark:-'],
stdout=subprocess.PIPE, stderr=log_file)
align_words_proc = subprocess.Popen([thirdparty_binary('lattice-align-words'),
word_path, model_path,
'ark:-', 'ark:-'],
stdin=lin_proc.stdout, stdout=subprocess.PIPE, stderr=log_file)
if word_mode:
nbest_proc = subprocess.Popen([thirdparty_binary('nbest-to-ctm'),
'--frame-shift={}'.format(frame_shift),
'ark:-',
ctm_path],
stderr=log_file, stdin=align_words_proc.stdout)
else:
phone_proc = subprocess.Popen([thirdparty_binary('lattice-to-phone-lattice'), model_path,
'ark:-', "ark:-"],
stdout=subprocess.PIPE, stdin=align_words_proc.stdout,
stderr=log_file)
nbest_proc = subprocess.Popen([thirdparty_binary('nbest-to-ctm'),
'--frame-shift={}'.format(frame_shift),
"ark:-", ctm_path],
stdin=phone_proc.stdout,
stderr=log_file)
nbest_proc.communicate()
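# The subprocess chain above corresponds roughly to this shell pipeline (sketch,
# paths abbreviated; a lattice-to-phone-lattice stage is inserted before
# nbest-to-ctm when word_mode is False):
#
#   linear-to-nbest ark:ali.JOB ark:text.JOB.int '' '' ark:- \
#     | lattice-align-words word_boundary.int final.mdl ark:- ark:- \
#     | nbest-to-ctm --frame-shift=0.01 ark:- word_ctm.JOB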
def process_line(line, utt_begin):
line = line.split(' ')
utt = line[0]
begin = round(float(line[2]), 4)
duration = float(line[3])
end = round(begin + duration, 4)
label = line[4]
begin += utt_begin
end += utt_begin
return utt, begin, end, label
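# Example (hypothetical CTM line): "utt1 1 0.25 0.12 hello" parses to
# utt="utt1", begin=0.25 + utt_begin, end=0.37 + utt_begin, label="hello";
# the channel field (index 1) is ignored.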
class NoCleanupWordCtmProcessWorker(mp.Process):
def __init__(self, job_name, ctm_path, to_process_queue, stopped, error_catching,
segments, utt_speak_mapping,
reversed_word_mapping, speaker_mapping):
mp.Process.__init__(self)
self.job_name = job_name
self.ctm_path = ctm_path
self.to_process_queue = to_process_queue
self.stopped = stopped
self.error_catching = error_catching
# Corpus information
self.segments = segments
self.utt_speak_mapping = utt_speak_mapping
# Dictionary information
self.reversed_word_mapping = reversed_word_mapping
self.speaker_mapping = speaker_mapping
def run(self):
current_file_data = {}
def process_current(cur_utt, cur_file, current_labels):
speaker = self.utt_speak_mapping[cur_utt]
reversed_word_mapping = self.reversed_word_mapping
if self.speaker_mapping is not None:
dict_lookup_speaker = speaker
if speaker not in self.speaker_mapping:
dict_lookup_speaker = 'default'
reversed_word_mapping = self.reversed_word_mapping[self.speaker_mapping[dict_lookup_speaker]]
actual_labels = parse_from_word_no_cleanup(current_labels, reversed_word_mapping)
if speaker not in current_file_data:
current_file_data[speaker] = []
current_file_data[speaker].extend(actual_labels)
def process_current_file(cur_file):
self.to_process_queue.put(('word', cur_file, current_file_data))
cur_utt = None
cur_file = None
utt_begin = 0
current_labels = []
sum_time = 0
count_time = 0
try:
with open(self.ctm_path, 'r') as word_file:
for line in word_file:
line = line.strip()
if not line:
continue
utt, begin, end, label = process_line(line, utt_begin)
if cur_utt is None:
cur_utt = utt
begin_time = time.time()
if cur_utt in self.segments:
seg = self.segments[cur_utt]
cur_file = seg['file_name']
utt_begin = seg['begin']
else:
utt_begin = 0
cur_file = utt
begin += utt_begin
end += utt_begin
if utt != cur_utt:
process_current(cur_utt, cur_file, current_labels)
cur_utt = utt
if cur_utt in self.segments:
seg = self.segments[cur_utt]
file_name = seg['file_name']
utt_begin = seg['begin']
else:
utt_begin = 0
file_name = utt
if file_name != cur_file:
process_current_file(cur_file)
current_file_data = {}
sum_time += time.time() - begin_time
count_time += 1
begin_time = time.time()
cur_file = file_name
current_labels = []
current_labels.append([begin, end, label])
if current_labels:
process_current(cur_utt, cur_file, current_labels)
process_current_file(cur_file)
sum_time += time.time() - begin_time
count_time += 1
except Exception as e:
self.stopped.stop()
exc_type, exc_value, exc_traceback = sys.exc_info()
self.error_catching[('word', self.job_name)] = '\n'.join(
traceback.format_exception(exc_type, exc_value, exc_traceback))
class CleanupWordCtmProcessWorker(mp.Process):
def __init__(self, job_name, ctm_path, to_process_queue, stopped, error_catching,
segments, text_mapping, utt_speak_mapping,
words_mapping, speaker_mapping,
punctuation, clitic_set, clitic_markers, compound_markers, oov_int):
mp.Process.__init__(self)
self.job_name = job_name
self.ctm_path = ctm_path
self.to_process_queue = to_process_queue
self.stopped = stopped
self.error_catching = error_catching
# Corpus information
self.segments = segments
self.text_mapping = text_mapping
self.utt_speak_mapping = utt_speak_mapping
# Dictionary information
self.words_mapping = words_mapping
self.speaker_mapping = speaker_mapping
self.punctuation = punctuation
self.clitic_set = clitic_set
self.clitic_markers = clitic_markers
self.compound_markers = compound_markers
self.oov_int = oov_int
def run(self):
current_file_data = {}
def process_current(cur_utt, cur_file, current_labels):
text = self.text_mapping[cur_utt].split()
speaker = self.utt_speak_mapping[cur_utt]
words_mapping = self.words_mapping
oov_int = self.oov_int
if self.speaker_mapping is not None:
dict_lookup_speaker = speaker
if speaker not in self.speaker_mapping:
dict_lookup_speaker = 'default'
words_mapping = self.words_mapping[self.speaker_mapping[dict_lookup_speaker]]
oov_int = self.oov_int[self.speaker_mapping[dict_lookup_speaker]]
actual_labels = parse_from_word(current_labels, text, words_mapping, self.punctuation, self.clitic_set,
self.clitic_markers, self.compound_markers, oov_int)
if speaker not in current_file_data:
current_file_data[speaker] = []
current_file_data[speaker].extend(actual_labels)
def process_current_file(cur_file):
self.to_process_queue.put(('word', cur_file, current_file_data))
cur_utt = None
cur_file = None
utt_begin = 0
current_labels = []
sum_time = 0
count_time = 0
try:
with open(self.ctm_path, 'r') as word_file:
for line in word_file:
line = line.strip()
if not line:
continue
utt, begin, end, label = process_line(line, utt_begin)
if cur_utt is None:
cur_utt = utt
begin_time = time.time()
if cur_utt in self.segments:
seg = self.segments[cur_utt]
cur_file = seg['file_name']
utt_begin = seg['begin']
else:
utt_begin = 0
cur_file = utt
begin += utt_begin
end += utt_begin
if utt != cur_utt:
process_current(cur_utt, cur_file, current_labels)
cur_utt = utt
if cur_utt in self.segments:
seg = self.segments[cur_utt]
file_name = seg['file_name']
utt_begin = seg['begin']
else:
utt_begin = 0
file_name = utt
if file_name != cur_file:
process_current_file(cur_file)
current_file_data = {}
sum_time += time.time() - begin_time
count_time += 1
begin_time = time.time()
cur_file = file_name
current_labels = []
current_labels.append([begin, end, label])
if current_labels:
process_current(cur_utt, cur_file, current_labels)
process_current_file(cur_file)
sum_time += time.time() - begin_time
count_time += 1
except Exception as e:
self.stopped.stop()
exc_type, exc_value, exc_traceback = sys.exc_info()
self.error_catching[('word', self.job_name)] = '\n'.join(
traceback.format_exception(exc_type, exc_value, exc_traceback))
class PhoneCtmProcessWorker(mp.Process):
def __init__(self, job_name, ctm_path, to_process_queue, stopped, error_catching,
segments, utt_speak_mapping,
reversed_phone_mapping, speaker_mapping, positions):
mp.Process.__init__(self)
self.job_name = job_name
self.ctm_path = ctm_path
self.to_process_queue = to_process_queue
self.stopped = stopped
self.error_catching = error_catching
self.segments = segments
self.utt_speak_mapping = utt_speak_mapping
self.reversed_phone_mapping = reversed_phone_mapping
self.speaker_mapping = speaker_mapping
self.positions = positions
def run(self):
main_begin = time.time()
cur_utt = None
cur_file = None
utt_begin = 0
current_labels = []
sum_time = 0
count_time = 0
current_file_data = {}
def process_current_utt(cur_utt, cur_file, current_labels):
speaker = self.utt_speak_mapping[cur_utt]
reversed_phone_mapping = self.reversed_phone_mapping
if self.speaker_mapping is not None:
dict_lookup_speaker = speaker
if speaker not in self.speaker_mapping:
dict_lookup_speaker = 'default'
reversed_phone_mapping = self.reversed_phone_mapping[self.speaker_mapping[dict_lookup_speaker]]
actual_labels = parse_from_phone(current_labels, reversed_phone_mapping, self.positions)
if speaker not in current_file_data:
current_file_data[speaker] = []
current_file_data[speaker].extend(actual_labels)
def process_current_file(cur_file):
self.to_process_queue.put(('phone', cur_file, current_file_data))
try:
with open(self.ctm_path, 'r') as word_file:
for line in word_file:
line = line.strip()
if not line:
continue
utt, begin, end, label = process_line(line, utt_begin)
if cur_utt is None:
cur_utt = utt
begin_time = time.time()
if cur_utt in self.segments:
seg = self.segments[cur_utt]
cur_file = seg['file_name']
utt_begin = seg['begin']
else:
utt_begin = 0
cur_file = utt
begin += utt_begin
end += utt_begin
if utt != cur_utt:
process_current_utt(cur_utt, cur_file, current_labels)
cur_utt = utt
if cur_utt in self.segments:
seg = self.segments[cur_utt]
file_name = seg['file_name']
utt_begin = seg['begin']
else:
utt_begin = 0
file_name = utt
if file_name != cur_file:
process_current_file(cur_file)
current_file_data = {}
sum_time += time.time() - begin_time
count_time += 1
begin_time = time.time()
cur_file = file_name
current_labels = []
current_labels.append([begin, end, label])
if current_labels:
process_current_utt(cur_utt, cur_file, current_labels)
process_current_file(cur_file)
sum_time += time.time() - begin_time
count_time += 1
except Exception as e:
self.stopped.stop()
exc_type, exc_value, exc_traceback = sys.exc_info()
self.error_catching[('phone', self.job_name)] = '\n'.join(
traceback.format_exception(exc_type, exc_value, exc_traceback))
class CombineProcessWorker(mp.Process):
def __init__(self, job_name, to_process_queue, to_export_queue, stopped, finished_combining, error_catching,
silences, multilingual_ipa, words_mapping, speaker_mapping,
punctuation, clitic_set, clitic_markers, compound_markers, oov_code, words,
strip_diacritics, cleanup_textgrids):
mp.Process.__init__(self)
self.job_name = job_name
self.to_process_queue = to_process_queue
self.to_export_queue = to_export_queue
self.stopped = stopped
self.finished_combining = finished_combining
self.error_catching = error_catching
self.silences = silences
self.multilingual_ipa = multilingual_ipa
self.words_mapping = words_mapping
self.speaker_mapping = speaker_mapping
self.punctuation = punctuation
self.clitic_set = clitic_set
self.clitic_markers = clitic_markers
self.compound_markers = compound_markers
self.oov_code = oov_code
self.words = words
self.strip_diacritics = strip_diacritics
self.cleanup_textgrids = cleanup_textgrids
def run(self):
sum_time = 0
count_time = 0
phone_data = {}
word_data = {}
while True:
try:
w_p, file_name, data = self.to_process_queue.get(timeout=queue_polling_timeout)
begin_time = time.time()
except Empty as error:
if self.finished_combining.stop_check():
break
continue
self.to_process_queue.task_done()
if self.stopped.stop_check():
continue
if w_p == 'phone':
if file_name in word_data:
word_ctm = word_data.pop(file_name)
phone_ctm = data
else:
phone_data[file_name] = data
continue
else:
if file_name in phone_data:
phone_ctm = phone_data.pop(file_name)
word_ctm = data
else:
word_data[file_name] = data
continue
try:
data = generate_tiers(word_ctm, phone_ctm, self.silences, self.multilingual_ipa,
self.words_mapping, self.speaker_mapping,
self.punctuation, self.clitic_set, self.clitic_markers, self.compound_markers,
self.oov_code, self.words,
self.strip_diacritics, cleanup_textgrids=self.cleanup_textgrids)
self.to_export_queue.put((file_name, data))
except Exception as e:
self.stopped.stop()
exc_type, exc_value, exc_traceback = sys.exc_info()
self.error_catching[('combining', self.job_name)] = '\n'.join(
traceback.format_exception(exc_type, exc_value, exc_traceback))
sum_time += time.time() - begin_time
count_time += 1
class ExportTextGridProcessWorker(mp.Process):
def __init__(self, for_write_queue, stopped, finished_processing, textgrid_errors,
out_directory, backup_output_directory, wav_durations,
frame_shift, file_directory_mapping, file_name_mapping, speaker_ordering):
mp.Process.__init__(self)
self.for_write_queue = for_write_queue
self.stopped = stopped
self.finished_processing = finished_processing
self.textgrid_errors = textgrid_errors
self.out_directory = out_directory
self.backup_output_directory = backup_output_directory
self.wav_durations = wav_durations
self.frame_shift = frame_shift
self.file_directory_mapping = file_directory_mapping
self.file_name_mapping = file_name_mapping
self.speaker_ordering = speaker_ordering
def run(self):
while True:
try:
file_name, data = self.for_write_queue.get(timeout=queue_polling_timeout)
except Empty as error:
if self.finished_processing.stop_check():
break
continue
self.for_write_queue.task_done()
if self.stopped.stop_check():
continue
overwrite = True
speaker = None
if len(data) == 1:
speaker = next(iter(data))
output_name, output_path = construct_output_path(file_name, self.out_directory,
self.file_directory_mapping, self.file_name_mapping,
speaker, self.backup_output_directory)
max_time = round(self.wav_durations[output_name], 4)
try:
export_textgrid(file_name, output_path, data, max_time,
self.frame_shift, self.speaker_ordering, overwrite)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.textgrid_errors[file_name] = '\n'.join(
traceback.format_exception(exc_type, exc_value, exc_traceback))
class ExportPreparationProcessWorker(mp.Process):
def __init__(self, to_export_queue, for_write_queue, stopped, finished_combining, file_speaker_mapping):
mp.Process.__init__(self)
self.to_export_queue = to_export_queue
self.for_write_queue = for_write_queue
self.stopped = stopped
self.finished_combining = finished_combining
self.file_speaker_mapping = file_speaker_mapping
def run(self):
export_data = {}
while True:
try:
file_name, data = self.to_export_queue.get(timeout=queue_polling_timeout)
except Empty as error:
if self.finished_combining.stop_check():
break
continue
self.to_export_queue.task_done()
if self.stopped.stop_check():
continue
if file_name in self.file_speaker_mapping and len(self.file_speaker_mapping[file_name]) > 1:
if file_name not in export_data:
export_data[file_name] = data
else:
export_data[file_name].update(data)
if len(export_data[file_name]) == len(self.file_speaker_mapping[file_name]):
data = export_data.pop(file_name)
self.for_write_queue.put((file_name, data))
else:
self.for_write_queue.put((file_name, data))
for k, v in export_data.items():
self.for_write_queue.put((k, v))
def ctms_to_textgrids_mp(align_config, output_directory, model_directory, dictionary, corpus, num_jobs):
frame_shift = align_config.feature_config.frame_shift / 1000
export_begin = time.time()
manager = mp.Manager()
textgrid_errors = manager.dict()
error_catching = manager.dict()
stopped = Stopped()
backup_output_directory = None
if not align_config.overwrite:
backup_output_directory = os.path.join(model_directory, 'textgrids')
os.makedirs(backup_output_directory, exist_ok=True)
if dictionary.has_multiple:
words_mapping = {}
words = {}
reversed_phone_mapping = {}
reversed_word_mapping = {}
for name, d in dictionary.dictionary_mapping.items():
words_mapping[name] = d.words_mapping
words[name] = d.words
reversed_phone_mapping[name] = d.reversed_phone_mapping
reversed_word_mapping[name] = d.reversed_word_mapping
speaker_mapping = dictionary.speaker_mapping
oov_int = {name: d.oov_int for name, d in dictionary.dictionary_mapping.items()}
else:
words_mapping = dictionary.words_mapping
words = dictionary.words
reversed_phone_mapping = dictionary.reversed_phone_mapping
reversed_word_mapping = dictionary.reversed_word_mapping
speaker_mapping = None
oov_int = dictionary.oov_int
punctuation = dictionary.punctuation
clitic_set = dictionary.clitic_set
clitic_markers = dictionary.clitic_markers
compound_markers = dictionary.compound_markers
corpus.logger.debug('Starting combination process...')
silences = dictionary.silences
corpus.logger.debug('Starting export process...')
corpus.logger.debug('Beginning to process ctm files...')
ctm_begin_time = time.time()
word_procs = []
phone_procs = []
combine_procs = []
finished_signals = [Stopped() for _ in range(num_jobs)]
finished_processing = Stopped()
to_process_queue = [mp.JoinableQueue() for _ in range(num_jobs)]
to_export_queue = mp.JoinableQueue()
for_write_queue = mp.JoinableQueue()
finished_combining = Stopped()
for i in range(num_jobs):
word_ctm_path = os.path.join(model_directory, 'word_ctm.{}'.format(i))
phone_ctm_path = os.path.join(model_directory, 'phone_ctm.{}'.format(i))
if align_config.cleanup_textgrids:
word_p = CleanupWordCtmProcessWorker(i, word_ctm_path, to_process_queue[i], stopped, error_catching,
corpus.segments, corpus.text_mapping, corpus.utt_speak_mapping,
words_mapping, speaker_mapping,
punctuation, clitic_set, clitic_markers, compound_markers, oov_int)
else:
            corpus.logger.debug('Not cleaning up textgrids')
word_p = NoCleanupWordCtmProcessWorker(i, word_ctm_path, to_process_queue[i], stopped, error_catching,
corpus.segments, corpus.utt_speak_mapping, reversed_word_mapping,
speaker_mapping)
word_procs.append(word_p)
word_p.start()
phone_p = PhoneCtmProcessWorker(i, phone_ctm_path, to_process_queue[i], stopped, error_catching,
corpus.segments, corpus.utt_speak_mapping, reversed_phone_mapping,
speaker_mapping,
dictionary.positions)
phone_p.start()
phone_procs.append(phone_p)
combine_p = CombineProcessWorker(i, to_process_queue[i], to_export_queue, stopped, finished_signals[i],
error_catching,
silences,
dictionary.multilingual_ipa, words_mapping, speaker_mapping,
punctuation, clitic_set, clitic_markers, compound_markers,
dictionary.oov_code, words, dictionary.strip_diacritics,
align_config.cleanup_textgrids)
combine_p.start()
combine_procs.append(combine_p)
preparation_proc = ExportPreparationProcessWorker(to_export_queue, for_write_queue, stopped, finished_combining,
corpus.file_speaker_mapping)
preparation_proc.start()
export_procs = []
for i in range(num_jobs):
export_proc = ExportTextGridProcessWorker(for_write_queue, stopped, finished_processing, textgrid_errors,
output_directory, backup_output_directory, corpus.file_durations,
frame_shift, corpus.file_directory_mapping, corpus.file_name_mapping,
corpus.speaker_ordering)
export_proc.start()
export_procs.append(export_proc)
corpus.logger.debug('Waiting for processes to finish...')
for i in range(num_jobs):
word_procs[i].join()
phone_procs[i].join()
finished_signals[i].stop()
corpus.logger.debug(f'Ctm parsers took {time.time() - ctm_begin_time} seconds')
corpus.logger.debug('Waiting for processes to finish...')
for i in range(num_jobs):
to_process_queue[i].join()
combine_procs[i].join()
finished_combining.stop()
to_export_queue.join()
preparation_proc.join()
corpus.logger.debug(f'Combiners took {time.time() - ctm_begin_time} seconds')
corpus.logger.debug('Beginning export...')
corpus.logger.debug(f'Adding jobs for export took {time.time() - export_begin}')
corpus.logger.debug('Waiting for export processes to join...')
for_write_queue.join()
finished_processing.stop()
for i in range(num_jobs):
export_procs[i].join()
for_write_queue.join()
corpus.logger.debug(f'Export took {time.time() - export_begin} seconds')
if error_catching:
corpus.logger.error('Error was encountered in processing CTMs')
for key, error in error_catching.items():
corpus.logger.error(f'{key}:\n\n{error}')
raise AlignmentError()
output_textgrid_writing_errors(output_directory, textgrid_errors)
def convert_ali_to_textgrids(align_config, output_directory, model_directory, dictionary, corpus, num_jobs):
"""
Multiprocessing function that aligns based on the current model
See:
- http://kaldi-asr.org/doc/linear-to-nbest_8cc.html
- http://kaldi-asr.org/doc/lattice-align-words_8cc.html
- http://kaldi-asr.org/doc/lattice-to-phone-lattice_8cc.html
- http://kaldi-asr.org/doc/nbest-to-ctm_8cc.html
for more details
on the Kaldi binaries this function calls.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/get_train_ctm.sh
for the bash script that this function was based on.
Parameters
----------
output_directory : str
Directory to write TextGrid files to
model_directory : str
Directory of training (monophone, triphone, speaker-adapted triphone
training directories)
dictionary : :class:`~montreal_forced_aligner.dictionary.Dictionary`
Dictionary object that has information about pronunciations
corpus : :class:`~montreal_forced_aligner.corpus.AlignableCorpus`
Corpus object that has information about the dataset
num_jobs : int
The number of processes to use in calculation
Raises
------
CorpusError
If the files per speaker exceeds the number of files that are
allowed to be open on the computer (for Unix-based systems)
"""
log_directory = os.path.join(model_directory, 'log')
frame_shift = align_config.feature_config.frame_shift / 1000
word_path = os.path.join(dictionary.phones_dir, 'word_boundary.int')
jobs = [(model_directory, word_path, corpus.split_directory(), x, frame_shift, True) # Word CTM jobs
for x in range(num_jobs)]
jobs += [(model_directory, word_path, corpus.split_directory(), x, frame_shift, False) # Phone CTM jobs
for x in range(num_jobs)]
corpus.logger.info('Generating CTMs from alignment...')
if align_config.use_mp:
run_mp(ali_to_ctm_func, jobs, log_directory)
else:
run_non_mp(ali_to_ctm_func, jobs, log_directory)
corpus.logger.info('Finished generating CTMs!')
corpus.logger.info('Exporting TextGrids from CTMs...')
if align_config.use_mp:
ctms_to_textgrids_mp(align_config, output_directory, model_directory, dictionary, corpus, num_jobs)
else:
ctms_to_textgrids_non_mp(align_config, output_directory, model_directory, dictionary, corpus, num_jobs)
corpus.logger.info('Finished exporting TextGrids!')
def tree_stats_func(directory, ci_phones, mdl, feature_string, ali_path, job_name):
context_opts = []
log_path = os.path.join(directory, 'log', 'acc_tree.{}.log'.format(job_name))
treeacc_path = os.path.join(directory, '{}.treeacc'.format(job_name))
with open(log_path, 'w', encoding='utf8') as log_file:
subprocess.call([thirdparty_binary('acc-tree-stats')] + context_opts +
['--ci-phones=' + ci_phones, mdl, '{}'.format(feature_string),
"ark:" + ali_path,
treeacc_path], stderr=log_file)
def tree_stats(directory, align_directory, split_directory, ci_phones, num_jobs, config):
"""
Multiprocessing function that computes stats for decision tree training
See http://kaldi-asr.org/doc/acc-tree-stats_8cc.html for more details
on the Kaldi binary this runs.
Parameters
----------
directory : str
Directory of training (triphone, speaker-adapted triphone
training directories)
align_directory : str
Directory of previous alignment
split_directory : str
Directory of training data split into the number of jobs
ci_phones : str
Colon-separated list of context-independent phones
num_jobs : int
The number of processes to use in calculation
"""
mdl_path = os.path.join(align_directory, 'final.mdl')
jobs = [(directory, ci_phones, mdl_path,
config.feature_config.construct_feature_proc_string(split_directory, directory, x),
os.path.join(align_directory, 'ali.{}'.format(x)), x) for x in range(num_jobs)]
if config.use_mp:
run_mp(tree_stats_func, jobs, config.log_directory)
else:
run_non_mp(tree_stats_func, jobs, config.log_directory)
tree_accs = [os.path.join(directory, '{}.treeacc'.format(x)) for x in range(num_jobs)]
log_path = os.path.join(directory, 'log', 'sum_tree_acc.log')
with open(log_path, 'w', encoding='utf8') as log_file:
subprocess.call([thirdparty_binary('sum-tree-stats'), os.path.join(directory, 'treeacc')] +
tree_accs, stderr=log_file)
# for f in tree_accs:
# os.remove(f)
def convert_alignments_func(directory, align_directory, job_name):
mdl_path = os.path.join(directory, '1.mdl')
tree_path = os.path.join(directory, 'tree')
ali_mdl_path = os.path.join(align_directory, 'final.mdl')
ali_path = os.path.join(align_directory, 'ali.{}'.format(job_name))
new_ali_path = os.path.join(directory, 'ali.{}'.format(job_name))
log_path = os.path.join(directory, 'log', 'convert.{}.log'.format(job_name))
with open(log_path, 'w', encoding='utf8') as log_file:
subprocess.call([thirdparty_binary('convert-ali'), ali_mdl_path,
mdl_path, tree_path, "ark:" + ali_path,
"ark:" + new_ali_path], stderr=log_file)
def convert_alignments(directory, align_directory, num_jobs, config):
"""
Multiprocessing function that converts alignments from previous training
See http://kaldi-asr.org/doc/convert-ali_8cc.html for more details
on the Kaldi binary this runs.
Parameters
----------
directory : str
Directory of training (triphone, speaker-adapted triphone
training directories)
align_directory : str
Directory of previous alignment
num_jobs : int
The number of processes to use in calculation
"""
jobs = [(directory, align_directory, x)
for x in range(num_jobs)]
if config.use_mp:
run_mp(convert_alignments_func, jobs, config.log_directory)
else:
run_non_mp(convert_alignments_func, jobs, config.log_directory)
def calc_fmllr_func(directory, split_directory, sil_phones, job_name, feature_string, config, initial,
model_name='final'):
log_path = os.path.join(directory, 'log', 'fmllr.{}.{}.log'.format(model_name, job_name))
ali_path = os.path.join(directory, 'ali.{}'.format(job_name))
mdl_path = os.path.join(directory, '{}.mdl'.format(model_name))
spk2utt_path = os.path.join(split_directory, 'spk2utt.{}'.format(job_name))
if not initial:
tmp_trans_path = os.path.join(directory, 'trans.temp.{}'.format(job_name))
else:
tmp_trans_path = os.path.join(directory, 'trans.{}'.format(job_name))
with open(log_path, 'w', encoding='utf8') as log_file:
post_proc = subprocess.Popen([thirdparty_binary('ali-to-post'),
"ark:" + ali_path, 'ark:-'], stderr=log_file, stdout=subprocess.PIPE)
weight_proc = subprocess.Popen([thirdparty_binary('weight-silence-post'), '0.0',
sil_phones, mdl_path, 'ark:-',
'ark:-'], stderr=log_file, stdin=post_proc.stdout, stdout=subprocess.PIPE)
if not initial:
trans_path = os.path.join(directory, 'trans.{}'.format(job_name))
cmp_trans_path = os.path.join(directory, 'trans.cmp.{}'.format(job_name))
est_proc = subprocess.Popen([thirdparty_binary('gmm-est-fmllr'),
'--verbose=4',
'--fmllr-update-type={}'.format(config.fmllr_update_type),
'--spk2utt=ark:' + spk2utt_path, mdl_path, '{}'.format(feature_string),
'ark:-', 'ark:-'],
stderr=log_file, stdin=weight_proc.stdout, stdout=subprocess.PIPE)
comp_proc = subprocess.Popen([thirdparty_binary('compose-transforms'),
'--b-is-affine=true',
'ark:-', 'ark:' + trans_path,
'ark:' + cmp_trans_path], stderr=log_file, stdin=est_proc.stdout)
comp_proc.communicate()
os.remove(trans_path)
os.rename(cmp_trans_path, trans_path)
else:
est_proc = subprocess.Popen([thirdparty_binary('gmm-est-fmllr'),
'--verbose=4',
'--fmllr-update-type={}'.format(config.fmllr_update_type),
'--spk2utt=ark:' + spk2utt_path, mdl_path, '{}'.format(feature_string),
'ark,s,cs:-', 'ark:' + tmp_trans_path],
stderr=log_file, stdin=weight_proc.stdout)
est_proc.communicate()
def calc_fmllr(directory, split_directory, sil_phones, num_jobs, config,
initial=False, iteration=None):
"""
Multiprocessing function that computes speaker adaptation (fMLLR)
See:
- http://kaldi-asr.org/doc/gmm-est-fmllr_8cc.html
- http://kaldi-asr.org/doc/ali-to-post_8cc.html
- http://kaldi-asr.org/doc/weight-silence-post_8cc.html
- http://kaldi-asr.org/doc/compose-transforms_8cc.html
- http://kaldi-asr.org/doc/transform-feats_8cc.html
for more details
on the Kaldi binary this runs.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/align_fmllr.sh
for the original bash script that this function was based on.
Parameters
----------
directory : str
Directory of training (triphone, speaker-adapted triphone
training directories)
split_directory : str
Directory of training data split into the number of jobs
sil_phones : str
Colon-separated list of silence phones
num_jobs : int
The number of processes to use in calculation
config : :class:`~aligner.config.TriphoneFmllrConfig`
Configuration object for training
initial : bool, optional
Whether this is the first computation of speaker-adaptation,
defaults to False
iteration : int or str
Specifies the current iteration, defaults to None
"""
config.logger.info('Calculating fMLLR for speaker adaptation...')
begin = time.time()
if iteration is None:
if initial:
model_name = '1'
else:
model_name = 'final'
else:
model_name = iteration
log_directory = os.path.join(directory, 'log')
jobs = [(directory, split_directory, sil_phones, x,
config.feature_config.construct_feature_proc_string(split_directory, directory, x),
config, initial, model_name) for x in range(num_jobs)]
if config.use_fmllr_mp:
run_mp(calc_fmllr_func, jobs, log_directory)
else:
run_non_mp(calc_fmllr_func, jobs, log_directory)
config.logger.debug(f'Fmllr calculation took {time.time() - begin}')
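# A hedged usage sketch: the call below shows how calc_fmllr might be invoked
# after an alignment pass. The directory layout, silence-phone string, job count
# and config object are illustrative assumptions, not values defined in this module.
#
#     calc_fmllr(directory='exp/tri_sat', split_directory='corpus/split4',
#                sil_phones='1:2:3', num_jobs=4, config=fmllr_config,
#                initial=True, iteration=2)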
def acc_stats_two_feats_func(directory, model_path, feature_string, si_feature_string, job_name):
log_path = os.path.join(directory, 'log', 'align_model_est.{}.log'.format(job_name))
acc_path = os.path.join(directory, 'align_model.{}.acc'.format(job_name))
with open(log_path, 'w', encoding='utf8') as log_file:
ali_to_post_proc = subprocess.Popen([thirdparty_binary('ali-to-post'),
'ark:' + os.path.join(directory, 'ali.{}'.format(job_name)),
'ark:-'],
stderr=log_file, stdout=subprocess.PIPE)
acc_proc = subprocess.Popen([thirdparty_binary('gmm-acc-stats-twofeats'), model_path,
feature_string, si_feature_string, "ark,s,cs:-", acc_path],
stderr=log_file, stdin=ali_to_post_proc.stdout)
acc_proc.communicate()
def create_align_model(directory, split_directory, num_jobs, config):
config.logger.info('Creating alignment model for speaker-independent features...')
begin = time.time()
log_directory = os.path.join(directory, 'log')
model_name = 'final'
model_path = os.path.join(directory, '{}.mdl'.format(model_name))
align_model_path = os.path.join(directory, '{}.alimdl'.format(model_name))
jobs = [(directory, model_path,
config.feature_config.construct_feature_proc_string(split_directory, directory, x),
config.feature_config.construct_feature_proc_string(split_directory, directory, x, speaker_independent=True),
x) for x in range(num_jobs)]
if config.use_mp:
run_mp(acc_stats_two_feats_func, jobs, log_directory)
else:
run_non_mp(acc_stats_two_feats_func, jobs, log_directory)
log_path = os.path.join(directory, 'log', 'align_model_est.final.log')
with open(log_path, 'w', encoding='utf8') as log_file:
acc_files = [os.path.join(directory, 'align_model.{}.acc'.format(x))
for x in range(num_jobs)]
est_proc = subprocess.Popen([thirdparty_binary('gmm-est'),
"--remove-low-count-gaussians=false", '--power=' + str(config.power),
model_path,
"{} - {}|".format(thirdparty_binary('gmm-sum-accs'),
' '.join(map(make_path_safe, acc_files))),
align_model_path],
stderr=log_file)
est_proc.communicate()
if not config.debug:
for f in acc_files:
os.remove(f)
config.logger.debug(f'Alignment model creation took {time.time() - begin}')
def lda_acc_stats_func(directory, feature_string, align_directory, config, ci_phones, i):
log_path = os.path.join(directory, 'log', 'ali_to_post.{}.log'.format(i))
with open(log_path, 'w', encoding='utf8') as log_file:
ali_to_post_proc = subprocess.Popen([thirdparty_binary('ali-to-post'),
'ark:' + os.path.join(align_directory, 'ali.{}'.format(i)),
'ark:-'],
stderr=log_file, stdout=subprocess.PIPE)
weight_silence_post_proc = subprocess.Popen([thirdparty_binary('weight-silence-post'),
str(config['boost_silence']), ci_phones,
os.path.join(align_directory, 'final.mdl'),
'ark:-', 'ark:-'],
stdin=ali_to_post_proc.stdout,
stderr=log_file, stdout=subprocess.PIPE)
acc_lda_post_proc = subprocess.Popen([thirdparty_binary('acc-lda'),
'--rand-prune=' + str(config['random_prune']),
os.path.join(align_directory, 'final.mdl'),
'{}'.format(feature_string),
'ark,s,cs:-',
os.path.join(directory, 'lda.{}.acc'.format(i))],
stdin=weight_silence_post_proc.stdout,
stderr=log_file)
acc_lda_post_proc.communicate()
def lda_acc_stats(directory, split_directory, align_directory, config, ci_phones, num_jobs):
"""
Multiprocessing function that accumulates LDA statistics
See:
- http://kaldi-asr.org/doc/ali-to-post_8cc.html
- http://kaldi-asr.org/doc/weight-silence-post_8cc.html
- http://kaldi-asr.org/doc/acc-lda_8cc.html
- http://kaldi-asr.org/doc/est-lda_8cc.html
    for more details on the Kaldi binaries this function uses.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/train_lda_mllt.sh
for the original bash script that this function was based on.
Parameters
----------
directory : str
Directory of LDA+MLLT training
split_directory : str
Directory of training data split into the number of jobs
align_directory : str
Directory of previous alignment
config : :class:`~aligner.config.LdaMlltConfig`
Configuration object for training
ci_phones : str
Colon-separated list of context-independent phones
num_jobs : int
The number of processes to use in calculation
"""
jobs = [(directory,
config.feature_config.construct_feature_proc_string(split_directory, directory, x, splice=True),
align_directory, config.lda_options, ci_phones, x) for x in range(num_jobs)]
if config.use_mp:
run_mp(lda_acc_stats_func, jobs, config.log_directory)
else:
run_non_mp(lda_acc_stats_func, jobs, config.log_directory)
log_path = os.path.join(directory, 'log', 'lda_est.log')
acc_list = []
for x in range(num_jobs):
acc_list.append(os.path.join(directory, 'lda.{}.acc'.format(x)))
with open(log_path, 'w', encoding='utf8') as log_file:
est_lda_proc = subprocess.Popen([thirdparty_binary('est-lda'),
'--write-full-matrix=' + os.path.join(directory, 'full.mat'),
'--dim=' + str(config.lda_dimension),
os.path.join(directory, 'lda.mat')] + acc_list,
stderr=log_file)
est_lda_proc.communicate()
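# Hedged example call (paths, phone string and job count are illustrative
# assumptions): lda_acc_stats is typically run once against an existing
# alignment directory before LDA+MLLT training starts.
#
#     lda_acc_stats(directory='exp/lda_mllt', split_directory='corpus/split4',
#                   align_directory='exp/tri_ali', config=lda_config,
#                   ci_phones='1:2:3', num_jobs=4)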
def calc_lda_mllt_func(directory, feature_string, sil_phones, job_name, config,
initial,
model_name='final'):
log_path = os.path.join(directory, 'log', 'lda_mllt.{}.{}.log'.format(model_name, job_name))
ali_path = os.path.join(directory, 'ali.{}'.format(job_name))
if not initial:
mdl_path = os.path.join(directory, '{}.mdl'.format(model_name))
else:
mdl_path = os.path.join(directory, '1.mdl')
model_name = 1
# Estimating MLLT
with open(log_path, 'a', encoding='utf8') as log_file:
post_proc = subprocess.Popen([thirdparty_binary('ali-to-post'),
"ark:" + ali_path, 'ark:-'],
stdout=subprocess.PIPE, stderr=log_file)
weight_proc = subprocess.Popen([thirdparty_binary('weight-silence-post'), '0.0',
sil_phones, mdl_path, 'ark:-',
'ark:-'],
stdin=post_proc.stdout, stdout=subprocess.PIPE, stderr=log_file)
acc_proc = subprocess.Popen([thirdparty_binary('gmm-acc-mllt'),
'--rand-prune=' + str(config['random_prune']),
mdl_path,
'{}'.format(feature_string),
'ark:-',
os.path.join(directory, '{}.{}.macc'.format(model_name, job_name))],
stdin=weight_proc.stdout, stderr=log_file)
acc_proc.communicate()
def calc_lda_mllt(directory, data_directory, sil_phones, num_jobs, config,
initial=False, iteration=None):
"""
Multiprocessing function that calculates LDA+MLLT transformations
See:
- http://kaldi-asr.org/doc/ali-to-post_8cc.html
- http://kaldi-asr.org/doc/weight-silence-post_8cc.html
- http://kaldi-asr.org/doc/gmm-acc-mllt_8cc.html
- http://kaldi-asr.org/doc/est-mllt_8cc.html
- http://kaldi-asr.org/doc/gmm-transform-means_8cc.html
- http://kaldi-asr.org/doc/compose-transforms_8cc.html
    for more details on the Kaldi binaries this function uses.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/train_lda_mllt.sh
for the original bash script that this function was based on.
Parameters
----------
directory : str
Directory of LDA+MLLT training
data_directory : str
Directory of training data split into the number of jobs
sil_phones : str
Colon-separated list of silence phones
num_jobs : int
The number of processes to use in calculation
config : :class:`~aligner.config.LdaMlltConfig`
Configuration object for training
initial : bool
Flag for first iteration
iteration : int
Current iteration
"""
if iteration is None:
model_name = 'final'
else:
model_name = iteration
jobs = [(directory,
config.feature_config.construct_feature_proc_string(data_directory, directory, x),
sil_phones, x, config.lda_options, initial, model_name) for x in range(num_jobs)]
if config.use_mp:
run_mp(calc_lda_mllt_func, jobs, config.log_directory)
else:
run_non_mp(calc_lda_mllt_func, jobs, config.log_directory)
mdl_path = os.path.join(directory, '{}.mdl'.format(model_name))
log_path = os.path.join(directory, 'log', 'transform_means.{}.log'.format(model_name))
previous_mat_path = os.path.join(directory, 'lda.mat')
new_mat_path = os.path.join(directory, 'lda_new.mat')
composed_path = os.path.join(directory, 'lda_composed.mat')
with open(log_path, 'a', encoding='utf8') as log_file:
macc_list = []
for x in range(num_jobs):
macc_list.append(os.path.join(directory, '{}.{}.macc'.format(model_name, x)))
subprocess.call([thirdparty_binary('est-mllt'),
new_mat_path]
+ macc_list,
stderr=log_file)
subprocess.call([thirdparty_binary('gmm-transform-means'),
new_mat_path,
mdl_path, mdl_path],
stderr=log_file)
if os.path.exists(previous_mat_path):
subprocess.call([thirdparty_binary('compose-transforms'),
new_mat_path,
previous_mat_path,
composed_path],
stderr=log_file)
os.remove(previous_mat_path)
os.rename(composed_path, previous_mat_path)
else:
os.rename(new_mat_path, previous_mat_path)
|
example/cifar10/resnet56_jpeg.py | KuanKuanQAQ/ares | 206 | 11071167 | from ares.utils import get_res_path
from ares.defense.jpeg_compression import jpeg_compression
import resnet56
MODEL_PATH = get_res_path('./cifar10/resnet56.ckpt')
def load(session):
model = ResNet56_JPEG()
model.load(MODEL_PATH, session)
return model
@jpeg_compression()
class ResNet56_JPEG(resnet56.ResNet56):
pass
if __name__ == '__main__':
resnet56.download(MODEL_PATH) |
package_control/file_not_found_error.py | William-Cao/Less- | 3,373 | 11071169 | import sys
class FileNotFoundError(Exception):
"""If a file is not found"""
def __unicode__(self):
return self.args[0]
def __str__(self):
if sys.version_info < (3,):
return self.__bytes__()
return self.__unicode__()
def __bytes__(self):
return self.__unicode__().encode('utf-8')
|
tests/test_dataset/test_test_time_aug.py | yuexy/mmocr | 2,261 | 11071172 | <filename>tests/test_dataset/test_test_time_aug.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmocr.datasets.pipelines.test_time_aug import MultiRotateAugOCR
def test_resize_ocr():
input_img1 = np.ones((64, 256, 3), dtype=np.uint8)
input_img2 = np.ones((64, 32, 3), dtype=np.uint8)
rci = MultiRotateAugOCR(transforms=[], rotate_degrees=[0, 90, 270])
# test invalid arguments
with pytest.raises(AssertionError):
MultiRotateAugOCR(transforms=[], rotate_degrees=[45])
with pytest.raises(AssertionError):
MultiRotateAugOCR(transforms=[], rotate_degrees=[20.5])
# test call with input_img1
results = {'img_shape': input_img1.shape, 'img': input_img1}
results = rci(results)
assert np.allclose([64, 256, 3], results['img_shape'])
assert len(results['img']) == 1
assert len(results['img_shape']) == 1
assert np.allclose([64, 256, 3], results['img_shape'][0])
# test call with input_img2
results = {'img_shape': input_img2.shape, 'img': input_img2}
results = rci(results)
assert np.allclose([64, 32, 3], results['img_shape'])
assert len(results['img']) == 3
assert len(results['img_shape']) == 3
assert np.allclose([64, 32, 3], results['img_shape'][0])
|
typed_python/compiler/merge_type_wrappers_test.py | APrioriInvestments/typed_python | 105 | 11071173 | from typed_python.compiler.merge_type_wrappers import mergeTypes
from typed_python import OneOf, Value, Class
class Base(Class):
pass
class Child(Base):
pass
def test_merge_types():
assert mergeTypes([float, int]) == OneOf(float, int)
assert mergeTypes([float, Value(1)]) == OneOf(1, float)
assert mergeTypes([float, Value(1.5)]) == float
assert mergeTypes([OneOf(1, float), int]) == OneOf(float, int)
assert mergeTypes([OneOf(float, Child), OneOf(int, Base)]) == OneOf(Base, float, int)
assert mergeTypes([object, str]) == object
assert mergeTypes([OneOf(str, None), object]) == object
|
SoftLayer/CLI/dedicatedhost/cancel_guests.py | dvzrv/softlayer-python | 126 | 11071203 | <gh_stars>100-1000
"""Cancel a dedicated host."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
@click.command()
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""Cancel all virtual guests of the dedicated host immediately.
Use the 'slcli vs cancel' command to cancel an specific guest
"""
dh_mgr = SoftLayer.DedicatedHostManager(env.client)
host_id = helpers.resolve_id(dh_mgr.resolve_ids, identifier, 'dedicated host')
if not (env.skip_confirmations or formatting.no_going_back(host_id)):
raise exceptions.CLIAbort('Aborted')
table = formatting.Table(['id', 'server name', 'status'])
result = dh_mgr.cancel_guests(host_id)
if result:
for status in result:
table.add_row([
status['id'],
status['fqdn'],
status['status']
])
env.fout(table)
else:
click.secho('There is not any guest into the dedicated host %s' % host_id, fg='red')
|
dizoo/mujoco/envs/mujoco_env.py | LuciusMos/DI-engine | 464 | 11071223 | from typing import Any, Union, List
import copy
import numpy as np
from easydict import EasyDict
from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo, update_shape
from ding.envs.common.env_element import EnvElement, EnvElementInfo
from ding.envs.common.common_function import affine_transform
from ding.torch_utils import to_ndarray, to_list
from ding.utils import ENV_REGISTRY
from .mujoco_wrappers import wrap_mujoco
MUJOCO_INFO_DICT = {
'Ant-v3': BaseEnvInfo(
agent_num=1,
obs_space=EnvElementInfo(
shape=(111, ),
value={
'min': np.float64("-inf"),
'max': np.float64("inf"),
'dtype': np.float32
},
),
act_space=EnvElementInfo(
shape=(8, ),
value={
'min': -1.0,
'max': 1.0,
'dtype': np.float32
},
),
rew_space=EnvElementInfo(
shape=1,
value={
'min': np.float64("-inf"),
'max': np.float64("inf")
},
),
use_wrappers=None,
),
'Hopper-v2': BaseEnvInfo(
agent_num=1,
obs_space=EnvElementInfo(
shape=(11, ),
value={
'min': np.float64("-inf"),
'max': np.float64("inf"),
'dtype': np.float32
},
),
act_space=EnvElementInfo(
shape=(3, ),
value={
'min': -1.0,
'max': 1.0,
'dtype': np.float32
},
),
rew_space=EnvElementInfo(
shape=1,
value={
'min': np.float64("-inf"),
'max': np.float64("inf")
},
),
use_wrappers=None,
),
'Walker2d-v2': BaseEnvInfo(
agent_num=1,
obs_space=EnvElementInfo(
shape=(17, ),
value={
'min': np.float64("-inf"),
'max': np.float64("inf"),
'dtype': np.float32
},
),
act_space=EnvElementInfo(
shape=(6, ),
value={
'min': -1.0,
'max': 1.0,
'dtype': np.float32
},
),
rew_space=EnvElementInfo(
shape=1,
value={
'min': np.float64("-inf"),
'max': np.float64("inf")
},
),
use_wrappers=None,
),
'HalfCheetah-v3': BaseEnvInfo(
agent_num=1,
obs_space=EnvElementInfo(
shape=(17, ),
value={
'min': np.float64("-inf"),
'max': np.float64("inf"),
'dtype': np.float32
},
),
act_space=EnvElementInfo(
shape=(6, ),
value={
'min': -1.0,
'max': 1.0,
'dtype': np.float32
},
),
rew_space=EnvElementInfo(
shape=1,
value={
'min': np.float64("-inf"),
'max': np.float64("inf")
},
),
use_wrappers=None,
),
'Hopper-v3': BaseEnvInfo(
agent_num=1,
obs_space=EnvElementInfo(
shape=(11, ),
value={
'min': np.float64("-inf"),
'max': np.float64("inf"),
'dtype': np.float32
},
),
act_space=EnvElementInfo(
shape=(3, ),
value={
'min': -1.0,
'max': 1.0,
'dtype': np.float32
},
),
rew_space=EnvElementInfo(
shape=1,
value={
'min': np.float64("-inf"),
'max': np.float64("inf")
},
),
use_wrappers=None,
),
'InvertedPendulum-v2': BaseEnvInfo(
agent_num=1,
obs_space=EnvElementInfo(
shape=(4, ),
value={
'min': np.float64("-inf"),
'max': np.float64("inf"),
'dtype': np.float32
},
),
act_space=EnvElementInfo(
shape=(1, ),
value={
'min': -1.0,
'max': 1.0,
'dtype': np.float32
},
),
rew_space=EnvElementInfo(
shape=1,
value={
'min': np.float64("-inf"),
'max': np.float64("inf")
},
),
use_wrappers=None,
),
'InvertedDoublePendulum-v2': BaseEnvInfo(
agent_num=1,
obs_space=EnvElementInfo(
shape=(11, ),
value={
'min': np.float64("-inf"),
'max': np.float64("inf"),
'dtype': np.float32
},
),
act_space=EnvElementInfo(
shape=(1, ),
value={
'min': -1.0,
'max': 1.0,
'dtype': np.float32
},
),
rew_space=EnvElementInfo(
shape=1,
value={
'min': np.float64("-inf"),
'max': np.float64("inf")
},
),
use_wrappers=None,
),
'Reacher-v2': BaseEnvInfo(
agent_num=1,
obs_space=EnvElementInfo(
shape=(11, ),
value={
'min': np.float64("-inf"),
'max': np.float64("inf"),
'dtype': np.float32
},
),
act_space=EnvElementInfo(
shape=(2, ),
value={
'min': -1.0,
'max': 1.0,
'dtype': np.float32
},
),
rew_space=EnvElementInfo(
shape=1,
value={
'min': np.float64("-inf"),
'max': np.float64("inf")
},
),
use_wrappers=None,
),
'Walker2d-v3': BaseEnvInfo(
agent_num=1,
obs_space=EnvElementInfo(
shape=(17, ),
value={
'min': np.float64("-inf"),
'max': np.float64("inf"),
'dtype': np.float32
},
),
act_space=EnvElementInfo(
shape=(6, ),
value={
'min': -1.0,
'max': 1.0,
'dtype': np.float32
},
),
rew_space=EnvElementInfo(
shape=1,
value={
'min': np.float64("-inf"),
'max': np.float64("inf")
},
),
use_wrappers=None,
),
}
@ENV_REGISTRY.register('mujoco')
class MujocoEnv(BaseEnv):
@classmethod
def default_config(cls: type) -> EasyDict:
cfg = EasyDict(copy.deepcopy(cls.config))
cfg.cfg_type = cls.__name__ + 'Dict'
return cfg
config = dict(
use_act_scale=False,
delay_reward_step=0,
)
def __init__(self, cfg: dict) -> None:
self._cfg = cfg
self._use_act_scale = cfg.use_act_scale
if 'delay_reward_step' in cfg:
self._delay_reward_step = cfg.delay_reward_step
else:
self._delay_reward_step = self.default_config().delay_reward_step
self._init_flag = False
def reset(self) -> np.ndarray:
if not self._init_flag:
self._env = self._make_env(only_info=False)
self._init_flag = True
if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed:
np_seed = 100 * np.random.randint(1, 1000)
self._env.seed(self._seed + np_seed)
elif hasattr(self, '_seed'):
self._env.seed(self._seed)
obs = self._env.reset()
obs = to_ndarray(obs).astype('float32')
self._final_eval_reward = 0.
if self._delay_reward_step > 1:
self._delay_reward_duration = 0
self._current_delay_reward = 0.
return obs
def close(self) -> None:
if self._init_flag:
self._env.close()
self._init_flag = False
def seed(self, seed: int, dynamic_seed: bool = True) -> None:
self._seed = seed
self._dynamic_seed = dynamic_seed
np.random.seed(self._seed)
def step(self, action: Union[np.ndarray, list]) -> BaseEnvTimestep:
action = to_ndarray(action)
if self._use_act_scale:
action_range = self.info().act_space.value
action = affine_transform(action, min_val=action_range['min'], max_val=action_range['max'])
obs, rew, done, info = self._env.step(action)
self._final_eval_reward += rew
obs = to_ndarray(obs).astype('float32')
if self._delay_reward_step > 1:
self._current_delay_reward += rew
self._delay_reward_duration += 1
if done or self._delay_reward_duration >= self._delay_reward_step:
rew = to_ndarray([self._current_delay_reward])
self._current_delay_reward = 0.
self._delay_reward_duration = 0
else:
rew = to_ndarray([0.])
else:
            rew = to_ndarray([rew])  # wrapped so the reward is transferred as an array with shape (1,)
if done:
info['final_eval_reward'] = self._final_eval_reward
return BaseEnvTimestep(obs, rew, done, info)
def info(self) -> BaseEnvInfo:
if self._cfg.env_id in MUJOCO_INFO_DICT:
info = copy.deepcopy(MUJOCO_INFO_DICT[self._cfg.env_id])
info.use_wrappers = self._make_env(only_info=True)
obs_shape, act_shape, rew_shape = update_shape(
info.obs_space.shape, info.act_space.shape, info.rew_space.shape, info.use_wrappers.split('\n')
)
info.obs_space.shape = obs_shape
info.act_space.shape = act_shape
info.rew_space.shape = rew_shape
return info
else:
keys = MUJOCO_INFO_DICT.keys()
raise NotImplementedError('{} not found in MUJOCO_INFO_DICT [{}]'.format(self._cfg.env_id, keys))
def _make_env(self, only_info=False):
return wrap_mujoco(
self._cfg.env_id,
norm_obs=self._cfg.get('norm_obs', None),
norm_reward=self._cfg.get('norm_reward', None),
only_info=only_info
)
def __repr__(self) -> str:
return "DI-engine Mujoco Env({})".format(self._cfg.env_id)
@staticmethod
def create_collector_env_cfg(cfg: dict) -> List[dict]:
collector_cfg = copy.deepcopy(cfg)
collector_env_num = collector_cfg.pop('collector_env_num', 1)
return [collector_cfg for _ in range(collector_env_num)]
@staticmethod
def create_evaluator_env_cfg(cfg: dict) -> List[dict]:
evaluator_cfg = copy.deepcopy(cfg)
evaluator_env_num = evaluator_cfg.pop('evaluator_env_num', 1)
evaluator_cfg.norm_reward.use_norm = False
return [evaluator_cfg for _ in range(evaluator_env_num)]
|
python-threatexchange/threatexchange/dataset.py | b-bold/ThreatExchange | 997 | 11071224 | <filename>python-threatexchange/threatexchange/dataset.py
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A wrapper around loading and storing ThreatExchange data from files.
There are a few categories of state that this wraps:
1. Checkpoints - state about previous fetches
2. Collaboration Indicator Dumps - Raw output from threat_updates
3. Index state - serializations of indexes for SignalType
"""
import json
import pathlib
import typing as t
from . import collab_config
from .content_type import meta
from .signal_type import signal_base
from .signal_type import index
class FetchCheckpoint(t.NamedTuple):
last_full_fetch: float
last_fetch: float
def next(self, fetch_start_time: float, full_fetch: bool) -> "FetchCheckpoint":
full_fetch = full_fetch or not self.last_full_fetch
return FetchCheckpoint(
fetch_start_time if full_fetch else self.last_full_fetch, fetch_start_time
)
def serialize(self) -> str:
return f"{self.last_full_fetch} {self.last_fetch}"
@classmethod
def deserialize(cls, s: str) -> "FetchCheckpoint":
last_full, _, last = s.partition(" ")
return cls(float(last_full), float(last))
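# Illustrative round-trip of the space-separated "<last_full_fetch> <last_fetch>"
# format used by serialize()/deserialize(); the timestamps are made-up values.
#
#     cp = FetchCheckpoint(0, 0).next(fetch_start_time=100.0, full_fetch=True)
#     assert cp.serialize() == "100.0 100.0"
#     assert FetchCheckpoint.deserialize("100.0 200.0") == FetchCheckpoint(100.0, 200.0)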
class Dataset:
EXTENSION = ".te"
def __init__(
self,
config: collab_config.CollaborationConfig,
state_dir: t.Optional[pathlib.Path] = None,
) -> None:
self.config = config
if state_dir is None:
state_dir = pathlib.Path.home() / config.default_state_dir_name
assert not state_dir.is_file()
self.state_dir = state_dir
@property
def is_cache_empty(self) -> bool:
return not (
self.state_dir.exists() and any(self.state_dir.glob(f"*{self.EXTENSION}"))
)
def _fetch_checkpoint_path(self) -> pathlib.Path:
return self.state_dir / f"fetch_checkpoint{self.EXTENSION}"
def _indicator_checkpoint_path(self, privacy_group: int) -> pathlib.Path:
return (
self.state_dir / f"indicators/{privacy_group}/_checkpoint{self.EXTENSION}"
)
def clear_cache(self) -> None:
for p in self.state_dir.iterdir():
if p.suffix == self.EXTENSION:
p.unlink()
def record_fetch_checkpoint(
self, fetch_started_timestamp: float, full_fetch: bool
) -> None:
prev = self.get_fetch_checkpoint()
with self._fetch_checkpoint_path().open("w+") as f:
f.write(prev.next(fetch_started_timestamp, full_fetch).serialize())
def get_fetch_checkpoint(self) -> FetchCheckpoint:
checkpoint = self._fetch_checkpoint_path()
if not checkpoint.exists():
return FetchCheckpoint(0, 0)
return FetchCheckpoint.deserialize(checkpoint.read_text())
def _signal_state_file(self, signal_type: signal_base.SignalType) -> pathlib.Path:
return self.state_dir / f"{signal_type.get_name()}{self.EXTENSION}"
def _index_file(self, signal_type: signal_base.SignalType) -> pathlib.Path:
return self.state_dir / f"{signal_type.get_name()}.index{self.EXTENSION}"
def store_cache(self, signal_type: signal_base.SignalType) -> None:
if not self.state_dir.exists():
self.state_dir.mkdir()
signal_type.store(self._signal_state_file(signal_type))
def load_cache(
self, signal_types: t.Optional[t.Iterable[signal_base.SignalType]] = None
) -> t.List[signal_base.SignalType]:
"""Load everything in the state directory and initialize signal types"""
if signal_types is None:
signal_types = [s() for s in meta.get_all_signal_types()]
ret = []
for signal_type in signal_types:
signal_state_file = self._signal_state_file(signal_type)
if signal_state_file.exists():
signal_type.load(signal_state_file)
ret.append(signal_type)
return ret
def store_index(self, signal_type: signal_base.SignalType, index) -> None:
if not self.state_dir.exists():
self.state_dir.mkdir()
path = self._index_file(signal_type)
if index is None:
if path.exists():
path.unlink()
return
with path.open("wb") as fout:
index.serialize(fout)
def load_index(
self, signal_type: signal_base.SignalType
) -> t.Optional[index.SignalTypeIndex]:
path = self._index_file(signal_type)
if not path.exists():
return None
with path.open("rb") as fin:
return signal_type.get_index_cls.deserialize(fin)
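# A minimal usage sketch (the config object and the time import are assumptions
# made for illustration): load any cached signal state, try to load a prebuilt
# index, then record when the fetch started.
#
#     dataset = Dataset(collab_config.CollaborationConfig(...))
#     signal_types = dataset.load_cache()
#     idx = dataset.load_index(signal_types[0]) if signal_types else None
#     dataset.record_fetch_checkpoint(time.time(), full_fetch=dataset.is_cache_empty)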
|
tests/extmod/ucryptolib_aes128_ctr.py | sebastien-riou/micropython | 13,648 | 11071230 | try:
from ucryptolib import aes
except ImportError:
print("SKIP")
raise SystemExit
def _new(k, ctr_initial):
return aes(k, 6, ctr_initial)
try:
_new(b"x" * 16, b"x" * 16)
except ValueError as e:
# is CTR support disabled?
if e.args[0] == "mode":
print("SKIP")
raise SystemExit
raise e
crypto = _new(b"1234" * 4, b"5678" * 4)
enc = crypto.encrypt(b"a")
print(enc)
enc += crypto.encrypt(b"b" * 1000)
print(enc)
crypto = _new(b"1234" * 4, b"5678" * 4)
print(crypto.decrypt(enc))
|
openproblems/tasks/denoising/api.py | bendemeo/SingleCellOpenProblems | 134 | 11071250 | from ...data.sample import load_sample_data
import numpy as np
def check_dataset(adata):
"""Check that dataset output fits expected API."""
assert "train" in adata.obsm
assert "test" in adata.obsm
assert adata.obsm["train"].shape == adata.X.shape
assert adata.obsm["test"].shape == adata.X.shape
return True
def check_method(adata):
"""Check that method output fits expected API."""
assert "denoised" in adata.obsm
assert adata.obsm["denoised"].shape == adata.X.shape
return True
def sample_dataset():
"""Create a simple dataset to use for testing methods in this task."""
adata = load_sample_data()
adata.obsm["train"] = adata.X.toarray()
adata.obsm["train"] = np.random.binomial(
n=adata.obsm["train"].astype(int), p=0.8, size=adata.obsm["train"].shape
).astype(float)
adata.obsm["test"] = adata.X.toarray()
adata.obsm["test"].data = np.random.binomial(
n=adata.obsm["test"].astype(int), p=0.2, size=adata.obsm["test"].shape
).astype(float)
return adata
def sample_method(adata):
"""Create sample method output for testing metrics in this task."""
adata.obsm["denoised"] = adata.X.toarray() * 0.2
return adata
|
test/test_correlog.py | butala/spectrum | 261 | 11071254 | from spectrum import CORRELOGRAMPSD, CORRELATION, cshift, pcorrelogram, marple_data
from spectrum import data_two_freqs
from pylab import log10, plot, savefig, linspace
from numpy.testing import assert_array_almost_equal, assert_almost_equal
def test_correlog():
psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)
assert_almost_equal(psd[0], 0.138216970)
assert_almost_equal(psd[1000-1], 7.900110787)
assert_almost_equal(psd[2000-1], 0.110103858)
assert_almost_equal(psd[3000-1], 0.222184134)
assert_almost_equal(psd[4000-1], -0.036255277)
assert_almost_equal(psd[4096-1], 0.1391839711)
return psd
def test_correlog_auto_cross():
"""Same as test_correlog but x and y provided"""
psd1 = CORRELOGRAMPSD(marple_data, lag=16)
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16)
assert_array_almost_equal(psd1, psd2)
psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method='CORRELATION')
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method='CORRELATION')
assert_array_almost_equal(psd1, psd2)
def test_correlog_correlation_method():
"""test correlogramPSD playing with method argument"""
psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method='CORRELATION')
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method='xcorr')
assert_array_almost_equal(psd1, psd2)
def test_pcorrelogram_class():
p = pcorrelogram(marple_data, lag=16)
p()
print(p)
p = pcorrelogram(data_two_freqs(), lag=16)
p.plot()
print(p)
def test_CORRELOGRAMPSD_others():
p = CORRELOGRAMPSD(marple_data, marple_data, lag=16, NFFT=None)
def create_figure():
psd = test_correlog()
f = linspace(-0.5, 0.5, len(psd))
    psd = cshift(psd, len(psd)//2)
plot(f, 10*log10(psd/max(psd)))
savefig('psd_corr.png')
if __name__ == "__main__":
create_figure()
|
tests/components/p1_monitor/__init__.py | MrDelik/core | 30,023 | 11071256 | """Tests for the P1 Monitor integration."""
|
coin_monitor/system_info.py | TST-Group-BE/chia_plot_manager | 365 | 11071259 | <gh_stars>100-1000
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
system_info.py for use with coin_monitor.py
"""
VERSION = "V0.2 (2021-03-23)"
# Set Notification Accounts
alert_email = ['<EMAIL>', '<EMAIL>']
new_coin_email = ['<EMAIL>', '<EMAIL>']
twilio_from = '+10000000'
twilio_account = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
twilio_token = '<PASSWORD>'
twilio_to = ['+17763540098', '+17763540086']
pushbilletAPI = "<KEY>" # pushbullet API token (http://www.pushbullet.com)
def main():
print("This script is not intended to be run directly.")
print("This is the systemwide Credentials & Settings module.")
print("It is called by other modules.")
exit()
if __name__ == '__main__':
main()
|
language/mentionmemory/tasks/memory_generation_task_test.py | urikz/language | 1,199 | 11071264 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EaE task."""
import copy
import itertools
import os
from absl.testing import absltest
from absl.testing import parameterized
import jax
from language.mentionmemory.encoders import eae_encoder # pylint: disable=unused-import
from language.mentionmemory.tasks import memory_generation_task
from language.mentionmemory.utils import data_utils
from language.mentionmemory.utils import test_utils
import ml_collections
import numpy as np
# easiest to define as constant here
MENTION_SIZE = 2
def gen_eae_test_list():
text_lengths = [0, 50, 128]
n_mention_list = [0, 5, 10, 15]
n_linked_mention_list = [0, 3, 5, 8, 10, 12, 15]
# pylint: disable=g-complex-comprehension
test_list = [
(text_length, n_mentions, n_linked_mentions)
for (
text_length,
n_mentions,
n_linked_mentions,
) in itertools.product(text_lengths, n_mention_list,
n_linked_mention_list)
if not (n_mentions *
MENTION_SIZE >= text_length or n_linked_mentions > n_mentions)
]
return test_list
class MemoryGenerationTaskTest(parameterized.TestCase):
"""Tests for MemoryGeneration task."""
encoder_config = {
'dtype': 'float32',
'vocab_size': 1000,
'entity_vocab_size': 1000,
'max_positions': 512,
'max_length': 128,
'hidden_size': 64,
'intermediate_dim': 128,
'entity_dim': 3,
'num_attention_heads': 2,
'num_initial_layers': 1,
'num_final_layers': 1,
'dropout_rate': 0.1,
}
model_config = {
'encoder_config': encoder_config,
'encoder_name': 'eae',
'dtype': 'float32',
}
config = {
'model_config': model_config,
'task_name': 'memory_generation',
'seed': 0,
'per_device_batch_size': 2,
'samples_per_example': 1,
'memory_dim': 3,
'mask_rate': 0,
'mention_mask_rate': 0,
'max_mlm_targets': 0,
'max_mention_targets': 10,
'max_mentions': 20,
'max_mentions_per_sample': 11,
'min_distance_from_passage_boundary': 2,
}
@parameterized.parameters(gen_eae_test_list())
def test_prediction_fn(self, text_length, n_mentions, n_linked_mentions):
"""Test loss function runs and produces expected values."""
config = copy.deepcopy(self.config)
config = ml_collections.FrozenConfigDict(config)
max_length = config.model_config.encoder_config.max_length
preprocess_fn = memory_generation_task.MemoryGenerationTask.make_preprocess_fn(
config)
collater_fn = memory_generation_task.MemoryGenerationTask.make_collater_fn(
config)
model = memory_generation_task.MemoryGenerationTask.build_model(
config.model_config)
dummy_input = memory_generation_task.MemoryGenerationTask.dummy_input(
config)
init_rng = jax.random.PRNGKey(0)
init_parameters = model.init(init_rng, dummy_input, True)
raw_example = test_utils.gen_mention_pretraining_sample(
text_length, n_mentions, n_linked_mentions, max_length=max_length)
processed_example = preprocess_fn(raw_example)
batch = {
key: np.tile(value, (config.per_device_batch_size, 1))
for key, value in processed_example.items()
}
batch = collater_fn(batch)
batch = jax.tree_map(np.asarray, batch)
predict_fn = memory_generation_task.MemoryGenerationTask.make_prediction_fn(
config)
predictions = predict_fn(
model_config=config.model_config,
model_params=init_parameters['params'],
model_vars={},
batch=batch,
)
self.assertSequenceEqual(predictions['values'].shape,
(config.max_mention_targets *
config.per_device_batch_size, config.memory_dim))
def gen_memory_saver_test_list():
num_total_memories_list = [1, 20, 50]
batch_size_list = [1, 5, 10]
n_mentions_per_batch_list = [1, 10]
num_shards_list = [1, 2, 3]
n_devices_list = [1, 2, 3]
shard_size_divisible_list = [1, 3, 5]
test_list = list(
itertools.product(num_total_memories_list, batch_size_list,
n_mentions_per_batch_list, n_devices_list,
num_shards_list, shard_size_divisible_list))
return test_list
class MemorySaverTest(parameterized.TestCase):
"""Tests for MemoryGeneration task."""
memory_dim = 3
memory_key_dim = 2
text_length = 2
max_mentions_per_sample = 3
def _stack(self, d):
self.assertNotEmpty(d)
keys = d[0].keys()
result = {}
for key in keys:
result[key] = np.stack([x[key] for x in d])
return result
def _sample_batch(self, batch_index, batch_size, n_mentions_per_batch):
mention_target_weights = np.random.randint(2, size=(n_mentions_per_batch,))
# Unique mention ID per sample
mention_target_ids = 1 + np.arange(
n_mentions_per_batch) + batch_index * n_mentions_per_batch
mention_batch_positions = np.random.randint(
batch_size, size=(n_mentions_per_batch,))
text_identifiers = 1 + mention_batch_positions + batch_index * batch_size
# mention hashes and encodings are same as entity IDs
mention_encodings = np.expand_dims(mention_target_ids, 1)
mention_encodings = np.tile(mention_encodings, self.memory_dim)
text_ids = 1 + np.arange(batch_size) + batch_index * batch_size
text_ids = np.expand_dims(text_ids, 1)
text_ids = np.tile(text_ids, self.text_length)
# Collect unique entity IDs per every passage
text_entities = [set() for _ in range(batch_size)]
for m_index in range(n_mentions_per_batch):
if mention_target_weights[m_index] > 0:
text_entities[mention_batch_positions[m_index]].add(
mention_target_ids[m_index])
unique_mention_ids = np.zeros((batch_size, self.max_mentions_per_sample),
dtype=np.int32)
# pylint:disable=g-explicit-length-test
for i in range(batch_size):
text_entities[i] = np.array(list(text_entities[i]), dtype=np.int32)
num_unique_entities = len(text_entities[i])
if num_unique_entities > self.max_mentions_per_sample:
unique_mention_ids[i] = text_entities[i][:self.max_mentions_per_sample]
elif num_unique_entities > 0:
unique_mention_ids[i, :num_unique_entities] = text_entities[i]
else:
# i-th sample doesn't contain any entities
pass
batch = {
'mention_target_weights': mention_target_weights,
'mention_target_ids': mention_target_ids,
'target_text_identifiers': text_identifiers,
'target_mention_hashes': mention_target_ids,
'text_ids': text_ids,
'mention_target_batch_positions': mention_batch_positions,
'mention_target_start_positions': mention_target_ids,
'mention_target_end_positions': mention_target_ids,
'unique_mention_ids': unique_mention_ids,
}
predictions = {
'values': mention_encodings,
'keys': mention_encodings[:, :self.memory_key_dim],
}
return batch, predictions
@parameterized.parameters(gen_memory_saver_test_list())
def test_memory_saver(self, num_total_memories, batch_size,
n_mentions_per_batch, n_devices, num_shards,
shard_size_divisible):
memory_saver = memory_generation_task.MemorySaver(
num_total_memories, self.memory_dim, self.text_length,
self.max_mentions_per_sample, self.memory_key_dim)
mention_to_batch = []
batch_to_set_of_mentions = {}
batch_index = 0
while True:
all_batch = []
all_predictions = []
for _ in range(n_devices):
batch, predictions = self._sample_batch(batch_index, batch_size,
n_mentions_per_batch)
for i in range(n_mentions_per_batch):
if batch['mention_target_weights'][i] == 1:
current_batch_index = batch['target_text_identifiers'][i]
entity_id = batch['mention_target_ids'][i]
mention_to_batch.append((entity_id, current_batch_index))
if current_batch_index not in batch_to_set_of_mentions:
batch_to_set_of_mentions[current_batch_index] = set()
batch_to_set_of_mentions[current_batch_index].add(entity_id)
batch_index += 1
all_batch.append(batch)
all_predictions.append(predictions)
all_batch = self._stack(all_batch)
all_predictions = self._stack(all_predictions)
memory_saver.add_memories(all_batch, all_predictions)
if memory_saver.get_num_memories() >= num_total_memories:
break
# Keep only first num_total_memories memories
mention_to_batch = dict(mention_to_batch[:num_total_memories])
tmp_dir = self.create_tempdir()
memory_saver.save(tmp_dir.full_path, num_shards, 1, 0, shard_size_divisible)
def load_array(suffix):
return data_utils.load_sharded_array(
os.path.join(tmp_dir.full_path,
suffix + '-?????-of-%05d' % num_shards), 1, 0)
mention_encodings = load_array('encodings')
mention_target_ids = load_array('labels')
text_identifiers = load_array('hashes')
mention_hashes = load_array('mention_hashes')
texts = load_array('texts')
positions = load_array('positions')
text_entities = load_array('text_entities')
self.assertSetEqual(
set(mention_to_batch.keys()),
set(mention_target_ids[mention_target_ids > 0]))
for i in range(len(mention_target_ids)):
if mention_target_ids[i] > 0:
batch_index = mention_to_batch[mention_target_ids[i]]
self.assertEqual(text_identifiers[i], batch_index)
self.assertTrue(np.all(texts[i] == batch_index))
self.assertEqual(mention_hashes[i], mention_target_ids[i])
self.assertTrue(np.all(mention_encodings[i] == mention_target_ids[i]))
self.assertTrue(np.all(positions[i] == mention_target_ids[i]))
current_text_entities = [x for x in text_entities[i] if x != 0]
self.assertSequenceEqual(
sorted(current_text_entities),
sorted(list(set(current_text_entities))))
current_text_entities = set(current_text_entities)
# These two sets might not be exactly equal, since `text_entities`
# contains at most `max_mentions_per_sample` unique entities for every
# mention.
self.assertContainsSubset(current_text_entities,
batch_to_set_of_mentions[batch_index])
if __name__ == '__main__':
absltest.main()
|
tests/core/asyncio/test_endpoint_local_broadcast.py | gsalgado/lahja | 400 | 11071266 | import asyncio
import pytest
from lahja import BaseEvent, BaseRequestResponseEvent
@pytest.mark.asyncio
async def test_local_broadcast_is_connected_to_self(endpoint):
assert endpoint.is_connected_to(endpoint.name)
@pytest.mark.asyncio
async def test_local_broadcast_wait_until_connected_to(endpoint):
await asyncio.wait_for(endpoint.wait_until_connected_to(endpoint.name), timeout=0.1)
@pytest.mark.asyncio
async def test_local_broadcast_result_in_being_present_in_remotes(endpoint):
names = {name for name, _ in endpoint.get_connected_endpoints_and_subscriptions()}
assert endpoint.name in names
class BroadcastEvent(BaseEvent):
pass
@pytest.mark.asyncio
async def test_local_broadcast_wait_until_endpoint_subscriptions_change(endpoint):
ready = asyncio.Event()
done = asyncio.Event()
async def do_wait():
ready.set()
await endpoint.wait_until_endpoint_subscriptions_change()
done.set()
asyncio.ensure_future(do_wait())
await ready.wait()
assert not done.is_set()
endpoint.subscribe(BroadcastEvent, lambda ev: None)
await asyncio.wait_for(done.wait(), timeout=0.1)
@pytest.mark.asyncio
async def test_subscribe_and_broadcast_to_self(endpoint):
got_event = asyncio.Event()
endpoint.subscribe(BroadcastEvent, lambda ev: got_event.set())
assert not got_event.is_set()
await endpoint.broadcast(BroadcastEvent())
await asyncio.wait_for(got_event.wait(), timeout=0.1)
assert got_event.is_set()
@pytest.mark.asyncio
async def test_wait_for_and_broadcast_to_self(endpoint):
ready = asyncio.Event()
got_event = asyncio.Event()
async def do_wait_for():
ready.set()
await endpoint.wait_for(BroadcastEvent)
got_event.set()
asyncio.ensure_future(do_wait_for())
await ready.wait()
assert not got_event.is_set()
await endpoint.broadcast(BroadcastEvent())
await asyncio.wait_for(got_event.wait(), timeout=0.1)
assert got_event.is_set()
@pytest.mark.asyncio
async def test_stream_and_broadcast_to_self(endpoint):
ready = asyncio.Event()
finished_stream = asyncio.Event()
async def do_stream():
ready.set()
async for ev in endpoint.stream(BroadcastEvent, num_events=3):
pass
finished_stream.set()
asyncio.ensure_future(do_stream())
await ready.wait()
assert not finished_stream.is_set()
await endpoint.broadcast(BroadcastEvent())
await endpoint.broadcast(BroadcastEvent())
await endpoint.broadcast(BroadcastEvent())
await asyncio.wait_for(finished_stream.wait(), timeout=0.1)
assert finished_stream.is_set()
class Response(BaseEvent):
def __init__(self, value):
self.value = value
class Request(BaseRequestResponseEvent[Response]):
@staticmethod
def expected_response_type():
return Response
def __init__(self, value):
self.value = value
@pytest.mark.asyncio
async def test_request_response_and_broadcast_to_self(endpoint):
ready = asyncio.Event()
async def do_response():
ready.set()
req = await endpoint.wait_for(Request)
await endpoint.broadcast(Response(req.value), req.broadcast_config())
asyncio.ensure_future(do_response())
await ready.wait()
resp = await asyncio.wait_for(endpoint.request(Request("test")), timeout=0.1)
assert isinstance(resp, Response)
assert resp.value == "test"
|
lte/gateway/python/magma/enodebd/main.py | Aitend/magma | 849 | 11071268 | <reponame>Aitend/magma
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from threading import Thread
from typing import List
from unittest import mock
from lte.protos.mconfig import mconfigs_pb2
from magma.common.sentry import sentry_init
from magma.common.service import MagmaService
from magma.enodebd.enodeb_status import (
get_operational_states,
get_service_status_old,
)
from magma.enodebd.logger import EnodebdLogger as logger
from magma.enodebd.state_machines.enb_acs_manager import StateMachineManager
from orc8r.protos.service303_pb2 import State
from .enodebd_iptables_rules import set_enodebd_iptables_rule
from .rpc_servicer import EnodebdRpcServicer
from .stats_manager import StatsManager
from .tr069.server import tr069_server
def get_context(ip: str):
with mock.patch('spyne.server.wsgi.WsgiApplication') as MockTransport:
MockTransport.req_env = {"REMOTE_ADDR": ip}
with mock.patch('spyne.server.wsgi.WsgiMethodContext') as MockContext:
MockContext.transport = MockTransport
return MockContext
def main():
"""
Top-level function for enodebd
"""
service = MagmaService('enodebd', mconfigs_pb2.EnodebD())
logger.init()
# Optionally pipe errors to Sentry
sentry_init(service_name=service.name, sentry_mconfig=service.shared_mconfig.sentry_config)
# State machine manager for tracking multiple connected eNB devices.
state_machine_manager = StateMachineManager(service)
# Statistics manager
stats_mgr = StatsManager(state_machine_manager)
stats_mgr.run()
# Start TR-069 thread
server_thread = Thread(
target=tr069_server,
args=(state_machine_manager,),
daemon=True,
)
server_thread.start()
# Add all servicers to the server
enodebd_servicer = EnodebdRpcServicer(state_machine_manager)
enodebd_servicer.add_to_server(service.rpc_server)
# Register function to get service status
def get_enodebd_status():
return get_service_status_old(state_machine_manager)
service.register_get_status_callback(get_enodebd_status)
# Register a callback function for GetOperationalStates service303 function
def get_enodeb_operational_states() -> List[State]:
return get_operational_states(state_machine_manager, service.mconfig)
service.register_operational_states_callback(get_enodeb_operational_states)
# Set eNodeBD iptables rules due to exposing public IP to eNodeB
service.loop.create_task(set_enodebd_iptables_rule())
# Run the service loop
service.run()
# Cleanup the service
service.close()
def call_repeatedly(loop, interval, function, *args, **kwargs):
"""
Wrapper function to schedule function periodically
"""
# Schedule next call
loop.call_later(
interval, call_repeatedly, loop, interval, function,
*args, **kwargs,
)
# Call function
function(*args, **kwargs)
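# Hypothetical usage sketch (the callback and interval are illustrative, not
# part of this service): schedule a periodic log message on the service loop.
#
#     call_repeatedly(service.loop, 60, logger.info, 'enodebd heartbeat')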
if __name__ == "__main__":
main()
|
src/pretix/api/views/__init__.py | fabm3n/pretix | 1,248 | 11071275 | <gh_stars>1000+
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from calendar import timegm
from django.db.models import Max
from django.http import HttpResponse
from django.utils.http import http_date, parse_http_date_safe
from rest_framework.filters import OrderingFilter
class RichOrderingFilter(OrderingFilter):
def filter_queryset(self, request, queryset, view):
ordering = self.get_ordering(request, queryset, view)
if ordering:
if hasattr(view, 'ordering_custom'):
newo = []
for ordering_part in ordering:
ob = view.ordering_custom.get(ordering_part)
if ob:
ob = dict(ob)
newo.append(ob.pop('_order'))
queryset = queryset.annotate(**ob)
else:
newo.append(ordering_part)
ordering = newo
return queryset.order_by(*ordering)
return queryset
class ConditionalListView:
def list(self, request, **kwargs):
if_modified_since = request.headers.get('If-Modified-Since')
if if_modified_since:
if_modified_since = parse_http_date_safe(if_modified_since)
if_unmodified_since = request.headers.get('If-Unmodified-Since')
if if_unmodified_since:
if_unmodified_since = parse_http_date_safe(if_unmodified_since)
if not hasattr(request, 'event'):
return super().list(request, **kwargs)
lmd = request.event.logentry_set.filter(
content_type__model=self.get_queryset().model._meta.model_name,
content_type__app_label=self.get_queryset().model._meta.app_label,
).aggregate(
m=Max('datetime')
)['m']
if lmd:
lmd_ts = timegm(lmd.utctimetuple())
if if_unmodified_since and lmd and lmd_ts > if_unmodified_since:
return HttpResponse(status=412)
if if_modified_since and lmd and lmd_ts <= if_modified_since:
return HttpResponse(status=304)
resp = super().list(request, **kwargs)
if lmd:
resp['Last-Modified'] = http_date(lmd_ts)
return resp
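# Hedged usage sketch: ConditionalListView is meant to be mixed into an
# event-scoped DRF viewset so clients can poll list endpoints with
# If-Modified-Since / If-Unmodified-Since. The viewset, queryset and serializer
# names below are illustrative assumptions.
#
#     class ItemViewSet(ConditionalListView, viewsets.ReadOnlyModelViewSet):
#         queryset = Item.objects.all()
#         serializer_class = ItemSerializer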
|
zeus/networks/pytorch/customs/nago.py | shaido987/vega | 240 | 11071282 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The NAGO model."""
import logging
import numpy as np
from zeus.modules.module import Module
from zeus.common import ClassType, ClassFactory
from .utils.logical_graph import GeneratorSolution, LogicalMasterGraph
from .utils.layer import MasterNetwork
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.NETWORK)
class NAGO(Module):
"""Search space of NAGO."""
def __init__(self, **kwargs):
"""Construct the Hierarchical Neural Architecture Generator class.
:param net_desc: config of the searched structure
"""
super(NAGO, self).__init__()
logger.info("start init NAGO")
# to prevent invalid graphs with G_nodes <= G_k
kwargs['G1_K'] = int(np.min([kwargs['G1_nodes'] - 1, kwargs['G1_K']]))
kwargs['G3_K'] = int(np.min([kwargs['G3_nodes'] - 1, kwargs['G3_K']]))
logger.info("NAGO desc: {}".format(kwargs))
top_graph_params = ['WS', kwargs['G1_nodes'], kwargs['G1_P'], kwargs['G1_K']]
mid_graph_params = ['ER', kwargs['G2_nodes'], kwargs['G2_P']]
bottom_graph_params = ['WS', kwargs['G3_nodes'], kwargs['G3_P'], kwargs['G3_K']]
channel_ratios = [kwargs['ch1_ratio'], kwargs['ch2_ratio'], kwargs['ch3_ratio']]
stage_ratios = [kwargs['stage1_ratio'], kwargs['stage2_ratio'], kwargs['stage3_ratio']]
conv_type = 'normal'
top_merge_dist = [1.0, 0.0, 0.0]
mid_merge_dist = [1.0, 0.0, 0.0]
bottom_merge_dist = [1.0, 0.0, 0.0]
op_dist = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
solution = GeneratorSolution(top_graph_params, mid_graph_params, bottom_graph_params,
stage_ratios, channel_ratios, op_dist, conv_type,
top_merge_dist, mid_merge_dist, bottom_merge_dist)
# Generate an architecture from the generator
model_frame = LogicalMasterGraph(solution)
# Compute the channel multipler factor based on the parameter count limit
n_params_base = model_frame._get_param_count()
multiplier = int(np.sqrt(float(kwargs['n_param_limit']) / n_params_base))
self.model = MasterNetwork(model_frame, multiplier, kwargs['image_size'],
kwargs['num_classes'], None, False)
def forward(self, x):
"""Calculate the output of the model.
:param x: input tensor
:return: output tensor of the model
"""
y, aux_logits = self.model(x)
return y
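# Illustrative kwargs sketch: the keys are exactly the ones consumed in
# __init__ above, while the values are assumptions chosen only for the example.
#
#     net = NAGO(G1_nodes=4, G1_P=0.75, G1_K=2,
#                G2_nodes=3, G2_P=0.75,
#                G3_nodes=4, G3_P=0.75, G3_K=2,
#                ch1_ratio=1, ch2_ratio=2, ch3_ratio=4,
#                stage1_ratio=1, stage2_ratio=1, stage3_ratio=1,
#                n_param_limit=4.0e6, image_size=32, num_classes=10)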
|
cmsplugin_cascade/generic/settings.py | teklager/djangocms-cascade | 139 | 11071295 | <reponame>teklager/djangocms-cascade<gh_stars>100-1000
CASCADE_PLUGINS = ['custom_snippet', 'heading', 'horizontal_rule', 'simple_wrapper', 'text_image']
def set_defaults(config):
from cmsplugin_cascade.extra_fields.config import PluginExtraFieldsConfig
config.setdefault('plugins_with_extra_fields', {})
plugins_with_extra_fields = config['plugins_with_extra_fields']
plugins_with_extra_fields.setdefault('HorizontalRulePlugin', PluginExtraFieldsConfig(
inline_styles={
'extra_fields:Border': ['border-top'],
'extra_fields:Border Radius': ['border-radius'],
'extra_units:Border Radius': 'px,rem',
},
allow_override=False,
))
|
advisor_server/suggestion/migrations/0001_initial.py | silvery107/advisor | 1,498 | 11071329 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-10-02 08:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Algorithm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('status', models.CharField(max_length=128)),
('created_time', models.DateTimeField(auto_now_add=True)),
('updated_time', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Study',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('study_configuration', models.TextField()),
('algorithm', models.CharField(max_length=128)),
('status', models.CharField(max_length=128)),
('created_time', models.DateTimeField(auto_now_add=True)),
('updated_time', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Trial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('study_name', models.CharField(max_length=128)),
('name', models.CharField(max_length=128)),
('parameter_values', models.TextField(blank=True, null=True)),
('objective_value', models.FloatField(blank=True, null=True)),
('status', models.CharField(max_length=128)),
('created_time', models.DateTimeField(auto_now_add=True)),
('updated_time', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='TrialMetric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('trial_id', models.IntegerField()),
('training_step', models.IntegerField(blank=True, null=True)),
('objective_value', models.FloatField(blank=True, null=True)),
('created_time', models.DateTimeField(auto_now_add=True)),
('updated_time', models.DateTimeField(auto_now=True)),
],
),
]
|
spotify_confidence/analysis/frequentist/z_test_linreg.py | danielsaaf/confidence | 107 | 11071337 | # Copyright 2017-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Iterable
from pandas import DataFrame
from spotify_confidence.analysis.constants import BONFERRONI, METHOD_COLUMN_NAME
from .experiment import Experiment
from ..abstract_base_classes.confidence_computer_abc import ConfidenceComputerABC
from ..abstract_base_classes.confidence_grapher_abc import ConfidenceGrapherABC
class ZTestLinreg(Experiment):
def __init__(
self,
data_frame: DataFrame,
numerator_column: str,
numerator_sum_squares_column: Union[str, None],
denominator_column: str,
feature_column: Union[str, None],
feature_sum_squares_column: Union[str, None],
feature_cross_sum_column: Union[str, None],
categorical_group_columns: Union[str, Iterable],
ordinal_group_column: Union[str, None] = None,
metric_column: Union[str, None] = None,
treatment_column: Union[str, None] = None,
interval_size: float = 0.95,
power: float = 0.8,
correction_method: str = BONFERRONI,
confidence_computer: ConfidenceComputerABC = None,
confidence_grapher: ConfidenceGrapherABC = None,
):
super().__init__(
data_frame=data_frame.assign(**{METHOD_COLUMN_NAME: "z-test-linreg"}),
numerator_column=numerator_column,
numerator_sum_squares_column=numerator_sum_squares_column,
denominator_column=denominator_column,
categorical_group_columns=categorical_group_columns,
ordinal_group_column=ordinal_group_column,
interval_size=interval_size,
correction_method=correction_method,
confidence_computer=confidence_computer,
confidence_grapher=confidence_grapher,
method_column=METHOD_COLUMN_NAME,
metric_column=metric_column,
treatment_column=treatment_column,
power=power,
feature_column=feature_column,
feature_sum_squares_column=feature_sum_squares_column,
feature_cross_sum_column=feature_cross_sum_column,
)
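# Hedged usage sketch (the DataFrame and column names are illustrative
# assumptions): a z-test with a linear-regression covariate adjustment would be
# configured roughly as below; analysis methods such as summary() or
# difference() are assumed to come from the Experiment base class.
#
#     test = ZTestLinreg(
#         data_frame=df,
#         numerator_column="converted",
#         numerator_sum_squares_column="converted_squared",
#         denominator_column="users",
#         feature_column="pre_metric",
#         feature_sum_squares_column="pre_metric_squared",
#         feature_cross_sum_column="pre_metric_cross_sum",
#         categorical_group_columns="group",
#     )
#     summary_df = test.summary()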
|
test/benchmark/fannkuch.py | CohenArthur/wren | 2,712 | 11071347 | # The Computer Language Benchmarks Game
# http://benchmarksgame.alioth.debian.org/
# contributed by <NAME>
# converted to Java by <NAME>
# converted to Python by <NAME>
# modified by <NAME>
def fannkuch(n):
maxFlipsCount = 0
permSign = True
checksum = 0
perm1 = list(range(n))
count = perm1[:]
rxrange = range(2, n - 1)
nm = n - 1
while 1:
k = perm1[0]
if k:
perm = perm1[:]
flipsCount = 1
kk = perm[k]
while kk:
perm[:k+1] = perm[k::-1]
flipsCount += 1
k = kk
kk = perm[kk]
if maxFlipsCount < flipsCount:
maxFlipsCount = flipsCount
checksum += flipsCount if permSign else -flipsCount
# Use incremental change to generate another permutation
if permSign:
perm1[0],perm1[1] = perm1[1],perm1[0]
permSign = False
else:
perm1[1],perm1[2] = perm1[2],perm1[1]
permSign = True
for r in rxrange:
if count[r]:
break
count[r] = r
perm0 = perm1[0]
perm1[:r+1] = perm1[1:r+2]
perm1[r+1] = perm0
else:
r = nm
if not count[r]:
print( checksum )
return maxFlipsCount
count[r] -= 1
n = 9
print(( "Pfannkuchen(%i) = %i" % (n, fannkuch(n)) )) |
tests/test_RI_CEPA.py | andyj10224/psi4numpy | 214 | 11071351 | <gh_stars>100-1000
from addons import *
from utils import *
import pytest
tdir = 'Coupled-Electron-Pair-Approximation'
def test_LCCD(workspace):
exe_py(workspace, tdir, 'LCCD')
def test_LCCSD(workspace):
exe_py(workspace, tdir, 'LCCSD')
def test_OLCCD(workspace):
exe_py(workspace, tdir, 'OLCCD')
def test_DFLCCD(workspace):
exe_py(workspace, tdir, 'DF-LCCD')
def test_DFLCCSD(workspace):
exe_py(workspace, tdir, 'DF-LCCSD')
|
samples/vsphere/appliances/list_service.py | JKraftman/vsphere-automation-sdk-python | 589 | 11071352 |
#!/usr/bin/env python
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2019. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = '6.7+'
from vmware.vapi.vsphere.client import create_vsphere_client
from samples.vsphere.common import (sample_cli, sample_util)
from samples.vsphere.common.ssl_helper import get_unverified_session
"""
Description: Demonstrates the appliance services API workflow
1. List all services
"""
parser = sample_cli.build_arg_parser()
args = sample_util.process_cli_args(parser.parse_args())
session = get_unverified_session() if args.skipverification else None
client = create_vsphere_client(server=args.server,
username=args.username,
password=<PASSWORD>,
session=session)
service_list = client.appliance.Services.list()
print("Example: List Appliance Services:")
print("-------------------\n")
for key, values in service_list.items():
print("Service Name : {} ".format(key))
print("value : {}".format(values.description))
print("State: {} \n".format(values.state))
|
butterfree/transform/utils/window_spec.py | fossabot/butterfree | 208 | 11071356 | """Holds function for defining window in DataFrames."""
from typing import Any, List, Optional, Union
from pyspark import sql
from pyspark.sql import Column, WindowSpec, functions
from butterfree.constants.columns import TIMESTAMP_COLUMN
from butterfree.constants.window_definitions import ALLOWED_WINDOWS
class FrameBoundaries:
"""Utility functions for defining the frame boundaries.
Args:
mode: available modes to be used in time aggregations.
window_definition: time ranges to be used in the windows,
it can be second(s), minute(s), hour(s), day(s), week(s) and year(s),
"""
def __init__(self, mode: Optional[str], window_definition: str):
self.mode = mode
self.window_definition = window_definition
@property
def window_size(self) -> int:
"""Returns window size."""
if int(self.window_definition.split()[0]) <= 0:
raise KeyError(f"{self.window_definition} have negative element.")
return int(self.window_definition.split()[0])
@property
def window_unit(self) -> str:
"""Returns window unit."""
unit = self.window_definition.split()[1]
if unit not in ALLOWED_WINDOWS and self.mode != "row_windows":
raise ValueError("Not allowed")
return unit
def get(self, window: WindowSpec) -> Any:
"""Returns window with or without the frame boundaries."""
if self.mode is None:
return window
if self.mode == "row_windows":
span = self.window_size - 1
return window.rowsBetween(-span, 0)
if self.mode == "fixed_windows":
span = ALLOWED_WINDOWS[self.window_unit] * self.window_size
return window.rangeBetween(-span, 0)
class Window:
"""Utility functions for defining a window specification.
Args:
        partition_by: the partitioning defined.
order_by: the ordering defined.
mode: available modes to be used in time aggregations.
window_definition: time ranges to be used in the windows, it can be second(s),
minute(s), hour(s), day(s), week(s) and year(s),
Use the static methods in :class:`Window` to create a :class:`WindowSpec`.
"""
DEFAULT_SLIDE_DURATION: str = "1 day"
def __init__(
self,
window_definition: str,
partition_by: Optional[Union[Column, str, List[str]]] = None,
order_by: Optional[Union[Column, str]] = None,
mode: str = None,
slide: str = None,
):
self.partition_by = partition_by
self.order_by = order_by or TIMESTAMP_COLUMN
self.frame_boundaries = FrameBoundaries(mode, window_definition)
self.slide = slide or self.DEFAULT_SLIDE_DURATION
def get_name(self) -> str:
"""Return window suffix name based on passed criteria."""
return "_".join(
[
"over",
f"{self.frame_boundaries.window_size}",
f"{self.frame_boundaries.window_unit}",
self.frame_boundaries.mode,
]
)
def get(self) -> Any:
"""Defines a common window to be used both in time and rows windows."""
if self.frame_boundaries.mode == "rolling_windows":
return functions.window(
TIMESTAMP_COLUMN,
self.frame_boundaries.window_definition,
slideDuration=self.slide,
)
elif self.order_by == TIMESTAMP_COLUMN:
w = sql.Window.partitionBy(self.partition_by).orderBy( # type: ignore
functions.col(TIMESTAMP_COLUMN).cast("long")
)
else:
w = sql.Window.partitionBy(self.partition_by).orderBy( # type: ignore
self.order_by
)
return self.frame_boundaries.get(w)
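# --- Usage sketch (added for illustration; "user_id" is an assumed column name) ---
def _window_example() -> WindowSpec:
    # A 7-day fixed window partitioned by user and ordered by the timestamp column.
    # Calling get() requires an active Spark session.
    window = Window(
        window_definition="7 days",
        partition_by="user_id",
        mode="fixed_windows",
    )
    # get_name() builds the feature suffix, e.g. "over_7_days_fixed_windows".
    _ = window.get_name()
    # get() returns a WindowSpec whose frame covers the last 7 days (rangeBetween over seconds).
    return window.get()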
|
thrift/test/py/constants_test.py | laohubuzaijia/fbthrift | 2,112 | 11071383 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
class TestPythonConstants(unittest.TestCase):
def testStrings(self):
from .constants import constants
self.assertEquals(constants.apostrophe, "'")
self.assertEquals(constants.tripleApostrophe, "'''")
self.assertEquals(constants.quotationMark, '"')
self.assertEquals(constants.quote, 'this is a "quote"')
self.assertEquals(constants.backslash, "\\")
self.assertEquals(constants.escaped_a, "a")
def testDict(self):
from .constants import constants
self.assertEquals(constants.escapeChars['apostrophe'], "'")
self.assertEquals(constants.escapeChars['quotationMark'], '"')
self.assertEquals(constants.escapeChars['backslash'], "\\")
self.assertEquals(constants.escapeChars['escaped_a'], "a")
self.assertEquals(constants.char2ascii["'"], 39)
self.assertEquals(constants.char2ascii['"'], 34)
self.assertEquals(constants.char2ascii["\\"], 92)
self.assertEquals(constants.char2ascii["a"], 97)
def testStruct(self):
from .constants import constants
self.assertEquals(constants.str2struct["foo"].bar, {"baz": "qux"})
if __name__ == '__main__':
unittest.main()
|
pbctf2020/amazing_rop/exploit.py | nhtri2003gmail/ctf-write-ups | 101 | 11071415 | #!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./bof')
if args.REMOTE:
p = remote('maze.chal.perfect.blue', 1)
else:
p = process(binary.path)
'''
// This is what you need to do to get the first flag
// void print_flag() {
// asm volatile("mov $1, %%eax; mov $0x31337, %%edi; mov $0x1337, %%esi; int3" ::: "eax");
// }
'''
# 0x00001396: pop esi; pop edi; pop ebp; ret;
binary.symbols['pop_esi_edi_edp'] = 0x00001396
# ropper did not find this
'''
13ad: 58 pop eax
13ae: cc int3
13af: c3 ret
'''
binary.symbols['pop_eax_int3'] = 0x13ad
p.sendlineafter('Do you want color in the visualization? (Y/n) ', 'n')
for i in range(10):
_ = p.recvline().strip().decode().split(' ')
return_addr = int(''.join(_[2:6][::-1]),16)
log.info('return_addr: ' + hex(return_addr))
binary.address = return_addr - ((return_addr & 0xFFF) - (binary.sym.main & 0xFFF)) - binary.sym.main
log.info('binary.address: ' + hex(binary.address))
payload = b''
payload += (0x40 - 0x10) * b'A'
payload += b'flag'
payload += (0x40 - len(payload)) * b'A'
payload += p32(binary.sym.pop_esi_edi_edp)
payload += p32(0x1337)
payload += p32(0x31337)
payload += p32(0xdeadba5e)
payload += p32(binary.sym.pop_eax_int3)
payload += p32(1)
p.sendlineafter('Input some text: ', payload)
p.stream()
|
wsltools/utils/faker/providers/ssn/en_US/__init__.py | Symbo1/wsltools | 412 | 11071423 | # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from .. import Provider as BaseProvider
class Provider(BaseProvider):
INVALID_SSN_TYPE = 'INVALID_SSN'
SSN_TYPE = 'SSN'
ITIN_TYPE = 'ITIN'
EIN_TYPE = 'EIN'
def itin(self):
"""Generate a random United States Individual Taxpayer Identification Number (ITIN).
        A United States Individual Taxpayer Identification Number
(ITIN) is a tax processing number issued by the Internal
Revenue Service. It is a nine-digit number that always begins
with the number 9 and has a range of 70-88 in the fourth and
fifth digit. Effective April 12, 2011, the range was extended
to include 900-70-0000 through 999-88-9999, 900-90-0000
through 999-92-9999 and 900-94-0000 through 999-99-9999.
https://www.irs.gov/individuals/international-taxpayers/general-itin-information
"""
area = self.random_int(min=900, max=999)
serial = self.random_int(min=0, max=9999)
# The group number must be between 70 and 99 inclusively but not 89 or 93
group = self.random_element([x for x in range(70, 100) if x not in [89, 93]])
itin = "{:03d}-{:02d}-{:04d}".format(area, group, serial)
return itin
def ein(self):
"""Generate a random United States Employer Identification Number (EIN).
        A United States Employer Identification Number (EIN) is
also known as a Federal Tax Identification Number, and is
used to identify a business entity. EINs follow a format of a
two-digit prefix followed by a hyphen and a seven-digit sequence:
        ##-#######
https://www.irs.gov/businesses/small-businesses-self-employed/employer-id-numbers
"""
# Only certain EIN Prefix values are assigned:
#
# https://www.irs.gov/businesses/small-businesses-self-employed/how-eins-are-assigned-and-valid-ein-prefixes
ein_prefix_choices = [
'01',
'02',
'03',
'04',
'05',
'06',
'10',
'11',
'12',
'13',
'14',
'15',
'16',
'20',
'21',
'22',
'23',
'24',
'25',
'26',
'27',
'30',
'31',
'32',
'33',
'34',
'35',
'36',
'37',
'38',
'39',
'40',
'41',
'42',
'43',
'44',
'45',
'46',
'47',
'48',
'50',
'51',
'52',
'53',
'54',
'55',
'56',
'57',
'58',
'59',
'60',
'61',
'62',
'63',
'64',
'65',
'66',
'67',
'68',
'71',
'72',
'73',
'74',
'75',
'76',
'77',
'80',
'81',
'82',
'83',
'84',
'85',
'86',
'87',
'88',
'90',
'91',
'92',
'93',
'94',
'95',
'98',
'99']
ein_prefix = self.random_element(ein_prefix_choices)
sequence = self.random_int(min=0, max=9999999)
ein = "{:s}-{:07d}".format(ein_prefix, sequence)
return ein
def invalid_ssn(self):
""" Generate a random invalid United States Social Security Identification Number (SSN).
Invalid SSNs have the following characteristics:
Cannot begin with the number 9
Cannot begin with 666 in positions 1 - 3
Cannot begin with 000 in positions 1 - 3
Cannot contain 00 in positions 4 - 5
Cannot contain 0000 in positions 6 - 9
https://www.ssa.gov/kc/SSAFactSheet--IssuingSSNs.pdf
Additionally, return an invalid SSN that is NOT a valid ITIN by excluding certain ITIN related "group" values
"""
itin_group_numbers = [
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
90,
91,
92,
94,
95,
96,
97,
98,
99]
area = self.random_int(min=0, max=999)
if area < 900 and area not in {666, 0}:
random_group_or_serial = self.random_int(min=1, max=1000)
if random_group_or_serial <= 500:
group = 0
serial = self.random_int(0, 9999)
else:
group = self.random_int(0, 99)
serial = 0
elif area in {666, 0}:
group = self.random_int(0, 99)
serial = self.random_int(0, 9999)
else:
group = self.random_element([x for x in range(0, 100) if x not in itin_group_numbers])
serial = self.random_int(0, 9999)
invalid_ssn = "{:03d}-{:02d}-{:04d}".format(area, group, serial)
return invalid_ssn
def ssn(self, taxpayer_identification_number_type=SSN_TYPE):
""" Generate a random United States Taxpayer Identification Number of the specified type.
If no type is specified, a US SSN is returned.
"""
if taxpayer_identification_number_type == self.ITIN_TYPE:
return self.itin()
elif taxpayer_identification_number_type == self.EIN_TYPE:
return self.ein()
elif taxpayer_identification_number_type == self.INVALID_SSN_TYPE:
return self.invalid_ssn()
elif taxpayer_identification_number_type == self.SSN_TYPE:
# Certain numbers are invalid for United States Social Security
# Numbers. The area (first 3 digits) cannot be 666 or 900-999.
# The group number (middle digits) cannot be 00. The serial
# (last 4 digits) cannot be 0000.
area = self.random_int(min=1, max=899)
if area == 666:
area += 1
group = self.random_int(1, 99)
serial = self.random_int(1, 9999)
ssn = "{:03d}-{:02d}-{:04d}".format(area, group, serial)
return ssn
else:
raise ValueError("taxpayer_identification_number_type must be one of 'SSN', 'EIN', 'ITIN',"
" or 'INVALID_SSN'.")
|
tia/bbg/example.py | AmarisAI/tia | 366 | 11071431 |
import pandas as pd
from tia.bbg import LocalTerminal
if __name__ == '__main__':
d = pd.datetools.BDay(-4).apply(pd.datetime.now())
m = pd.datetools.BMonthBegin(-2).apply(pd.datetime.now())
def banner(msg):
print '*' * 25
print msg
print '*' * 25
banner('ReferenceDataRequest: single security, single field, frame response')
response = LocalTerminal.get_reference_data('msft us equity', 'px_last')
print response.as_map()
print response.as_frame()
banner('ReferenceDataRequest: single security, multi-field (with bulk), frame response')
response = LocalTerminal.get_reference_data('eurusd curncy', ['px_last', 'fwd_curve'])
print response.as_map()
rframe = response.as_frame()
print rframe.columns
# show frame within a frame
print rframe.ix[0, 'fwd_curve'].tail()
banner('ReferenceDataRequest: multi security, multi-field, bad field')
response = LocalTerminal.get_reference_data(['eurusd curncy', 'msft us equity'], ['px_last', 'fwd_curve'],
ignore_field_error=1)
print response.as_frame()['fwd_curve']['eurusd curncy']
banner('HistoricalDataRequest: multi security, multi-field, daily data')
response = LocalTerminal.get_historical(['eurusd curncy', 'msft us equity'], ['px_last', 'px_open'], start=d)
print response.as_map()
print response.as_frame().head(5)
banner('HistoricalDataRequest: multi security, multi-field, weekly data')
response = LocalTerminal.get_historical(['eurusd curncy', 'msft us equity'], ['px_last', 'px_open'], start=m,
period='WEEKLY')
print '--------- AS SINGLE TABLE ----------'
print response.as_frame().head(5)
#
# HOW TO
#
# - Retrieve an fx vol surface: BbgReferenceDataRequest('eurusd curncy', 'DFLT_VOL_SURF_MID')
# - Retrieve a fx forward curve: BbgReferenceDataRequest('eurusd curncy', 'FWD_CURVE')
# - Retrieve dividends: BbgReferenceDataRequest('csco us equity', 'BDVD_PR_EX_DTS_DVD_AMTS_W_ANN') |
warehouse/migrations/versions/701c2fba1f5f_cascade_release_deletion_to_files.py | fairhopeweb/warehouse | 3,103 | 11071451 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cascade Release deletion to Files
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2018-03-09 23:06:05.382680
"""
from alembic import op
revision = "<KEY>"
down_revision = "<PASSWORD>"
def upgrade():
op.drop_constraint("release_files_name_fkey", "release_files", type_="foreignkey")
op.create_foreign_key(
"release_files_name_fkey",
"release_files",
"releases",
["name", "version"],
["name", "version"],
onupdate="CASCADE",
ondelete="CASCADE",
)
def downgrade():
op.drop_constraint("release_files_name_fkey", "release_files", type_="foreignkey")
op.create_foreign_key(
"release_files_name_fkey",
"release_files",
"releases",
["name", "version"],
["name", "version"],
onupdate="CASCADE",
)
|
contrib/one_shot_models/examples/sider_alternate_weave.py | cjgalvin/deepchem | 3,782 | 11071453 | """
Script that trains Weave models on SIDER dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
sider_tasks, sider_datasets, transformers = dc.molnet.load_sider(
featurizer='Weave')
train_dataset, valid_dataset, test_dataset = sider_datasets
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
n_atom_feat = 75
n_pair_feat = 14
# Batch size of models
batch_size = 64
n_feat = 128
graph = dc.nn.AlternateSequentialWeaveGraph(
batch_size,
max_atoms=max_atoms,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat)
graph.add(dc.nn.AlternateWeaveLayer(max_atoms, 75, 14))
#graph.add(dc.nn.AlternateWeaveLayer(max_atoms, 50, 50))
graph.add(dc.nn.Dense(n_feat, 50, activation='tanh'))
graph.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph.add(
dc.nn.AlternateWeaveGather(
batch_size, n_input=n_feat, gaussian_expand=True))
model = dc.models.MultitaskGraphClassifier(
graph,
len(sider_tasks),
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(train_dataset, nb_epoch=20, log_every_N_batches=5)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
|
venv/lib/python2.7/site-packages/werkzeug/testsuite/contrib/sessions.py | Christian-Castro/castro_odoo8 | 21,684 | 11071491 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Added tests for the sessions.
:copyright: (c) 2014 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
import os
import unittest
import shutil
from tempfile import mkdtemp, gettempdir
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib.sessions import FilesystemSessionStore
class SessionTestCase(WerkzeugTestCase):
def setup(self):
self.session_folder = mkdtemp()
def teardown(self):
shutil.rmtree(self.session_folder)
def test_default_tempdir(self):
store = FilesystemSessionStore()
assert store.path == gettempdir()
def test_basic_fs_sessions(self):
store = FilesystemSessionStore(self.session_folder)
x = store.new()
assert x.new
assert not x.modified
x['foo'] = [1, 2, 3]
assert x.modified
store.save(x)
x2 = store.get(x.sid)
assert not x2.new
assert not x2.modified
assert x2 is not x
assert x2 == x
x2['test'] = 3
assert x2.modified
assert not x2.new
store.save(x2)
x = store.get(x.sid)
store.delete(x)
x2 = store.get(x.sid)
# the session is not new when it was used previously.
assert not x2.new
def test_non_urandom(self):
urandom = os.urandom
del os.urandom
try:
store = FilesystemSessionStore(self.session_folder)
store.new()
finally:
os.urandom = urandom
def test_renewing_fs_session(self):
store = FilesystemSessionStore(self.session_folder, renew_missing=True)
x = store.new()
store.save(x)
store.delete(x)
x2 = store.get(x.sid)
assert x2.new
    def test_fs_session_listing(self):
store = FilesystemSessionStore(self.session_folder, renew_missing=True)
sessions = set()
for x in range(10):
sess = store.new()
store.save(sess)
sessions.add(sess.sid)
listed_sessions = set(store.list())
assert sessions == listed_sessions
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SessionTestCase))
return suite
|
src/python/lista_encadeada_desordenada.py | danilolmoura/Algoritmos-e-Estruturas-de-Dados | 653 | 11071520 |
""" Implementação de uma lista encadeada desordenada """
class Node:
def __init__(self, value, next_node=None):
self.__value = value
self.next_node = next_node
@property
def value(self):
return self.__value
class LinkedList:
def __init__(self, *nodes):
self.nodes = nodes
def sorted(self):
tmp_nodes = {node.next_node: node for node in self.nodes}
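        # (added note) Keys are each node's successor and values the node itself, so
        # walking the map starting from None visits the chain tail-to-head; the
        # insert(0, ...) below restores head-to-tail order.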
sorted_nodes = list()
current_node = None
while current_node in tmp_nodes:
current_node = tmp_nodes[current_node]
sorted_nodes.insert(0, current_node)
return sorted_nodes
if __name__ == '__main__':
e = Node(5)
d = Node(4, next_node=e)
c = Node(3, next_node=d)
b = Node(2, next_node=c)
a = Node(1, next_node=b)
linked_list = LinkedList(c, a, d, b, e)
print('Unsorted linked list:')
for node in linked_list.nodes:
print(node.value)
print('Sorted linked list:')
for node in linked_list.sorted():
print(node.value)
|
ipymarkup/__init__.py | natasha/ipymarkup | 108 | 11071531 |
from .span import format_span_box_markup, show_span_box_markup # noqa
from .span import format_span_line_markup, show_span_line_markup # noqa
from .span import format_span_ascii_markup, show_span_ascii_markup # noqa
from .dep import format_dep_markup, show_dep_markup # noqa
from .dep import format_dep_ascii_markup, show_dep_ascii_markup # noqa
# legacy
show_box_markup = show_span_box_markup
show_line_markup = show_span_line_markup
show_ascii_markup = show_span_ascii_markup
|
tfx/components/example_gen/utils_test.py | avelez93/tfx | 1,813 | 11071534 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.example_gen.utils."""
import os
import re
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.dsl.io import fileio
from tfx.orchestration import data_types
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.utils import io_utils
from tfx.utils import json_utils
class UtilsTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# Create input splits.
test_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
self._input_base_path = os.path.join(test_dir, 'input_base')
fileio.makedirs(self._input_base_path)
def testDictToExample(self):
instance_dict = {
'int': 10,
'float': 5.0,
'str': 'abc',
'int_list': [1, 2],
'float_list': [3.0],
'str_list': ['ab', 'cd'],
'none': None,
'empty_list': [],
}
example = utils.dict_to_example(instance_dict)
self.assertProtoEquals(
"""
features {
feature {
key: "empty_list"
value {
}
}
feature {
key: "float"
value {
float_list {
value: 5.0
}
}
}
feature {
key: "float_list"
value {
float_list {
value: 3.0
}
}
}
feature {
key: "int"
value {
int64_list {
value: 10
}
}
}
feature {
key: "int_list"
value {
int64_list {
value: 1
value: 2
}
}
}
feature {
key: "none"
value {
}
}
feature {
key: "str"
value {
bytes_list {
value: "abc"
}
}
}
feature {
key: "str_list"
value {
bytes_list {
value: "ab"
value: "cd"
}
}
}
}
""", example)
def testMakeOutputSplitNames(self):
split_names = utils.generate_output_split_names(
input_config=example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='train', pattern='train/*'),
example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
]),
output_config=example_gen_pb2.Output())
self.assertListEqual(['train', 'eval'], split_names)
split_names = utils.generate_output_split_names(
input_config=example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='single', pattern='single/*')
]),
output_config=example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=2),
example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1)
])))
self.assertListEqual(['train', 'eval'], split_names)
def testMakeDefaultOutputConfig(self):
output_config = utils.make_default_output_config(
utils.make_default_input_config())
self.assertEqual(2, len(output_config.split_config.splits))
output_config = utils.make_default_output_config(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='train', pattern='train/*'),
example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
]))
self.assertEqual(0, len(output_config.split_config.splits))
def testMakeOutputSplitNamesWithParameter(self):
split_name_param = data_types.RuntimeParameter(
name='split-name', ptype=str, default=u'train')
split_names = utils.generate_output_split_names(
input_config={
'splits': [{
'name': split_name_param,
'pattern': 'train/*'
}, {
'name': 'eval',
'pattern': 'eval/*'
}]
},
output_config=example_gen_pb2.Output())
# Assert the json serialized version because RuntimeParameters only get
# serialized after that.
self.assertEqual(
json_utils.dumps([split_name_param, 'eval']),
json_utils.dumps(split_names))
split_names = utils.generate_output_split_names(
input_config=example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='single', pattern='single/*')
]),
output_config={
'split_config': {
'splits': [{
'name': split_name_param,
'hash_buckets': 2
}, {
'name': 'eval',
'hash_buckets': 1
}]
}
})
# Assert the json serialized version because RuntimeParameters only get
# serialized after that.
self.assertEqual(
json_utils.dumps([split_name_param, 'eval']),
json_utils.dumps(split_names))
def testMakeDefaultOutputConfigWithParameter(self):
split_name_param = data_types.RuntimeParameter(
name='split-name', ptype=str, default=u'train')
output_config = utils.make_default_output_config({
'splits': [{
'name': split_name_param,
'pattern': 'train/*'
}, {
'name': 'eval',
'pattern': 'eval/*'
}]
})
self.assertEqual(0, len(output_config.split_config.splits))
def testGlobToRegex(self):
glob_pattern = 'a(b)c'
self.assertEqual(1, re.compile(glob_pattern).groups)
regex_pattern = utils._glob_to_regex(glob_pattern) # pylint: disable=protected-access
self.assertEqual(0, re.compile(regex_pattern).groups)
self.assertEqual(glob_pattern,
re.match(regex_pattern, glob_pattern).group())
def testCalculateSplitsFingerprint(self):
split1 = os.path.join(self._input_base_path, 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
os.utime(split1, (0, 1))
split2 = os.path.join(self._input_base_path, 'split2', 'data')
io_utils.write_string_file(split2, 'testing2')
os.utime(split2, (0, 3))
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='split1/*'),
example_gen_pb2.Input.Split(name='s2', pattern='split2/*')
]
fingerprint, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(
fingerprint,
'split:s1,num_files:1,total_bytes:7,xor_checksum:1,sum_checksum:1\n'
'split:s2,num_files:1,total_bytes:8,xor_checksum:3,sum_checksum:3')
self.assertEqual(span, 0)
self.assertIsNone(version)
def testSpanNoMatching(self):
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN}/split1/*'),
example_gen_pb2.Input.Split(name='s2', pattern='span{SPAN}/split2/*')
]
with self.assertRaisesRegex(ValueError, 'Cannot find matching for split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testVersionNoMatching(self):
span_dir = os.path.join(self._input_base_path, 'span01', 'wrong', 'data')
io_utils.write_string_file(span_dir, 'testing_version_no_matching')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/version{VERSION}/split1/*')
]
with self.assertRaisesRegex(ValueError, 'Cannot find matching for split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testSpanWrongFormat(self):
wrong_span = os.path.join(self._input_base_path, 'spanx', 'split1', 'data')
io_utils.write_string_file(wrong_span, 'testing_wrong_span')
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN}/split1/*')
]
with self.assertRaisesRegex(ValueError, 'Cannot find span number'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testVersionWrongFormat(self):
wrong_version = os.path.join(self._input_base_path, 'span01', 'versionx',
'split1', 'data')
io_utils.write_string_file(wrong_version, 'testing_wrong_version')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/version{VERSION}/split1/*')
]
with self.assertRaisesRegex(ValueError, 'Cannot find version number'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testMultipleSpecs(self):
splits1 = [
example_gen_pb2.Input.Split(
name='s1', pattern='span1{SPAN}/span2{SPAN}/split1/*')
]
with self.assertRaisesRegex(ValueError, 'Only one {SPAN} is allowed'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits1)
splits2 = [
example_gen_pb2.Input.Split(
name='s1',
pattern='span{SPAN}/ver1{VERSION}/ver2{VERSION}/split1/*')
]
with self.assertRaisesRegex(ValueError, 'Only one {VERSION} is allowed'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits2)
splits3 = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}-{MM}-{DD}-{MM}/split1/*')
]
with self.assertRaisesRegex(ValueError, 'Exactly one of each date spec'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits3)
def testHaveSpanNoVersion(self):
# Test specific behavior when Span spec is present but Version is not.
split1 = os.path.join(self._input_base_path, 'span1', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertIsNone(version)
def testHaveSpanAndVersion(self):
# Test specific behavior when both Span and Version are present.
split1 = os.path.join(self._input_base_path, 'span1', 'version1', 'split1',
'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/version{VERSION}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertEqual(version, 1)
def testHaveVersionNoSpan(self):
# Test specific behavior when Version spec is present but Span is not.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='version{VERSION}/split1/*')
]
with self.assertRaisesRegex(
ValueError,
'Version spec provided, but Span or Date spec is not present'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testNoSpanOrVersion(self):
# Test specific behavior when neither Span nor Version spec is present.
split1 = os.path.join(self._input_base_path, 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [example_gen_pb2.Input.Split(name='s1', pattern='split1/*')]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 0)
self.assertIsNone(version)
def testNewSpanWithOlderVersionAlign(self):
# Test specific behavior when a newer Span has older Version.
span1_ver2 = os.path.join(self._input_base_path, 'span1', 'ver2', 'split1',
'data')
io_utils.write_string_file(span1_ver2, 'testing')
span2_ver1 = os.path.join(self._input_base_path, 'span2', 'ver1', 'split1',
'data')
io_utils.write_string_file(span2_ver1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 2)
self.assertEqual(version, 1)
def testDateSpecPartiallyMissing(self):
splits1 = [
example_gen_pb2.Input.Split(name='s1', pattern='{YYYY}-{MM}/split1/*')
]
with self.assertRaisesRegex(ValueError, 'Exactly one of each date spec'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits1)
def testBothSpanAndDate(self):
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}-{MM}-{DD}/{SPAN}/split1/*')
]
with self.assertRaisesRegex(
ValueError,
'Either span spec or date specs must be specified exclusively'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testDateBadFormat(self):
# Test improperly formed date.
split1 = os.path.join(self._input_base_path, 'yyyymmdd', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/split1/*')
]
with self.assertRaisesRegex(ValueError,
'Cannot find span number using date'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testInvalidDate(self):
split1 = os.path.join(self._input_base_path, '20201301', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/split1/*')
]
with self.assertRaisesRegex(ValueError, 'Retrieved date is invalid'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testHaveDateNoVersion(self):
# Test specific behavior when Date spec is present but Version is not.
split1 = os.path.join(self._input_base_path, '19700102', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertIsNone(version)
def testHaveDateAndVersion(self):
# Test specific behavior when both Date and Version are present.
split1 = os.path.join(self._input_base_path, '19700102', 'ver1', 'split1',
'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertEqual(version, 1)
def testSpanInvalidWidth(self):
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='{SPAN:x}/split1/*')
]
with self.assertRaisesRegex(
ValueError, 'Width modifier in span spec is not a positive integer'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testVersionInvalidWidth(self):
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{SPAN}/{VERSION:x}/split1/*')
]
with self.assertRaisesRegex(
ValueError, 'Width modifier in version spec is not a positive integer'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testSpanWidth(self):
split1 = os.path.join(self._input_base_path, 'span1', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:2}/split1/*')
]
# TODO(jjma): find a better way of describing this error to user.
with self.assertRaisesRegex(ValueError,
'Glob pattern does not match regex pattern'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:1}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertIsNone(version)
def testVersionWidth(self):
split1 = os.path.join(self._input_base_path, 'span1', 'ver1', 'split1',
'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION:2}/split1/*')
]
# TODO(jjma): find a better way of describing this error to user.
with self.assertRaisesRegex(ValueError,
'Glob pattern does not match regex pattern'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION:1}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertEqual(version, 1)
def testSpanVersionWidthNoSeperator(self):
split1 = os.path.join(self._input_base_path, '1234', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{SPAN:2}{VERSION:2}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 12)
self.assertEqual(version, 34)
def testCalculateSplitsFingerprintSpanAndVersionWithSpan(self):
# Test align of span and version numbers.
span1_v1_split1 = os.path.join(self._input_base_path, 'span01', 'ver01',
'split1', 'data')
io_utils.write_string_file(span1_v1_split1, 'testing11')
span1_v1_split2 = os.path.join(self._input_base_path, 'span01', 'ver01',
'split2', 'data')
io_utils.write_string_file(span1_v1_split2, 'testing12')
span2_v1_split1 = os.path.join(self._input_base_path, 'span02', 'ver01',
'split1', 'data')
io_utils.write_string_file(span2_v1_split1, 'testing21')
# Test if error raised when span does not align.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='span{SPAN}/ver{VERSION}/split2/*')
]
with self.assertRaisesRegex(
ValueError, 'Latest span should be the same for each split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
span2_v1_split2 = os.path.join(self._input_base_path, 'span02', 'ver01',
'split2', 'data')
io_utils.write_string_file(span2_v1_split2, 'testing22')
span2_v2_split1 = os.path.join(self._input_base_path, 'span02', 'ver02',
'split1', 'data')
io_utils.write_string_file(span2_v2_split1, 'testing21')
# Test if error raised when span aligns but version does not.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='span{SPAN}/ver{VERSION}/split2/*')
]
with self.assertRaisesRegex(
ValueError, 'Latest version should be the same for each split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
span2_v2_split2 = os.path.join(self._input_base_path, 'span02', 'ver02',
'split2', 'data')
io_utils.write_string_file(span2_v2_split2, 'testing22')
# Test if latest span and version is selected when aligned for each split.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='span{SPAN}/ver{VERSION}/split2/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 2)
self.assertEqual(version, 2)
self.assertEqual(splits[0].pattern, 'span02/ver02/split1/*')
self.assertEqual(splits[1].pattern, 'span02/ver02/split2/*')
def testCalculateSplitsFingerprintSpanAndVersionWithDate(self):
# Test align of span and version numbers.
span1_v1_split1 = os.path.join(self._input_base_path, '19700102', 'ver01',
'split1', 'data')
io_utils.write_string_file(span1_v1_split1, 'testing11')
span1_v1_split2 = os.path.join(self._input_base_path, '19700102', 'ver01',
'split2', 'data')
io_utils.write_string_file(span1_v1_split2, 'testing12')
span2_v1_split1 = os.path.join(self._input_base_path, '19700103', 'ver01',
'split1', 'data')
io_utils.write_string_file(span2_v1_split1, 'testing21')
# Test if error raised when date does not align.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split2/*')
]
with self.assertRaisesRegex(
ValueError, 'Latest span should be the same for each split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
span2_v1_split2 = os.path.join(self._input_base_path, '19700103', 'ver01',
'split2', 'data')
io_utils.write_string_file(span2_v1_split2, 'testing22')
span2_v2_split1 = os.path.join(self._input_base_path, '19700103', 'ver02',
'split1', 'data')
io_utils.write_string_file(span2_v2_split1, 'testing21')
# Test if error raised when date aligns but version does not.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split2/*')
]
with self.assertRaisesRegex(
ValueError, 'Latest version should be the same for each split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
span2_v2_split2 = os.path.join(self._input_base_path, '19700103', 'ver02',
'split2', 'data')
io_utils.write_string_file(span2_v2_split2, 'testing22')
# Test if latest span and version is selected when aligned for each split.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split2/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 2)
self.assertEqual(version, 2)
self.assertEqual(splits[0].pattern, '19700103/ver02/split1/*')
self.assertEqual(splits[1].pattern, '19700103/ver02/split2/*')
def testRangeConfigWithNonexistentSpan(self):
# Test behavior when specified span in RangeConfig does not exist.
span1_split1 = os.path.join(self._input_base_path, 'span01', 'split1',
'data')
io_utils.write_string_file(span1_split1, 'testing11')
range_config = range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=2, end_span_number=2))
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:2}/split1/*')
]
with self.assertRaisesRegex(ValueError, 'Cannot find matching for split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits, range_config=range_config)
def testSpanAlignWithRangeConfig(self):
span1_split1 = os.path.join(self._input_base_path, 'span01', 'split1',
'data')
io_utils.write_string_file(span1_split1, 'testing11')
span2_split1 = os.path.join(self._input_base_path, 'span02', 'split1',
'data')
io_utils.write_string_file(span2_split1, 'testing21')
# Test static range in RangeConfig.
range_config = range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=1, end_span_number=1))
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:2}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits, range_config)
self.assertEqual(span, 1)
self.assertIsNone(version)
self.assertEqual(splits[0].pattern, 'span01/split1/*')
def testRangeConfigSpanWidthPresence(self):
# Test RangeConfig.static_range behavior when span width is not given.
span1_split1 = os.path.join(self._input_base_path, 'span01', 'split1',
'data')
io_utils.write_string_file(span1_split1, 'testing11')
range_config = range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=1, end_span_number=1))
splits1 = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN}/split1/*')
]
# RangeConfig cannot find zero padding span without width modifier.
with self.assertRaisesRegex(ValueError, 'Cannot find matching for split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits1, range_config=range_config)
splits2 = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:2}/split1/*')
]
# With width modifier in span spec, RangeConfig.static_range makes
# correct zero-padded substitution.
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits2, range_config=range_config)
self.assertEqual(span, 1)
self.assertIsNone(version)
self.assertEqual(splits2[0].pattern, 'span01/split1/*')
def testRangeConfigWithDateSpec(self):
span1_split1 = os.path.join(self._input_base_path, '19700102', 'split1',
'data')
io_utils.write_string_file(span1_split1, 'testing11')
start_span = utils.date_to_span_number(1970, 1, 2)
end_span = utils.date_to_span_number(1970, 1, 2)
range_config = range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=start_span, end_span_number=end_span))
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits, range_config=range_config)
self.assertEqual(span, 1)
self.assertIsNone(version)
self.assertEqual(splits[0].pattern, '19700102/split1/*')
def testGetQueryForSpan(self):
query = 'select * from table'
self.assertEqual(utils.get_query_for_span(query, 1), 'select * from table')
query = 'select * from table where date=@span_yyyymmdd_utc'
self.assertEqual(
utils.get_query_for_span(query, 1),
"select * from table where date='19700102'")
query = ('select * from table where '
'ts>=TIMESTAMP_SECONDS(@span_begin_timestamp) and '
'ts<TIMESTAMP_SECONDS(@span_end_timestamp)')
self.assertEqual(
utils.get_query_for_span(query, 2),
'select * from table where ts>=TIMESTAMP_SECONDS(172800) and ts<TIMESTAMP_SECONDS(259200)'
)
if __name__ == '__main__':
tf.test.main()
|
NTM-One-Shot-TF/MANN/Utils/Metrics.py | dhruvramani/ContinualLearningExperiments | 265 | 11071545 |
import tensorflow as tf
import numpy as np
from .tf_utils import update_tensor
# prediction is the argmax
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], nb_classes=5, nb_samples_per_class=10, batch_size=1):
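    # (added note) Returns, for each presentation index, the accuracy on the i-th time a
    # class has been seen within an episode, averaged over the batch; `indices` tracks
    # how many times each class label has appeared so far.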
targets = tf.cast(targets, predictions.dtype)
accuracy = tf.constant(value=0, shape=(batch_size, nb_samples_per_class), dtype=tf.float32)
indices = tf.constant(value=0, shape=(batch_size, nb_classes+1), dtype=tf.float32)
def step_((accuracy, indices), (p, t)):
"""with tf.variable_scope("Metric_step_var", reuse=True):
accuracy = tf.get_variable(name="accuracy", shape=(batch_size, nb_samples_per_class),
initializer=tf.constant_initializer(0), dtype=tf.float32)
indices = tf.get_variable(name="indices", shape=(batch_size, nb_classes + 1),
initializer=tf.constant_initializer(0), dtype=tf.float32)"""
p = tf.cast(p, tf.int32)
t = tf.cast(t, tf.int32)
##Accuracy Update
batch_range = tf.cast(tf.range(0, batch_size), dtype=tf.int32)
gather = tf.cast(tf.gather_nd(indices,tf.stack([tf.range(0,p.get_shape().as_list()[0]), t], axis=1)), tf.int32)
index = tf.cast(tf.stack([batch_range, gather], axis=1), dtype=tf.int64)
val = tf.cast(tf.equal(p, t), tf.float32)
delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(accuracy.get_shape().as_list(), tf.int64))
accuracy = accuracy + tf.sparse_tensor_to_dense(delta)
##Index Update
index = tf.cast(tf.stack([batch_range, t], axis=1), dtype=tf.int64)
val = tf.constant(1.0, shape=[batch_size])
delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(indices.get_shape().as_list(), dtype=tf.int64))
indices = indices + tf.sparse_tensor_to_dense(delta)
return [accuracy, indices]
accuracy, indices = tf.scan(step_, elems=(tf.transpose(predictions, perm=[1, 0]), tf.transpose(targets, perm=[1, 0])),initializer=[accuracy, indices], name="Scan_Metric_Last")
accuracy = accuracy[-1]
accuracy = tf.reduce_mean(accuracy / nb_classes , axis=0)
return accuracy
|
tests/test_utils_system.py | Muflhi01/videoflow | 1,022 | 11071550 | import pytest
import videoflow.utils.system as system
def test_gpus_available_1(monkeypatch):
monkeypatch.setenv('CUDA_VISIBLE_DEVICES', '')
gpus = system.get_gpus_available_to_process()
assert len(gpus) == 0
monkeypatch.setenv('CUDA_VISIBLE_DEVICES', '0')
gpus = system.get_gpus_available_to_process()
assert len(gpus) == 0
def test_gpus_available_2(monkeypatch):
def get_system_gpus_mock():
return set([0])
monkeypatch.setattr(system, 'get_system_gpus', get_system_gpus_mock)
monkeypatch.setenv('CUDA_VISIBLE_DEVICES', '')
gpus = system.get_gpus_available_to_process()
assert len(gpus) == 0
monkeypatch.setenv('CUDA_VISIBLE_DEVICES', '0')
gpus = system.get_gpus_available_to_process()
assert len(gpus) == 1
assert 0 in gpus
def test_gpus_available_3(monkeypatch):
def get_system_gpus_mock():
return set([0, 1])
monkeypatch.setattr(system, 'get_system_gpus', get_system_gpus_mock)
monkeypatch.setenv('CUDA_VISIBLE_DEVICES', '1, 2')
gpus = system.get_gpus_available_to_process()
assert len(gpus) == 1
assert 1 in gpus
monkeypatch.setenv('CUDA_VISIBLE_DEVICES', '2, 3')
gpus = system.get_gpus_available_to_process()
assert len(gpus) == 0
monkeypatch.setenv('CUDA_VISIBLE_DEVICES', 'asdfa, 1, 0, asdf')
gpus = system.get_gpus_available_to_process()
assert len(gpus) == 2
if __name__ == "__main__":
pytest.main([__file__])
|
vel/rl/buffers/backend/circular_vec_buffer_backend.py | galatolofederico/vel | 273 | 11071566 | import gym
import numpy as np
from vel.exceptions import VelException
def take_along_axis(large_array, indexes):
""" Take along axis """
# Reshape indexes into the right shape
if len(large_array.shape) > len(indexes.shape):
indexes = indexes.reshape(indexes.shape + tuple([1] * (len(large_array.shape) - len(indexes.shape))))
return np.take_along_axis(large_array, indexes, axis=0)
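# --- Shape illustration (added); not used by the backend itself ---
def _take_along_axis_example():
    # Buffer laid out as (capacity=4, num_envs=3, obs_dim=2); indexes as (batch=2, num_envs=3).
    buffer = np.arange(24).reshape(4, 3, 2)
    indexes = np.array([[0, 1, 2],
                        [3, 0, 1]])
    # Picks the requested time step for every env column -> result shape (2, 3, 2).
    return take_along_axis(buffer, indexes)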
class CircularVecEnvBufferBackend:
"""
Backend class for replay buffer that uses a circular buffer - new experience overwrites the oldest one
Version supporting multiple environments.
    Frame stack compensation - if environment has a framestack built in, we will store only the last frame
"""
def __init__(self, buffer_capacity: int, num_envs: int, observation_space: gym.Space, action_space: gym.Space,
frame_stack_compensation: bool=False, frame_history: int=1):
# Maximum number of items in the buffer
self.buffer_capacity = buffer_capacity
self.frame_stack_compensation = frame_stack_compensation
# Number of parallel envs to record
self.num_envs = num_envs
# How many elements have been inserted in the buffer
self.current_size = 0
# Index of last inserted element
self.current_idx = -1
# How many frames are stacked into each observation
self.frame_history = frame_history
# Data buffers
if self.frame_stack_compensation:
self.state_buffer = np.zeros(
[self.buffer_capacity, self.num_envs] + list(observation_space.shape)[:-1] +
[observation_space.shape[-1] // self.frame_history],
dtype=observation_space.dtype
)
else:
self.state_buffer = np.zeros(
[self.buffer_capacity, self.num_envs] + list(observation_space.shape),
dtype=observation_space.dtype
)
self.action_buffer = np.zeros(
[self.buffer_capacity, self.num_envs] + list(action_space.shape), dtype=action_space.dtype
)
self.reward_buffer = np.zeros([self.buffer_capacity, self.num_envs], dtype=np.float32)
self.dones_buffer = np.zeros([self.buffer_capacity, self.num_envs], dtype=bool)
self.extra_data = {}
# Just a sentinel to simplify further calculations
self.dones_buffer[self.current_idx] = True
def store_transition(self, frame, action, reward, done, extra_info=None):
""" Store given transition in the backend """
self.current_idx = (self.current_idx + 1) % self.buffer_capacity
if self.frame_stack_compensation:
# Compensate for frame stack built into the environment
idx_range = np.arange(-frame.shape[-1] // self.frame_history, 0)
frame = np.take(frame, indices=idx_range, axis=-1)
self.state_buffer[self.current_idx] = frame
self.action_buffer[self.current_idx] = action
self.reward_buffer[self.current_idx] = reward
self.dones_buffer[self.current_idx] = done
if extra_info is not None:
for name in extra_info:
if name not in self.extra_data:
assert self.current_size == 0, f"New data {name} encountered in the middle of the training"
array = extra_info[name]
self.extra_data[name] = np.zeros([self.buffer_capacity] + list(array.shape), dtype=array.dtype)
self.extra_data[name][self.current_idx] = extra_info[name]
if self.current_size < self.buffer_capacity:
self.current_size += 1
return self.current_idx
def get_frame_with_future(self, frame_idx, env_idx):
""" Return frame from the buffer together with the next frame """
if frame_idx == self.current_idx:
raise VelException("Cannot provide enough future for the frame")
past_frame = self.get_frame(frame_idx, env_idx)
if not self.dones_buffer[frame_idx, env_idx]:
# We're not done
next_idx = (frame_idx + 1) % self.buffer_capacity
next_frame = self.state_buffer[next_idx, env_idx]
if self.frame_history > 1:
future_frame = np.concatenate([
past_frame.take(indices=np.arange(1, past_frame.shape[-1]), axis=-1), next_frame
], axis=-1)
else:
future_frame = next_frame
else:
# We are done
future_frame = np.zeros_like(past_frame)
return past_frame, future_frame
def get_frame_with_future_forward_steps(self, frame_idx, env_idx, forward_steps, discount_factor):
""" Return frame from the buffer together with the next frame """
index_array = np.arange(frame_idx, frame_idx+forward_steps) % self.current_size
if self.current_idx in index_array:
raise VelException("Cannot provide enough future for the frame")
past_frame = self.get_frame(frame_idx, env_idx)
dones_array = self.dones_buffer[index_array, env_idx]
rewards_array = self.reward_buffer[index_array, env_idx]
discounted_rewards_array = rewards_array * (discount_factor ** np.arange(forward_steps))
if dones_array.any():
# Are we done between current frame and frame + n
done = True
dones_shifted = np.zeros_like(dones_array)
dones_shifted[1:] = dones_array[:-1]
reward = discounted_rewards_array[~np.logical_or.accumulate(dones_shifted)].sum()
future_frame = np.zeros_like(past_frame)
else:
done = False
reward = discounted_rewards_array.sum()
if forward_steps >= self.frame_history:
frame_indices = (index_array[:self.frame_history] + 1) % self.buffer_capacity
future_frame = np.moveaxis(self.state_buffer[frame_indices, env_idx], 0, -2).reshape(past_frame.shape)
else:
frame_candidate = np.moveaxis(
self.state_buffer[(index_array + 1) % self.buffer_capacity, env_idx], 0, -2
)
frame_candidate_target_shape = (
list(frame_candidate.shape[:-2]) + [frame_candidate.shape[-2] * frame_candidate.shape[-1]]
)
future_frame = np.concatenate([
past_frame[..., (frame_candidate_target_shape[-1] - past_frame.shape[-1]):],
frame_candidate.reshape(frame_candidate_target_shape)
], -1)
return past_frame, future_frame, reward, done
def get_frame(self, frame_idx, env_idx):
""" Return frame from the buffer """
if frame_idx >= self.current_size:
raise VelException("Requested frame beyond the size of the buffer")
accumulator = []
last_frame = self.state_buffer[frame_idx, env_idx]
accumulator.append(last_frame)
for i in range(self.frame_history - 1):
prev_idx = (frame_idx - 1) % self.buffer_capacity
if prev_idx == self.current_idx:
raise VelException("Cannot provide enough history for the frame")
elif self.dones_buffer[prev_idx, env_idx]:
# If previous frame was done - just append zeroes
accumulator.append(np.zeros_like(last_frame))
else:
frame_idx = prev_idx
accumulator.append(self.state_buffer[frame_idx, env_idx])
# We're pushing the elements in reverse order
return np.concatenate(accumulator[::-1], axis=-1)
def get_transition(self, frame_idx, env_idx):
""" Single transition with given index """
past_frame, future_frame = self.get_frame_with_future(frame_idx, env_idx)
data_dict = {
'observations': past_frame,
'observations_next': future_frame,
'actions': self.action_buffer[frame_idx, env_idx],
'rewards': self.reward_buffer[frame_idx, env_idx],
'dones': self.dones_buffer[frame_idx, env_idx],
}
for name in self.extra_data:
data_dict[name] = self.extra_data[name][frame_idx, env_idx]
return data_dict
def get_transitions(self, indexes):
""" Get dictionary of transition data """
assert indexes.shape[1] == self.state_buffer.shape[1], \
"Must have the same number of indexes as there are environments"
frame_batch_shape = (
[indexes.shape[0], indexes.shape[1]]
+ list(self.state_buffer.shape[2:-1])
+ [self.state_buffer.shape[-1] * self.frame_history]
)
past_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
future_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
for buffer_idx, frame_row in enumerate(indexes):
for env_idx, frame_idx in enumerate(frame_row):
past_frame_buffer[buffer_idx, env_idx], future_frame_buffer[buffer_idx, env_idx] = (
self.get_frame_with_future(frame_idx, env_idx)
)
actions = take_along_axis(self.action_buffer, indexes)
rewards = take_along_axis(self.reward_buffer, indexes)
dones = take_along_axis(self.dones_buffer, indexes)
transition_tensors = {
'observations': past_frame_buffer,
'actions': actions,
'rewards': rewards,
'observations_next': future_frame_buffer,
'dones': dones.astype(np.float32),
}
for name in self.extra_data:
transition_tensors[name] = take_along_axis(self.extra_data[name], indexes)
return transition_tensors
def get_transitions_forward_steps(self, indexes, forward_steps, discount_factor):
"""
Get dictionary of a transition data - where the target of a transition is
n steps forward along the trajectory. Rewards are properly aggregated according to the discount factor,
and the process stops when trajectory is done.
"""
frame_batch_shape = (
[indexes.shape[0], indexes.shape[1]]
+ list(self.state_buffer.shape[2:-1])
+ [self.state_buffer.shape[-1] * self.frame_history]
)
simple_batch_shape = [indexes.shape[0], indexes.shape[1]]
past_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
future_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
reward_buffer = np.zeros(simple_batch_shape, dtype=np.float32)
dones_buffer = np.zeros(simple_batch_shape, dtype=bool)
for buffer_idx, frame_row in enumerate(indexes):
for env_idx, frame_idx in enumerate(frame_row):
past_frame, future_frame, reward, done = self.get_frame_with_future_forward_steps(
frame_idx, env_idx, forward_steps=forward_steps, discount_factor=discount_factor
)
past_frame_buffer[buffer_idx, env_idx] = past_frame
future_frame_buffer[buffer_idx, env_idx] = future_frame
reward_buffer[buffer_idx, env_idx] = reward
dones_buffer[buffer_idx, env_idx] = done
actions = take_along_axis(self.action_buffer, indexes)
transition_tensors = {
'observations': past_frame_buffer,
'actions': actions,
'rewards': reward_buffer,
'observations_next': future_frame_buffer,
'dones': dones_buffer.astype(np.float32),
}
for name in self.extra_data:
transition_tensors[name] = take_along_axis(self.extra_data[name], indexes)
return transition_tensors
def get_trajectories(self, indexes, rollout_length):
""" Return batch consisting of *consecutive* transitions """
# assert indexes.shape[0] > 1, "There must be multiple indexes supplied"
assert rollout_length > 1, "Rollout length must be greater than 1"
batch_indexes = (
indexes.reshape(1, indexes.shape[0]) - np.arange(rollout_length - 1, -1, -1).reshape(rollout_length, 1)
)
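        # For example, with indexes=[10, 25] and rollout_length=3 this yields
        # batch_indexes == [[8, 23], [9, 24], [10, 25]]: each column is a run of
        # consecutive frame indexes for one environment, ending at the sampled index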
return self.get_transitions(batch_indexes)
def sample_batch_transitions(self, batch_size, forward_steps=1):
""" Return indexes of next sample"""
results = []
for i in range(self.num_envs):
results.append(self.sample_frame_single_env(batch_size, forward_steps=forward_steps))
return np.stack(results, axis=-1)
def sample_batch_trajectories(self, rollout_length):
""" Return indexes of next random rollout """
results = []
for i in range(self.num_envs):
results.append(self.sample_rollout_single_env(rollout_length))
return np.stack(results, axis=-1)
def sample_rollout_single_env(self, rollout_length):
""" Return indexes of next sample"""
# Sample from up to total size
if self.current_size < self.buffer_capacity:
if rollout_length + 1 > self.current_size:
raise VelException("Not enough elements in the buffer to sample the rollout")
# -1 because we cannot take the last one
return np.random.choice(self.current_size - rollout_length) + rollout_length - 1
else:
if rollout_length + self.frame_history > self.current_size:
raise VelException("Not enough elements in the buffer to sample the rollout")
candidate = np.random.choice(self.buffer_capacity)
# These are the elements we cannot draw, as then we don't have enough history
forbidden_ones = (
np.arange(self.current_idx, self.current_idx + self.frame_history + rollout_length - 1)
% self.buffer_capacity
)
# Exclude these frames for learning as they may have some part of history overwritten
while candidate in forbidden_ones:
candidate = np.random.choice(self.buffer_capacity)
return candidate
def sample_frame_single_env(self, batch_size, forward_steps=1):
""" Return an in index of a random set of frames from a buffer, that have enough history and future """
# Whole idea of this function is to make sure that sample we take is far away from the point which we are
# currently writing to the buffer, which is 'discontinuous'
if self.current_size < self.buffer_capacity:
            # Sample from up to the total size of the buffer
            # Exclude the last forward_steps frames - they do not have enough future yet
return np.random.choice(self.current_size - forward_steps, batch_size, replace=False)
else:
candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False)
forbidden_ones = (
np.arange(self.current_idx - forward_steps + 1, self.current_idx + self.frame_history)
% self.buffer_capacity
)
# Exclude these frames for learning as they may have some part of history overwritten
while any(x in candidate for x in forbidden_ones):
candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False)
return candidate
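# Illustrative usage sketch. This helper is an addition for exposition and not part of the
# original module; it assumes `backend` is an already constructed and filled instance of the
# buffer backend defined above (its constructor and class name are outside this excerpt) and
# only calls the sampling methods whose signatures are visible here.
def _sample_training_batches(backend, batch_size=32, n_steps=3, gamma=0.99, rollout_length=8):
    """ Pull a 1-step batch, an n-step batch and a rollout batch from the buffer backend """
    one_step_indexes = backend.sample_batch_transitions(batch_size)
    one_step_batch = backend.get_transitions(one_step_indexes)
    n_step_indexes = backend.sample_batch_transitions(batch_size, forward_steps=n_steps)
    n_step_batch = backend.get_transitions_forward_steps(
        n_step_indexes, forward_steps=n_steps, discount_factor=gamma
    )
    rollout_indexes = backend.sample_batch_trajectories(rollout_length)
    rollout_batch = backend.get_trajectories(rollout_indexes, rollout_length)
    # Each batch is a dictionary with 'observations', 'actions', 'rewards',
    # 'observations_next' and 'dones' keys; the get_* methods raise VelException
    # when a sampled frame does not yet have enough history or future around the
    # current write position
    return one_step_batch, n_step_batch, rollout_batch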
|
tests/integration_tests/test_mxnet.py | captain-pool/optuna | 1,300 | 11071606 | from typing import Union
import mxnet as mx
import numpy as np
import pytest
import optuna
from optuna.integration.mxnet import MXNetPruningCallback
from optuna.testing.integration import DeterministicPruner
def test_mxnet_pruning_callback() -> None:
def objective(trial: optuna.trial.Trial, eval_metric: Union[list, str]) -> float:
# Symbol.
data = mx.symbol.Variable("data")
data = mx.symbol.FullyConnected(data=data, num_hidden=1)
data = mx.symbol.Activation(data=data, act_type="sigmoid")
mlp = mx.symbol.SoftmaxOutput(data=data, name="softmax")
# Optimizer.
optimizer = mx.optimizer.RMSProp()
# Dataset.
train_data = mx.io.NDArrayIter(
data=np.zeros((16, 20), np.float32),
label=np.zeros((16,), np.int32),
batch_size=1,
shuffle=True,
)
eval_data = mx.io.NDArrayIter(
data=np.zeros((5, 20), np.float32),
label=np.zeros((5,), np.int32),
batch_size=1,
shuffle=True,
)
model = mx.mod.Module(symbol=mlp)
model.fit(
train_data=train_data,
eval_data=eval_data,
eval_metric=eval_metric,
optimizer=optimizer,
num_epoch=1,
eval_end_callback=MXNetPruningCallback(trial, "accuracy"),
)
return 1.0
study = optuna.create_study(pruner=DeterministicPruner(True))
study.optimize(lambda trial: objective(trial, "accuracy"), n_trials=1)
assert study.trials[0].state == optuna.trial.TrialState.PRUNED
study = optuna.create_study(pruner=DeterministicPruner(False))
study.optimize(lambda trial: objective(trial, "accuracy"), n_trials=1)
assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
assert study.trials[0].value == 1.0
with pytest.raises(ValueError):
objective(optuna.trial.Trial(study, 0), [])
objective(optuna.trial.Trial(study, 0), ["mae"])
study.optimize(lambda trial: objective(trial, ["accuracy", "mae"]), n_trials=1)
|
powerline/segments/common/vcs.py | MrFishFinger/powerline | 11,435 | 11071612 | <reponame>MrFishFinger/powerline<filename>powerline/segments/common/vcs.py<gh_stars>1000+
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline.lib.vcs import guess, tree_status
from powerline.segments import Segment, with_docstring
from powerline.theme import requires_segment_info, requires_filesystem_watcher
@requires_filesystem_watcher
@requires_segment_info
class BranchSegment(Segment):
divider_highlight_group = None
@staticmethod
def get_directory(segment_info):
return segment_info['getcwd']()
def __call__(self, pl, segment_info, create_watcher, status_colors=False, ignore_statuses=()):
name = self.get_directory(segment_info)
if name:
repo = guess(path=name, create_watcher=create_watcher)
if repo is not None:
branch = repo.branch()
scol = ['branch']
if status_colors:
try:
status = tree_status(repo, pl)
except Exception as e:
pl.exception('Failed to compute tree status: {0}', str(e))
status = '?'
else:
status = status and status.strip()
if status in ignore_statuses:
status = None
scol.insert(0, 'branch_dirty' if status else 'branch_clean')
return [{
'contents': branch,
'highlight_groups': scol,
'divider_highlight_group': self.divider_highlight_group,
}]
branch = with_docstring(BranchSegment(),
'''Return the current VCS branch.
:param bool status_colors:
	Determines whether the repository status is used to select the highlight group.
	Default: False.
:param list ignore_statuses:
	List of statuses which will not result in the repository being marked as dirty.
	Most useful is setting this option to ``["U"]``: this ignores repositories that
	only contain untracked files (i.e. a repository with modified, deleted or removed
	files will be marked as dirty, while untracked files alone will leave the segment
	showing a clean repository). Only applicable if the ``status_colors`` option
	is True.
Highlight groups used: ``branch_clean``, ``branch_dirty``, ``branch``.
''')
@requires_filesystem_watcher
@requires_segment_info
class StashSegment(Segment):
divider_highlight_group = None
@staticmethod
def get_directory(segment_info):
return segment_info['getcwd']()
def __call__(self, pl, segment_info, create_watcher):
name = self.get_directory(segment_info)
if name:
repo = guess(path=name, create_watcher=create_watcher)
if repo is not None:
stash = getattr(repo, 'stash', None)
if stash:
stashes = stash()
if stashes:
return [{
'contents': str(stashes),
'highlight_groups': ['stash'],
'divider_highlight_group': self.divider_highlight_group
}]
stash = with_docstring(StashSegment(),
'''Return the number of current VCS stash entries, if any.
Highlight groups used: ``stash``.
''')
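# Configuration sketch, added for illustration and not defined in this module. In a powerline
# theme the two segments above are referenced by their import path; the surrounding theme
# structure shown below reflects typical usage and should be treated as an assumption here:
#
#     {
#         "segments": {
#             "left": [
#                 {"function": "powerline.segments.common.vcs.stash"},
#                 {"function": "powerline.segments.common.vcs.branch",
#                  "args": {"status_colors": true, "ignore_statuses": ["U"]}}
#             ]
#         }
#     }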
|