max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
core/modules/modules.py | WoojuLee24/spvnas | 149 | 12680917 | <filename>core/modules/modules.py
import random
from abc import abstractmethod
import torch.nn as nn
__all__ = ['RandomModule', 'RandomChoice', 'RandomDepth']
class RandomModule(nn.Module):
@abstractmethod
def random_sample(self):
pass
@abstractmethod
def clear_sample(self):
pass
@abstractmethod
def manual_select(self, sample):
pass
def forward(self, *inputs):
return self.determinize()(*inputs)
@abstractmethod
def determinize(self):
pass
class RandomChoice(RandomModule):
def __init__(self, *choices):
super().__init__()
self.choices = nn.ModuleList(choices)
def random_sample(self):
self.index = random.randint(0, len(self.choices) - 1)
return self.index
def clear_sample(self):
self.index = None
def manual_select(self, index):
self.index = index
def determinize(self):
return self.choices[self.index]
class RandomDepth(RandomModule):
def __init__(self, *layers, depth_min=None, depth_max=None):
super().__init__()
self.layers = nn.ModuleList(layers)
self.depth_min = depth_min
self.depth_max = depth_max
def random_sample(self):
if self.depth_min is not None:
depth_min = self.depth_min
else:
depth_min = 0
if self.depth_max is not None:
depth_max = self.depth_max
else:
depth_max = len(self.layers)
self.depth = random.randint(depth_min, depth_max)
return self.depth
def clear_sample(self):
self.depth = None
def status(self):
return self.depth
def manual_select(self, depth):
self.depth = depth
# fixme: support tuples as input
def forward(self, x):
for k in range(self.depth):
x = self.layers[k](x)
return x
def determinize(self):
return nn.Sequential(*self.layers[:self.depth])
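# Hedged usage sketch appended for illustration; it is not part of the original spvnas module
# and the layer sizes below are arbitrary assumptions.
if __name__ == "__main__":
    import torch

    choice = RandomChoice(nn.Linear(8, 8), nn.Identity())
    depth_block = RandomDepth(nn.Linear(8, 8), nn.Linear(8, 8), depth_min=1)
    choice.random_sample()          # picks one of the two candidate branches
    depth_block.random_sample()     # picks how many stacked layers to run (1 or 2 here)
    y = depth_block(choice(torch.randn(4, 8)))
    print(y.shape, depth_block.determinize())   # determinize() freezes the sampled prefix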
|
mmaction/datasets/samplers/__init__.py | kiyoon/Video-Swin-Transformer | 648 | 12680918 | from .distributed_sampler import (ClassSpecificDistributedSampler,
DistributedSampler)
__all__ = ['DistributedSampler', 'ClassSpecificDistributedSampler']
|
core/basestore.py | miguelvm/PyTrendFollow | 158 | 12680923 | <filename>core/basestore.py
from config.settings import quotes_storage
"""
This file imports data read/write methods for a local storage depending on the user's choice.
These methods are used in core.contract_store.
"""
if quotes_storage == 'hdf5':
from core.hdfstore import read_symbol, read_contract, write_data, drop_symbol |
mysql2pgsql/lib/config.py | asvedr/py-mysql2pgsql | 275 | 12680936 | from __future__ import with_statement, absolute_import
import os.path
from yaml import load
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from .errors import ConfigurationFileInitialized,\
ConfigurationFileNotFound
class ConfigBase(object):
def __init__(self, config_file_path):
self.options = load(open(config_file_path))
class Config(ConfigBase):
def __init__(self, config_file_path, generate_if_not_found=True):
if not os.path.isfile(config_file_path):
if generate_if_not_found:
self.reset_configfile(config_file_path)
if os.path.isfile(config_file_path):
raise ConfigurationFileInitialized("""No configuration file found.
A new file has been initialized at: %s
Please review the configuration and retry...""" % config_file_path)
else:
raise ConfigurationFileNotFound("cannot load config file %s" % config_file_path)
super(Config, self).__init__(config_file_path)
def reset_configfile(self, file_path):
with open(file_path, 'w') as f:
f.write(CONFIG_TEMPLATE)
CONFIG_TEMPLATE = """
# a socket connection will be selected if a 'socket' is specified
# also 'localhost' is a special 'hostname' for MySQL that overrides the 'port' option
# and forces it to use a local socket connection
# if tcp is chosen, you can use compression
mysql:
hostname: localhost
port: 3306
socket: /tmp/mysql.sock
username: mysql2psql
password:
database: mysql2psql_test
compress: false
destination:
# if file is given, output goes to file, else postgres
file:
postgres:
hostname: localhost
port: 5432
username: mysql2psql
password:
database: mysql2psql_test
# if tables is given, only the listed tables will be converted. leave empty to convert all tables.
#only_tables:
#- table1
#- table2
# if exclude_tables is given, exclude the listed tables from the conversion.
#exclude_tables:
#- table3
#- table4
# if supress_data is true, only the schema definition will be exported/migrated, and not the data
supress_data: false
# if supress_ddl is true, only the data will be exported/imported, and not the schema
supress_ddl: false
# if force_truncate is true, forces a table truncate before table loading
force_truncate: false
# if timezone is true, forces to append/convert to UTC tzinfo mysql data
timezone: false
# if index_prefix is given, indexes will be created with a name prefixed with index_prefix
index_prefix:
"""
|
mayan/apps/documents/serializers/document_version_serializers.py | bonitobonita24/Mayan-EDMS | 343 | 12680955 | <filename>mayan/apps/documents/serializers/document_version_serializers.py
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.serializers import ContentTypeSerializer
from mayan.apps.rest_api import serializers
from mayan.apps.rest_api.relations import MultiKwargHyperlinkedIdentityField
from ..models.document_version_models import DocumentVersion
from ..models.document_version_page_models import DocumentVersionPage
class DocumentVersionPageSerializer(serializers.HyperlinkedModelSerializer):
content_type = ContentTypeSerializer(read_only=True)
content_type_id = serializers.IntegerField(
help_text=_('Content type ID of the source object for the page.'),
write_only=True
)
document_version_url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_version.document.pk',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'document_version_id',
'lookup_url_kwarg': 'document_version_id',
}
),
view_name='rest_api:documentversion-detail'
)
image_url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_version.document.pk',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'document_version_id',
'lookup_url_kwarg': 'document_version_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_version_page_id',
}
),
view_name='rest_api:documentversionpage-image'
)
url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_version.document.pk',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'document_version_id',
'lookup_url_kwarg': 'document_version_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_version_page_id',
}
),
view_name='rest_api:documentversionpage-detail'
)
class Meta:
fields = (
'content_type', 'content_type_id', 'document_version_id',
'document_version_url', 'id', 'image_url', 'object_id',
'page_number', 'url'
)
model = DocumentVersionPage
read_only_fields = (
'content_type', 'document_version_id', 'document_version_url',
'id', 'image_url', 'url'
)
class DocumentVersionSerializer(serializers.HyperlinkedModelSerializer):
document_url = serializers.HyperlinkedIdentityField(
lookup_field='document_id',
lookup_url_kwarg='document_id',
view_name='rest_api:document-detail'
)
export_url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_id',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_version_id',
},
),
view_name='rest_api:documentversion-export'
)
page_list_url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_id',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_version_id',
},
),
view_name='rest_api:documentversionpage-list'
)
pages_first = DocumentVersionPageSerializer(many=False, read_only=True)
url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_id',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_version_id',
},
),
view_name='rest_api:documentversion-detail'
)
class Meta:
fields = (
'active', 'comment', 'document_id', 'document_url', 'export_url',
'id', 'page_list_url', 'pages_first', 'timestamp', 'url'
)
model = DocumentVersion
read_only_fields = (
'document_id', 'document_url', 'export_url', 'id',
'page_list_url', 'pages_first', 'timestamp', 'url'
)
|
sacrebleu/metrics/helpers.py | jhcross/sacrebleu | 373 | 12680970 | """Various utility functions for word and character n-gram extraction."""
from collections import Counter
from typing import List, Tuple
def extract_all_word_ngrams(line: str, min_order: int, max_order: int) -> Tuple[Counter, int]:
"""Extracts all ngrams (min_order <= n <= max_order) from a sentence.
:param line: A string sentence.
:param min_order: Minimum n-gram order.
:param max_order: Maximum n-gram order.
:return: a Counter object with n-grams counts and the sequence length.
"""
ngrams = []
tokens = line.split()
for n in range(min_order, max_order + 1):
for i in range(0, len(tokens) - n + 1):
ngrams.append(tuple(tokens[i: i + n]))
return Counter(ngrams), len(tokens)
def extract_word_ngrams(tokens: List[str], n: int) -> Counter:
"""Extracts n-grams with order `n` from a list of tokens.
:param tokens: A list of tokens.
:param n: The order of n-grams.
:return: a Counter object with n-grams counts.
"""
return Counter([' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)])
def extract_char_ngrams(line: str, n: int, include_whitespace: bool = False) -> Counter:
"""Yields counts of character n-grams from a sentence.
:param line: A segment containing a sequence of words.
:param n: The order of the n-grams.
:param include_whitespace: If given, will not strip whitespaces from the line.
:return: a dictionary containing ngrams and counts
"""
if not include_whitespace:
line = ''.join(line.split())
return Counter([line[i:i + n] for i in range(len(line) - n + 1)])
def extract_all_char_ngrams(
line: str, max_order: int, include_whitespace: bool = False) -> List[Counter]:
"""Extracts all character n-grams at once for convenience.
:param line: A segment containing a sequence of words.
:param max_order: The maximum order of the n-grams.
:param include_whitespace: If given, will not strip whitespaces from the line.
:return: a list of Counter objects containing ngrams and counts.
"""
counters = []
if not include_whitespace:
line = ''.join(line.split())
for n in range(1, max_order + 1):
ngrams = Counter([line[i:i + n] for i in range(len(line) - n + 1)])
counters.append(ngrams)
return counters
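# Hedged usage sketch appended for illustration; it is not part of the original sacrebleu module.
if __name__ == "__main__":
    counts, length = extract_all_word_ngrams("the cat sat", 1, 2)
    assert counts[("the", "cat")] == 1 and length == 3
    assert extract_char_ngrams("ab ab", 2) == Counter({"ab": 2, "ba": 1})  # whitespace stripped first
    assert len(extract_all_char_ngrams("abc", 2)) == 2  # one Counter per order 1..max_order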
|
py4web/utils/dbstore.py | DonaldMcC/py4web | 133 | 12680976 | from datetime import datetime, timedelta
class DBStore:
def __init__(self, db, name="py4web_session"):
self.__prerequisites__ = [db]
Field = db.Field
self.db = db
if not name in db.tables:
db.define_table(
name,
Field("rkey", "string"),
Field("rvalue", "text"),
Field("expiration", "integer"),
Field("created_on", "datetime"),
Field("expires_on", "datetime"),
)
db.commit()
self.table = db[name]
def get(self, key):
db, table, now = self.db, self.table, datetime.utcnow()
row = db(table.rkey == key).select().first()
if not row:
return None
if row.expiration:
row.update_record(expires_on=now + timedelta(row.expiration))
return row.rvalue
def set(self, key, value, expiration=None):
db, table, now = self.db, self.table, datetime.utcnow()
db(table.expires_on < now).delete()
row = db(table.rkey == key).select().first()
expires_on = (
now + timedelta(expiration) if expiration else datetime(2999, 12, 31)
)
if row:
row.update_record(
rvalue=value, expires_on=expires_on, expiration=expiration
)
else:
table.insert(
rkey=key,
rvalue=value,
expires_on=expires_on,
expiration=expiration,
created_on=now,
)
db.commit()
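# Hedged usage sketch (comments only); assumes an already configured pyDAL DAL instance named `db`:
#   store = DBStore(db)
#   store.set("session-key", "serialized-session-data", expiration=1)
#   value = store.get("session-key")   # returns the stored rvalue, or None once the row is gone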
|
chitra/serve/api.py | rajatscibi/chitra | 158 | 12680979 | <reponame>rajatscibi/chitra
from typing import Callable, Dict, Optional
import uvicorn
from fastapi import FastAPI, File, UploadFile
from chitra.__about__ import documentation_url
from chitra.serve import schema
from chitra.serve.base import ModelServer
from chitra.serve.constants import IMAGE_CLF, OBJECT_DETECTION, QNA, TXT_CLF
class API(ModelServer):
def __init__(
self,
api_type: str,
model: Callable,
preprocess_fn: Optional[Callable] = None,
preprocess_conf: Optional[Dict] = None,
postprocess_fn: Optional[Callable] = None,
postprocess_conf: Optional[Dict] = None,
**kwargs,
):
"""
Creates FastAPI app for `api_type`
Args:
api_type: Type of the API. See `API.available_api_types()`
model: Any ML/DL model
preprocess_fn: Override Data Preprocessing Function, data will
be processed with this function
before calling model.
postprocess_fn: Override Data Postprocessing Function, model
output will be passed into this function.
**kwargs:
"""
super(API, self).__init__(
api_type, model, preprocess_fn, postprocess_fn, **kwargs
)
docs_url = kwargs.get("docs_url", "/docs")
title = kwargs.get("title", "Chitra Model Server 🔥")
desc = kwargs.get(
"description",
f"<a href={documentation_url}>Goto Chitra Docs</a> 🔗",
)
self.app: FastAPI = FastAPI(title=title, description=desc, docs_url=docs_url)
if not preprocess_conf:
preprocess_conf = {}
if not postprocess_conf:
postprocess_conf = {}
self.preprocess_conf = preprocess_conf
self.postprocess_conf = postprocess_conf
self.setup(**kwargs)
async def predict_image(self, file: UploadFile = File(...)):
preprocess_fn = self.data_processor.preprocess_fn
postprocess_fn = self.data_processor.postprocess_fn
x = preprocess_fn(await file.read())
x = self.model(x)
x = postprocess_fn(x)
return x
async def predict_text(self, data: schema.Query):
data_processor = self.data_processor
x = data.query
if data_processor.preprocess_fn:
x = data_processor.preprocess(x)
x = self.model(x)
if data_processor.postprocess_fn:
x = data_processor.postprocess(x)
return x
async def predict_question_answer(self, data: schema.QnARequest):
data_processor = self.data_processor
x = data.query, data.question
if data_processor.preprocess_fn:
x = data_processor.preprocess(x)
x = self.model(x)
if data_processor.postprocess_fn:
x = data_processor.postprocess(x)
return x
def setup(self, **_):
if self.api_type in (IMAGE_CLF, OBJECT_DETECTION):
self.app.post("/api/predict-image")(self.predict_image)
elif self.api_type == TXT_CLF:
self.app.post("/api/predict-text")(self.predict_text)
elif self.api_type == QNA:
self.app.post("/api/QnA")(self.predict_question_answer)
def run(self):
uvicorn.run(self.app)
def create_api(
model: Callable,
api_type: str = "IMAGE-CLASSIFICATION",
preprocess_fn: Callable = None,
preprocess_conf: Optional[Dict] = None,
postprocess_fn: Callable = None,
postprocess_conf: Optional[Dict] = None,
run: bool = False,
**kwargs,
) -> API:
"""
Launch FastAPI app
Args:
model: Any ML/DL model
api_type: Type of the API task, see `chitra.serve.get_available_api_types()`
preprocess_fn: Override default preprocessing function
preprocess_conf: Arguments for preprocessing function
postprocess_fn: Override default postprocessing function
postprocess_conf: Arguments for postprocessing function
run: Set True to run the app
**kwargs:
Returns:
Object of `chitra.serve.API` class
"""
api = API(
api_type,
model,
preprocess_fn=preprocess_fn,
preprocess_conf=preprocess_conf,
postprocess_fn=postprocess_fn,
postprocess_conf=postprocess_conf,
**kwargs,
)
if run:
api.run()
return api
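# Hedged usage sketch (comments only); `my_model` is a placeholder for any callable model:
#   api = create_api(my_model, api_type="IMAGE-CLASSIFICATION", run=False)
#   api.run()   # starts uvicorn and exposes POST /api/predict-image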
|
pontoon/base/management/commands/heroku_deploy_setup.py | foss4/pontoon | 1,145 | 12680984 | <reponame>foss4/pontoon
import os
from urllib.parse import urlparse, urljoin
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from pontoon.base.models import Project, User
class Command(BaseCommand):
help = "Setup an instance of Pontoon deployed via Heroku Deploy."
def handle(self, *args, **options):
site_url = os.environ.get("SITE_URL")
app_host = urlparse(site_url).netloc
admin_email = os.environ.get("ADMIN_EMAIL")
admin_password = os.environ.get("ADMIN_PASSWORD")
User.objects.create_superuser(admin_email, admin_email, admin_password)
Site.objects.filter(pk=1).update(name=app_host, domain=app_host)
Project.objects.filter(slug="pontoon-intro").update(
url=urljoin(site_url, "intro/")
)
|
stringsifter/lib/stats.py | noraj/stringsifter | 523 | 12681042 | <reponame>noraj/stringsifter
# Copyright (C) 2019 FireEye, Inc. All Rights Reserved.
"""
english letter probabilities
table from http://en.algoritmy.net/article/40379/Letter-frequency-English
"""
english_letter_probs_percent = [
['a', 8.167],
['b', 1.492],
['c', 2.782],
['d', 4.253],
['e', 12.702],
['f', 2.228],
['g', 2.015],
['h', 6.094],
['i', 6.966],
['j', 0.153],
['k', 0.772],
['l', 4.025],
['m', 2.406],
['n', 6.749],
['o', 7.507],
['p', 1.929],
['q', 0.095],
['r', 5.987],
['s', 6.327],
['t', 9.056],
['u', 2.758],
['v', 0.978],
['w', 2.360],
['x', 0.150],
['y', 1.974],
['z', 0.074]]
english_letter_probs = {lt: (per * 0.01) for lt, per in english_letter_probs_percent}
"""
Scrabble Scores
table from https://en.wikipedia.org/wiki/Scrabble_letter_distributions
"""
scrabble_dict = {"a": 1, "b": 3, "c": 3, "d": 2, "e": 1, "f": 4,
"g": 2, "h": 4, "i": 1, "j": 8, "k": 5, "l": 1,
"m": 3, "n": 1, "o": 1, "p": 3, "q": 10, "r": 1,
"s": 1, "t": 1, "u": 1, "v": 4, "w": 4, "x": 8,
"y": 4, "z": 10}
|
test/watchdog_test/test_get_current_local_nfs_mounts.py | openshift-bot/aws-efs-utils | 196 | 12681051 | <filename>test/watchdog_test/test_get_current_local_nfs_mounts.py
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
import watchdog
MOUNT_FMT_LINE = '{address}:/ {mountpoint} {fs_type} {options} 0 0'
DEFAULT_OPTS = 'rw,port=12345'
def _create_mount_file(tmpdir, lines):
mount_file = tmpdir.join('mounts')
mount_file.write('\n'.join(lines))
return str(mount_file)
def test_no_mounts(tmpdir):
mount_file = _create_mount_file(tmpdir, [])
mounts = watchdog.get_current_local_nfs_mounts(mount_file)
assert {} == mounts
def test_no_local_mounts(tmpdir):
mount_file = _create_mount_file(tmpdir, [MOUNT_FMT_LINE.format(address='10.1.0.1', mountpoint='/mnt',
fs_type='nfs4', options=DEFAULT_OPTS)])
mounts = watchdog.get_current_local_nfs_mounts(mount_file)
assert {} == mounts
def test_no_local_nfs_mounts(tmpdir):
mount_file = _create_mount_file(tmpdir, [MOUNT_FMT_LINE.format(address='127.0.0.1', mountpoint='/mnt',
fs_type='ext4', options=DEFAULT_OPTS)])
mounts = watchdog.get_current_local_nfs_mounts(mount_file)
assert {} == mounts
def test_local_nfs_mount(tmpdir):
mount_file = _create_mount_file(tmpdir, [MOUNT_FMT_LINE.format(address='127.0.0.1', mountpoint='/mnt',
fs_type='nfs4', options=DEFAULT_OPTS)])
mounts = watchdog.get_current_local_nfs_mounts(mount_file)
assert 1 == len(mounts)
assert 'mnt.12345' in mounts
def test_local_nfs_mount_noresvport(tmpdir):
mount_file = _create_mount_file(tmpdir, [MOUNT_FMT_LINE.format(address='127.0.0.1', mountpoint='/mnt',
fs_type='nfs4', options='rw,noresvport,port=12345')])
mounts = watchdog.get_current_local_nfs_mounts(mount_file)
assert 1 == len(mounts)
assert 'mnt.12345' in mounts
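# Hedged additional case appended for illustration; it mirrors the fixtures above and assumes
# get_current_local_nfs_mounts simply collects every local (127.0.0.1) nfs4 entry it finds.
def test_multiple_local_nfs_mounts(tmpdir):
    mount_file = _create_mount_file(tmpdir, [
        MOUNT_FMT_LINE.format(address='127.0.0.1', mountpoint='/mnt/a', fs_type='nfs4', options='rw,port=11111'),
        MOUNT_FMT_LINE.format(address='127.0.0.1', mountpoint='/mnt/b', fs_type='nfs4', options='rw,port=22222'),
    ])
    mounts = watchdog.get_current_local_nfs_mounts(mount_file)
    assert 2 == len(mounts)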
|
tests/models/test_managers.py | operatorai/modelstore | 151 | 12681061 | <filename>tests/models/test_managers.py
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modelstore.models import managers
from modelstore.models.catboost import CatBoostManager
from modelstore.models.pytorch import PyTorchManager
from modelstore.models.pytorch_lightning import PyTorchLightningManager
from modelstore.models.sklearn import SKLearnManager
from modelstore.models.xgboost import XGBoostManager
def test_iter_libraries():
mgrs = {library: manager for library, manager in managers.iter_libraries()}
assert len(mgrs) == 16
assert isinstance(mgrs["sklearn"], SKLearnManager)
assert isinstance(mgrs["pytorch"], PyTorchManager)
assert isinstance(mgrs["xgboost"], XGBoostManager)
assert isinstance(mgrs["catboost"], CatBoostManager)
assert isinstance(mgrs["pytorch_lightning"], PyTorchLightningManager)
|
deephar/data/mpii.py | steuwe/deephar | 343 | 12681071 | import os
import numpy as np
import scipy.io as sio
from PIL import Image
from deephar.utils import *
def load_mpii_mat_annotation(filename):
mat = sio.loadmat(filename)
annot_tr = mat['annot_tr']
annot_val = mat['annot_val']
# Respect the order of TEST (0), TRAIN (1), and VALID (2)
rectidxs = [None, annot_tr[0,:], annot_val[0,:]]
images = [None, annot_tr[1,:], annot_val[1,:]]
annorect = [None, annot_tr[2,:], annot_val[2,:]]
return rectidxs, images, annorect
def serialize_annorect(rectidxs, annorect):
assert len(rectidxs) == len(annorect)
sample_list = []
for i in range(len(rectidxs)):
rec = rectidxs[i]
for j in range(rec.size):
idx = rec[j,0]-1 # Convert idx from Matlab
ann = annorect[i][idx,0]
annot = {}
annot['head'] = ann['head'][0,0][0]
annot['objpos'] = ann['objpos'][0,0][0]
annot['scale'] = ann['scale'][0,0][0,0]
annot['pose'] = ann['pose'][0,0]
annot['imgidx'] = i
sample_list.append(annot)
return sample_list
def calc_head_size(head_annot):
head = np.array([float(head_annot[0]), float(head_annot[1]),
float(head_annot[2]), float(head_annot[3])])
return 0.6 * np.linalg.norm(head[0:2] - head[2:4])
class MpiiSinglePerson(object):
"""Implementation of the MPII dataset for single person.
"""
def __init__(self, dataset_path, dataconf,
poselayout=pa16j2d,
remove_outer_joints=True):
self.dataset_path = dataset_path
self.dataconf = dataconf
self.poselayout = poselayout
self.remove_outer_joints = remove_outer_joints
self.load_annotations(os.path.join(dataset_path, 'annotations.mat'))
def load_annotations(self, filename):
try:
rectidxs, images, annorect = load_mpii_mat_annotation(filename)
self.samples = {}
self.samples[TEST_MODE] = [] # No samples for test
self.samples[TRAIN_MODE] = serialize_annorect(
rectidxs[TRAIN_MODE], annorect[TRAIN_MODE])
self.samples[VALID_MODE] = serialize_annorect(
rectidxs[VALID_MODE], annorect[VALID_MODE])
self.images = images
except:
warning('Error loading the MPII dataset!')
raise
def load_image(self, key, mode):
try:
annot = self.samples[mode][key]
image = self.images[mode][annot['imgidx']][0]
imgt = T(Image.open(os.path.join(
self.dataset_path, 'images', image)))
except:
warning('Error loading sample key/mode: %d/%d' % (key, mode))
raise
return imgt
def get_data(self, key, mode, fast_crop=False):
output = {}
if mode == TRAIN_MODE:
dconf = self.dataconf.random_data_generator()
else:
dconf = self.dataconf.get_fixed_config()
imgt = self.load_image(key, mode)
annot = self.samples[mode][key]
scale = 1.25*annot['scale']
objpos = np.array([annot['objpos'][0], annot['objpos'][1] + 12*scale])
objpos += scale * np.array([dconf['transx'], dconf['transy']])
winsize = 200 * dconf['scale'] * scale
winsize = (winsize, winsize)
output['bbox'] = objposwin_to_bbox(objpos, winsize)
if fast_crop:
"""Slightly faster method, but gives lower precision."""
imgt.crop_resize_rotate(objpos, winsize,
self.dataconf.crop_resolution, dconf['angle'])
else:
imgt.rotate_crop(dconf['angle'], objpos, winsize)
imgt.resize(self.dataconf.crop_resolution)
if dconf['hflip'] == 1:
imgt.horizontal_flip()
imgt.normalize_affinemap()
output['frame'] = normalize_channels(imgt.asarray(),
channel_power=dconf['chpower'])
p = np.empty((self.poselayout.num_joints, self.poselayout.dim))
p[:] = np.nan
head = annot['head']
p[self.poselayout.map_to_mpii, 0:2] = \
transform_2d_points(imgt.afmat, annot['pose'].T, transpose=True)
if imgt.hflip:
p = p[self.poselayout.map_hflip, :]
# Set invalid joints and NaN values as an invalid value
p[np.isnan(p)] = -1e9
v = np.expand_dims(get_visible_joints(p[:,0:2]), axis=-1)
if self.remove_outer_joints:
p[(v==0)[:,0],:] = -1e9
output['pose'] = np.concatenate((p, v), axis=-1)
output['headsize'] = calc_head_size(annot['head'])
output['afmat'] = imgt.afmat.copy()
return output
def get_shape(self, dictkey):
if dictkey == 'frame':
return self.dataconf.input_shape
if dictkey == 'pose':
return (self.poselayout.num_joints, self.poselayout.dim+1)
if dictkey == 'headsize':
return (1,)
if dictkey == 'afmat':
return (3, 3)
raise Exception('Invalid dictkey on get_shape!')
def get_length(self, mode):
return len(self.samples[mode])
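# Hedged usage sketch (comments only); the dataset path and config object are placeholders:
#   mpii = MpiiSinglePerson('/data/mpii', dataconf=my_dataconf)
#   sample = mpii.get_data(0, TRAIN_MODE)   # dict with 'frame', 'pose', 'headsize', 'afmat', 'bbox'
#   mpii.get_shape('pose')                  # (num_joints, dim + 1)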
|
hummingbot/connector/exchange/digifinex/digifinex_exchange.py | cardosofede/hummingbot | 542 | 12681081 | import asyncio
import logging
import math
import time
from decimal import Decimal
from typing import Any, AsyncIterable, Dict, List, Optional
from hummingbot.connector.exchange.digifinex import digifinex_utils
from hummingbot.connector.exchange.digifinex.digifinex_global import DigifinexGlobal
from hummingbot.connector.exchange.digifinex.digifinex_in_flight_order import DigifinexInFlightOrder
from hummingbot.connector.exchange.digifinex.digifinex_order_book_tracker import DigifinexOrderBookTracker
from hummingbot.connector.exchange.digifinex.digifinex_user_stream_tracker import DigifinexUserStreamTracker
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.connector.trading_rule import TradingRule
from hummingbot.core.clock import Clock
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.data_type.common import OpenOrder
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.trade_fee import AddedToCostTradeFee
from hummingbot.core.event.events import (
BuyOrderCompletedEvent,
BuyOrderCreatedEvent,
MarketEvent,
MarketOrderFailureEvent,
OrderCancelledEvent,
OrderFilledEvent,
SellOrderCompletedEvent,
SellOrderCreatedEvent,
)
from hummingbot.core.data_type.common import OrderType, TradeType
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather
from hummingbot.core.utils.estimate_fee import estimate_fee
from hummingbot.logger import HummingbotLogger
ctce_logger = None
s_decimal_NaN = Decimal("nan")
class DigifinexExchange(ExchangeBase):
"""
DigifinexExchange connects with digifinex.com exchange and provides order book pricing, user account tracking and
trading functionality.
"""
API_CALL_TIMEOUT = 10.0
SHORT_POLL_INTERVAL = 5.0
UPDATE_ORDER_STATUS_MIN_INTERVAL = 10.0
LONG_POLL_INTERVAL = 120.0
@classmethod
def logger(cls) -> HummingbotLogger:
global ctce_logger
if ctce_logger is None:
ctce_logger = logging.getLogger(__name__)
return ctce_logger
def __init__(self,
digifinex_api_key: str,
digifinex_secret_key: str,
trading_pairs: Optional[List[str]] = None,
trading_required: bool = True
):
"""
:param digifinex_api_key: The API key to connect to private digifinex.com APIs.
:param digifinex_secret_key: The API secret.
:param trading_pairs: The market trading pairs for which to track order book data.
:param trading_required: Whether actual trading is needed.
"""
super().__init__()
self._trading_required = trading_required
self._trading_pairs = trading_pairs
self._global = DigifinexGlobal(digifinex_api_key, digifinex_secret_key)
# self._rest_api = DigifinexRestApi(self._digifinex_auth, self._http_client)
self._order_book_tracker = DigifinexOrderBookTracker(trading_pairs=trading_pairs)
self._user_stream_tracker = DigifinexUserStreamTracker(self._global, trading_pairs)
self._ev_loop = asyncio.get_event_loop()
self._poll_notifier = asyncio.Event()
self._last_timestamp = 0
self._in_flight_orders: Dict[str, DigifinexInFlightOrder] = {} # Dict[client_order_id:str, DigifinexInFlightOrder]
self._order_not_found_records = {} # Dict[client_order_id:str, count:int]
self._trading_rules = {} # Dict[trading_pair:str, TradingRule]
self._status_polling_task = None
self._user_stream_event_listener_task = None
self._trading_rules_polling_task = None
self._last_poll_timestamp = 0
@property
def name(self) -> str:
return "digifinex"
@property
def order_books(self) -> Dict[str, OrderBook]:
return self._order_book_tracker.order_books
@property
def trading_rules(self) -> Dict[str, TradingRule]:
return self._trading_rules
@property
def in_flight_orders(self) -> Dict[str, DigifinexInFlightOrder]:
return self._in_flight_orders
@property
def status_dict(self) -> Dict[str, bool]:
"""
A dictionary of statuses of various connector's components.
"""
return {
"order_books_initialized": self._order_book_tracker.ready,
"account_balance": len(self._account_balances) > 0 if self._trading_required else True,
"trading_rule_initialized": len(self._trading_rules) > 0,
"user_stream_initialized":
self._user_stream_tracker.data_source.last_recv_time > 0 if self._trading_required else True,
}
@property
def ready(self) -> bool:
"""
:return True when all statuses pass, this might take 5-10 seconds for all the connector's components and
services to be ready.
"""
return all(self.status_dict.values())
@property
def limit_orders(self) -> List[LimitOrder]:
return [
in_flight_order.to_limit_order()
for in_flight_order in self._in_flight_orders.values()
]
@property
def tracking_states(self) -> Dict[str, any]:
"""
:return active in-flight orders in json format, is used to save in sqlite db.
"""
return {
key: value.to_json()
for key, value in self._in_flight_orders.items()
if not value.is_done
}
def restore_tracking_states(self, saved_states: Dict[str, any]):
"""
Restore in-flight orders from saved tracking states, so that the connector can pick up where it left off
when it disconnects.
:param saved_states: The saved tracking_states.
"""
self._in_flight_orders.update({
key: DigifinexInFlightOrder.from_json(value)
for key, value in saved_states.items()
})
def supported_order_types(self) -> List[OrderType]:
"""
:return a list of OrderType supported by this connector.
Note that Market order type is no longer required and will not be used.
"""
return [OrderType.LIMIT, OrderType.LIMIT_MAKER]
def start(self, clock: Clock, timestamp: float):
"""
This function is called automatically by the clock.
"""
super().start(clock, timestamp)
def stop(self, clock: Clock):
"""
This function is called automatically by the clock.
"""
super().stop(clock)
async def start_network(self):
"""
This function is required by NetworkIterator base class and is called automatically.
It starts tracking order book, polling trading rules,
updating statuses and tracking user data.
"""
self._order_book_tracker.start()
self._trading_rules_polling_task = safe_ensure_future(self._trading_rules_polling_loop())
if self._trading_required:
self._status_polling_task = safe_ensure_future(self._status_polling_loop())
self._user_stream_tracker_task = safe_ensure_future(self._user_stream_tracker.start())
self._user_stream_event_listener_task = safe_ensure_future(self._user_stream_event_listener())
async def stop_network(self):
"""
This function is required by NetworkIterator base class and is called automatically.
"""
self._order_book_tracker.stop()
if self._status_polling_task is not None:
self._status_polling_task.cancel()
self._status_polling_task = None
if self._trading_rules_polling_task is not None:
self._trading_rules_polling_task.cancel()
self._trading_rules_polling_task = None
if self._status_polling_task is not None:
self._status_polling_task.cancel()
self._status_polling_task = None
if self._user_stream_tracker_task is not None:
self._user_stream_tracker_task.cancel()
self._user_stream_tracker_task = None
if self._user_stream_event_listener_task is not None:
self._user_stream_event_listener_task.cancel()
self._user_stream_event_listener_task = None
async def check_network(self) -> NetworkStatus:
"""
This function is required by NetworkIterator base class and is called periodically to check
the network connection. Simply ping the network (or call any light weight public API).
"""
try:
await self._global.rest_api.request("get", "ping")
except asyncio.CancelledError:
raise
except Exception as e:
_ = e
self.logger().exception('check_network', stack_info=True)
return NetworkStatus.NOT_CONNECTED
return NetworkStatus.CONNECTED
async def _trading_rules_polling_loop(self):
"""
Periodically update trading rule.
"""
while True:
try:
await self._update_trading_rules()
await asyncio.sleep(60)
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().network(f"Unexpected error while fetching trading rules. Error: {str(e)}",
exc_info=True,
app_warning_msg="Could not fetch new trading rules from digifinex.com. "
"Check network connection.")
await asyncio.sleep(0.5)
async def _update_trading_rules(self):
instruments_info = await self._global.rest_api.request("get", path_url="markets")
self._trading_rules.clear()
self._trading_rules = self._format_trading_rules(instruments_info)
def _format_trading_rules(self, instruments_info: Dict[str, Any]) -> Dict[str, TradingRule]:
"""
Converts json API response into a dictionary of trading rules.
:param instruments_info: The json API response
:return A dictionary of trading rules.
Response Example:
{
"data": [{
"volume_precision": 4,
"price_precision": 2,
"market": "btc_usdt",
"min_amount": 2,
"min_volume": 0.0001
}],
"date": 1589873858,
"code": 0
}
"""
result = {}
for rule in instruments_info["data"]:
try:
trading_pair = digifinex_utils.convert_from_exchange_trading_pair(rule["market"])
price_decimals = Decimal(str(rule["price_precision"]))
quantity_decimals = Decimal(str(rule["volume_precision"]))
# E.g. a price decimal of 2 means 0.01 incremental.
price_step = Decimal("1") / Decimal(str(math.pow(10, price_decimals)))
quantity_step = Decimal("1") / Decimal(str(math.pow(10, quantity_decimals)))
result[trading_pair] = TradingRule(trading_pair,
min_price_increment=price_step,
min_base_amount_increment=quantity_step)
except Exception:
self.logger().error(f"Error parsing the trading pair rule {rule}. Skipping.", exc_info=True)
return result
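# Worked example for the precision-to-step conversion above (illustrative exchange values):
#   price_precision = 2  -> min_price_increment       of Decimal('0.01')
#   volume_precision = 4 -> min_base_amount_increment of Decimal('0.0001')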
def get_order_price_quantum(self, trading_pair: str, price: Decimal):
"""
Returns a price step, a minimum price increment for a given trading pair.
"""
trading_rule = self._trading_rules[trading_pair]
return trading_rule.min_price_increment
def get_order_size_quantum(self, trading_pair: str, order_size: Decimal):
"""
Returns an order amount step, a minimum amount increment for a given trading pair.
"""
trading_rule = self._trading_rules[trading_pair]
return Decimal(trading_rule.min_base_amount_increment)
def get_order_book(self, trading_pair: str) -> OrderBook:
if trading_pair not in self._order_book_tracker.order_books:
raise ValueError(f"No order book exists for '{trading_pair}'.")
return self._order_book_tracker.order_books[trading_pair]
def buy(self, trading_pair: str, amount: Decimal, order_type=OrderType.MARKET,
price: Decimal = s_decimal_NaN, **kwargs) -> str:
"""
Buys an amount of base asset (of the given trading pair). This function returns immediately.
To see an actual order, you'll have to wait for BuyOrderCreatedEvent.
:param trading_pair: The market (e.g. BTC-USDT) to buy from
:param amount: The amount in base token value
:param order_type: The order type
:param price: The price (note: this is no longer optional)
:returns A new internal order id
"""
order_id: str = digifinex_utils.get_new_client_order_id(True, trading_pair)
safe_ensure_future(self._create_order(TradeType.BUY, order_id, trading_pair, amount, order_type, price))
return order_id
def sell(self, trading_pair: str, amount: Decimal, order_type=OrderType.MARKET,
price: Decimal = s_decimal_NaN, **kwargs) -> str:
"""
Sells an amount of base asset (of the given trading pair). This function returns immediately.
To see an actual order, you'll have to wait for SellOrderCreatedEvent.
:param trading_pair: The market (e.g. BTC-USDT) to sell from
:param amount: The amount in base token value
:param order_type: The order type
:param price: The price (note: this is no longer optional)
:returns A new internal order id
"""
order_id: str = digifinex_utils.get_new_client_order_id(False, trading_pair)
safe_ensure_future(self._create_order(TradeType.SELL, order_id, trading_pair, amount, order_type, price))
return order_id
def cancel(self, trading_pair: str, order_id: str):
"""
Cancel an order. This function returns immediately.
To get the cancellation result, you'll have to wait for OrderCancelledEvent.
:param trading_pair: The market (e.g. BTC-USDT) of the order.
:param order_id: The internal order id (also called client_order_id)
"""
tracked_order = self._in_flight_orders.get(order_id)
if tracked_order is None:
raise ValueError(f"Failed to cancel order - {order_id}. Order not found.")
if tracked_order.exchange_order_id is None:
self.ev_loop.run_until_complete(tracked_order.get_exchange_order_id())
safe_ensure_future(self._execute_cancel(tracked_order))
return order_id
async def _create_order(self,
trade_type: TradeType,
order_id: str,
trading_pair: str,
amount: Decimal,
order_type: OrderType,
price: Decimal):
"""
Calls create-order API end point to place an order, starts tracking the order and triggers order created event.
:param trade_type: BUY or SELL
:param order_id: Internal order id (also called client_order_id)
:param trading_pair: The market to place order
:param amount: The order amount (in base token value)
:param order_type: The order type
:param price: The order price
"""
if not order_type.is_limit_type():
raise Exception(f"Unsupported order type: {order_type}")
trading_rule = self._trading_rules[trading_pair]
amount = self.quantize_order_amount(trading_pair, amount)
price = self.quantize_order_price(trading_pair, price)
if amount < trading_rule.min_order_size:
raise ValueError(f"Buy order amount {amount} is lower than the minimum order size "
f"{trading_rule.min_order_size}.")
symbol = digifinex_utils.convert_to_exchange_trading_pair(trading_pair)
api_params = {"symbol": symbol,
"type": trade_type.name.lower(),
"price": f"{price:f}",
"amount": f"{amount:f}",
# "client_oid": order_id
}
if order_type is OrderType.LIMIT_MAKER:
api_params["post_only"] = 1
self.start_tracking_order(order_id,
None,
trading_pair,
trade_type,
price,
amount,
order_type
)
try:
order_result = await self._global.rest_api.request("post", "spot/order/new", api_params, True)
exchange_order_id = str(order_result["order_id"])
tracked_order = self._in_flight_orders.get(order_id)
if tracked_order is not None:
self.logger().info(f"Created {order_type.name} {trade_type.name} order {order_id} for "
f"{amount} {trading_pair}.")
tracked_order.update_exchange_order_id(exchange_order_id)
event_tag = MarketEvent.BuyOrderCreated if trade_type is TradeType.BUY else MarketEvent.SellOrderCreated
event_class = BuyOrderCreatedEvent if trade_type is TradeType.BUY else SellOrderCreatedEvent
self.trigger_event(event_tag,
event_class(
self.current_timestamp,
order_type,
trading_pair,
amount,
price,
order_id,
tracked_order.creation_timestamp,
))
except asyncio.CancelledError:
raise
except Exception as e:
self.stop_tracking_order(order_id)
self.logger().network(
f"Error submitting {trade_type.name} {order_type.name} order to Digifinex for "
f"{amount} {trading_pair} "
f"{price}.",
exc_info=True,
app_warning_msg=str(e)
)
self.trigger_event(MarketEvent.OrderFailure,
MarketOrderFailureEvent(self.current_timestamp, order_id, order_type))
def start_tracking_order(self,
order_id: str,
exchange_order_id: str,
trading_pair: str,
trade_type: TradeType,
price: Decimal,
amount: Decimal,
order_type: OrderType):
"""
Starts tracking an order by simply adding it into _in_flight_orders dictionary.
"""
self._in_flight_orders[order_id] = DigifinexInFlightOrder(
client_order_id=order_id,
exchange_order_id=exchange_order_id,
trading_pair=trading_pair,
order_type=order_type,
trade_type=trade_type,
price=price,
amount=amount,
creation_timestamp=self.current_timestamp
)
def stop_tracking_order(self, order_id: str):
"""
Stops tracking an order by simply removing it from _in_flight_orders dictionary.
"""
if order_id in self._in_flight_orders:
del self._in_flight_orders[order_id]
async def _execute_cancel(self, o: DigifinexInFlightOrder) -> str:
"""
Executes order cancellation process by first calling cancel-order API. The API result doesn't confirm whether
the cancellation is successful, it simply states it receives the request.
:param o: The in-flight order to cancel; we then wait for its last_state to change to CANCELED.
"""
try:
await self._global.rest_api.request(
"post",
"spot/order/cancel",
{"order_id": o.exchange_order_id},
True
)
if o.client_order_id in self._in_flight_orders:
self.trigger_event(MarketEvent.OrderCancelled,
OrderCancelledEvent(self.current_timestamp, o.client_order_id))
del self._in_flight_orders[o.client_order_id]
return o.exchange_order_id
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().network(
f"Failed to cancel order {o.exchange_order_id}: {str(e)}",
exc_info=True,
app_warning_msg=f"Failed to cancel the order {o.exchange_order_id} on Digifinex. "
f"Check API key and network connection."
)
async def _status_polling_loop(self):
"""
Periodically update user balances and order status via REST API. This serves as a fallback measure for web
socket API updates.
"""
while True:
try:
self._poll_notifier = asyncio.Event()
await self._poll_notifier.wait()
await safe_gather(
self._update_balances(),
self._update_order_status(),
)
self._last_poll_timestamp = self.current_timestamp
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().error(str(e), exc_info=True)
self.logger().network("Unexpected error while fetching account updates.",
exc_info=True,
app_warning_msg="Could not fetch account updates from Digifinex. "
"Check API key and network connection.")
await asyncio.sleep(0.5)
async def _update_balances(self):
local_asset_names = set(self._account_balances.keys())
remote_asset_names = set()
account_info = await self._global.rest_api.get_balance()
for account in account_info["list"]:
asset_name = account["currency"]
self._account_available_balances[asset_name] = Decimal(str(account["free"]))
self._account_balances[asset_name] = Decimal(str(account["total"]))
remote_asset_names.add(asset_name)
try:
asset_names_to_remove = local_asset_names.difference(remote_asset_names)
for asset_name in asset_names_to_remove:
del self._account_available_balances[asset_name]
del self._account_balances[asset_name]
except Exception as e:
self.logger().error(e)
async def _update_order_status(self):
"""
Calls REST API to get status update for each in-flight order.
"""
last_tick = int(self._last_poll_timestamp / self.UPDATE_ORDER_STATUS_MIN_INTERVAL)
current_tick = int(self.current_timestamp / self.UPDATE_ORDER_STATUS_MIN_INTERVAL)
if current_tick > last_tick and len(self._in_flight_orders) > 0:
tracked_orders = list(self._in_flight_orders.values())
tasks = []
for tracked_order in tracked_orders:
order_id = await tracked_order.get_exchange_order_id()
tasks.append(self._global.rest_api.request("get",
"spot/order/detail",
{"order_id": order_id},
True))
self.logger().debug(f"Polling for order status updates of {len(tasks)} orders.")
update_results = await safe_gather(*tasks, return_exceptions=True)
for update_result in update_results:
if isinstance(update_result, Exception):
raise update_result
if "data" not in update_result:
self.logger().info(f"_update_order_status result not in resp: {update_result}")
continue
order_data = update_result["data"]
self._process_rest_trade_details(order_data)
self._process_order_status(order_data.get('order_id'), order_data.get('status'))
def _process_order_status(self, exchange_order_id: str, status: int):
"""
Updates in-flight order and triggers cancellation or failure event if needed.
"""
tracked_order = self.find_exchange_order(exchange_order_id)
if tracked_order is None:
return
client_order_id = tracked_order.client_order_id
# Update order execution status
tracked_order.last_state = str(status)
if tracked_order.is_cancelled:
self.logger().info(f"Successfully canceled order {client_order_id}.")
self.trigger_event(MarketEvent.OrderCancelled,
OrderCancelledEvent(
self.current_timestamp,
client_order_id))
tracked_order.cancelled_event.set()
self.stop_tracking_order(client_order_id)
# elif tracked_order.is_failure:
# self.logger().info(f"The market order {client_order_id} has failed according to order status API. "
# f"Reason: {digifinex_utils.get_api_reason(order_msg['reason'])}")
# self.trigger_event(MarketEvent.OrderFailure,
# MarketOrderFailureEvent(
# self.current_timestamp,
# client_order_id,
# tracked_order.order_type
# ))
# self.stop_tracking_order(client_order_id)
def _process_rest_trade_details(self, order_detail_msg: Any):
for trade_msg in order_detail_msg['detail']:
"""
Updates in-flight order and trigger order filled event for trade message received. Triggers order completed
event if the total executed amount equals to the specified order amount.
"""
# for order in self._in_flight_orders.values():
# await order.get_exchange_order_id()
tracked_order = self.find_exchange_order(trade_msg['order_id'])
if tracked_order is None:
return
updated = tracked_order.update_with_rest_order_detail(trade_msg)
if not updated:
return
self.trigger_event(
MarketEvent.OrderFilled,
OrderFilledEvent(
self.current_timestamp,
tracked_order.client_order_id,
tracked_order.trading_pair,
tracked_order.trade_type,
tracked_order.order_type,
Decimal(str(trade_msg["executed_price"])),
Decimal(str(trade_msg["executed_amount"])),
estimate_fee(self.name, tracked_order.order_type in [OrderType.LIMIT, OrderType.LIMIT_MAKER]),
# TradeFee(0.0, [(trade_msg["fee_currency"], Decimal(str(trade_msg["fee"])))]),
exchange_trade_id=trade_msg["tid"]
)
)
if math.isclose(tracked_order.executed_amount_base, tracked_order.amount) or \
tracked_order.executed_amount_base >= tracked_order.amount:
tracked_order.last_state = "FILLED"
self.logger().info(f"The {tracked_order.trade_type.name} order "
f"{tracked_order.client_order_id} has completed "
f"according to order status API.")
event_tag = MarketEvent.BuyOrderCompleted if tracked_order.trade_type is TradeType.BUY \
else MarketEvent.SellOrderCompleted
event_class = BuyOrderCompletedEvent if tracked_order.trade_type is TradeType.BUY \
else SellOrderCompletedEvent
self.trigger_event(event_tag,
event_class(self.current_timestamp,
tracked_order.client_order_id,
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.executed_amount_base,
tracked_order.executed_amount_quote,
tracked_order.order_type))
self.stop_tracking_order(tracked_order.client_order_id)
def find_exchange_order(self, exchange_order_id: str):
for o in self._in_flight_orders.values():
if o.exchange_order_id == exchange_order_id:
return o
def _process_order_message_traded(self, order_msg):
tracked_order: DigifinexInFlightOrder = self.find_exchange_order(order_msg['id'])
if tracked_order is None:
return
(delta_trade_amount, delta_trade_price) = tracked_order.update_with_order_update(order_msg)
if not delta_trade_amount:
return
self.trigger_event(
MarketEvent.OrderFilled,
OrderFilledEvent(
self.current_timestamp,
tracked_order.client_order_id,
tracked_order.trading_pair,
tracked_order.trade_type,
tracked_order.order_type,
delta_trade_price,
delta_trade_amount,
estimate_fee(self.name, tracked_order.order_type in [OrderType.LIMIT, OrderType.LIMIT_MAKER]),
# TradeFee(0.0, [(trade_msg["fee_currency"], Decimal(str(trade_msg["fee"])))]),
exchange_trade_id=str(int(self._time() * 1e6))
)
)
if math.isclose(tracked_order.executed_amount_base, tracked_order.amount) or \
tracked_order.executed_amount_base >= tracked_order.amount:
tracked_order.last_state = "2"
self.logger().info(f"The {tracked_order.trade_type.name} order "
f"{tracked_order.client_order_id} has completed "
f"according to order status API.")
event_tag = MarketEvent.BuyOrderCompleted if tracked_order.trade_type is TradeType.BUY \
else MarketEvent.SellOrderCompleted
event_class = BuyOrderCompletedEvent if tracked_order.trade_type is TradeType.BUY \
else SellOrderCompletedEvent
self.trigger_event(event_tag,
event_class(self.current_timestamp,
tracked_order.client_order_id,
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.executed_amount_base,
tracked_order.executed_amount_quote,
tracked_order.order_type))
self.stop_tracking_order(tracked_order.client_order_id)
async def cancel_all(self, timeout_seconds: float):
"""
Cancels all in-flight orders and waits for cancellation results.
Used by bot's top level stop and exit commands (cancelling outstanding orders on exit)
:param timeout_seconds: The timeout at which the operation will be canceled.
:returns List of CancellationResult which indicates whether each order is successfully cancelled.
"""
if self._trading_pairs is None:
raise Exception("cancel_all can only be used when trading_pairs are specified.")
cancellation_results = []
try:
# for trading_pair in self._trading_pairs:
# await self._global.rest_api.request(
# "post",
# "private/cancel-all-orders",
# {"instrument_name": digifinex_utils.convert_to_exchange_trading_pair(trading_pair)},
# True
# )
open_orders = list(self._in_flight_orders.values())
for o in open_orders:
await self._execute_cancel(o)
for cl_order_id, tracked_order in self._in_flight_orders.items():
open_order = [o for o in open_orders if o.exchange_order_id == tracked_order.exchange_order_id]
if not open_order:
cancellation_results.append(CancellationResult(cl_order_id, True))
# self.trigger_event(MarketEvent.OrderCancelled,
# OrderCancelledEvent(self.current_timestamp, cl_order_id))
else:
cancellation_results.append(CancellationResult(cl_order_id, False))
except Exception:
self.logger().network(
"Failed to cancel all orders.",
exc_info=True,
app_warning_msg="Failed to cancel all orders on Digifinex. Check API key and network connection."
)
return cancellation_results
def tick(self, timestamp: float):
"""
Is called automatically by the clock for each clock's tick (1 second by default).
It checks if status polling task is due for execution.
"""
now = time.time()
poll_interval = (self.SHORT_POLL_INTERVAL
if now - self._user_stream_tracker.last_recv_time > 60.0
else self.LONG_POLL_INTERVAL)
last_tick = int(self._last_timestamp / poll_interval)
current_tick = int(timestamp / poll_interval)
if current_tick > last_tick:
if not self._poll_notifier.is_set():
self._poll_notifier.set()
self._last_timestamp = timestamp
def get_fee(self,
base_currency: str,
quote_currency: str,
order_type: OrderType,
order_side: TradeType,
amount: Decimal,
price: Decimal = s_decimal_NaN,
is_maker: Optional[bool] = None) -> AddedToCostTradeFee:
"""
To get trading fee, this function is simplified by using fee override configuration. Most parameters to this
function are ignored except order_type. Use OrderType.LIMIT_MAKER to specify that you want the trading fee for
maker order.
"""
is_maker = order_type is OrderType.LIMIT_MAKER
return AddedToCostTradeFee(percent=self.estimate_fee_pct(is_maker))
async def _iter_user_event_queue(self) -> AsyncIterable[Dict[str, any]]:
while True:
try:
yield await self._user_stream_tracker.user_stream.get()
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
"Unknown error. Retrying after 1 seconds.",
exc_info=True,
app_warning_msg="Could not fetch user events from Digifinex. Check API key and network connection."
)
await asyncio.sleep(1.0)
async def _user_stream_event_listener(self):
"""
Listens to message in _user_stream_tracker.user_stream queue. The messages are put in by
DigifinexAPIUserStreamDataSource.
"""
async for event_message in self._iter_user_event_queue():
try:
if "method" not in event_message:
continue
channel = event_message["method"]
# if "user.trade" in channel:
# for trade_msg in event_message["result"]["data"]:
# await self._process_trade_message(trade_msg)
if "order.update" in channel:
for order_msg in event_message["params"]:
self._process_order_status(order_msg['id'], order_msg['status'])
self._process_order_message_traded(order_msg)
elif channel == "balance.update":
balances = event_message["params"]
for balance_entry in balances:
asset_name = balance_entry["currency"]
self._account_balances[asset_name] = Decimal(str(balance_entry["total"]))
self._account_available_balances[asset_name] = Decimal(str(balance_entry["free"]))
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error in user stream listener loop.", exc_info=True)
await asyncio.sleep(5.0)
async def get_open_orders(self) -> List[OpenOrder]:
result = await self._global.rest_api.request(
"get",
"spot/order/current",
{},
True
)
ret_val = []
for order in result["data"]:
# if digifinex_utils.HBOT_BROKER_ID not in order["client_oid"]:
# continue
if order["type"] not in ["buy", "sell"]:
raise Exception(f"Unsupported order type {order['type']}")
ret_val.append(
OpenOrder(
client_order_id=None,
trading_pair=digifinex_utils.convert_from_exchange_trading_pair(order["symbol"]),
price=Decimal(str(order["price"])),
amount=Decimal(str(order["amount"])),
executed_amount=Decimal(str(order["executed_amount"])),
status=order["status"],
order_type=OrderType.LIMIT,
is_buy=True if order["type"] == "buy" else False,
time=int(order["created_date"]),
exchange_order_id=order["order_id"]
)
)
return ret_val
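# Hedged usage sketch (comments only); the keys and trading pair are placeholders, and in practice
# the Hummingbot client creates and drives this connector:
#   exchange = DigifinexExchange("api-key", "api-secret", trading_pairs=["BTC-USDT"])
#   await exchange.start_network()
#   order_id = exchange.buy("BTC-USDT", Decimal("0.001"), OrderType.LIMIT, Decimal("20000"))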
|
dart_fss/xbrl/table.py | dveamer/dart-fss | 243 | 12681131 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import re
import pandas as pd
from pandas import DataFrame
from dateutil.relativedelta import relativedelta
from arelle.ModelXbrl import ModelXbrl
from arelle import XbrlConst
from dart_fss.utils import str_to_regex
from dart_fss.xbrl.helper import (cls_label_check, get_label_list,
cls_merge_type, cls_datetime_check,
get_max_depth, get_value_from_dataset,
generate_df_columns, generate_df_rows,
flatten, get_title)
class Table(object):
""" XBRL Table
A class that holds financial statement information based on data extracted from an XBRL file.
Attributes
----------
parent: str
name of the loaded file
xbrl: ModelXbrl
arelle Xbrl class
"""
def __init__(self, parent, xbrl, code, definition, uri):
self.parent = parent
self.code = code
self.definition = definition
self.uri = uri
self._xbrl = xbrl
self._facts = None
self._dataset = None
self._cls = None
self._labels = None
@property
def facts(self):
"""list of modelFact: """
if self._facts is None:
arcrole = XbrlConst.parentChild
relation = self._xbrl.relationshipSet(arcrole, self.uri)
facts = []
for fact in self._xbrl.facts:
if relation.fromModelObject(fact.concept) \
or relation.toModelObject(fact.concept):
facts.append(fact)
self._facts = facts
return self._facts
@property
def dataset(self):
"""dict of modelFact: """
if self._dataset is None:
dataset = dict()
for fact in self.facts:
object_id = fact.context.objectId()
if dataset.get(object_id) is None:
dataset[object_id] = []
dataset[object_id].append(fact)
self._dataset = dataset
return self._dataset
@property
def cls(self):
"""classification 반환"""
if self._cls is None:
self._get_cls()
return self._cls
def cls_filter(self, start_dt=None, end_dt=None, label=None):
""" classification 필터링 함수
Parameters
----------
start_dt: str
검색 시작 일자
end_dt: str
검색 종료 일자
label: str
포함할 label 명
Returns
-------
list of cls
필터된 classification
"""
return [item for item in self.cls
if cls_datetime_check(item, start_dt, end_dt) and cls_label_check(item, label)]
def _get_cls(self):
""" classification 정보 추출 함수"""
contexts = set()
for data in self.facts:
context = data.context
contexts.add(context)
cls = list()
for context in contexts:
object_id = context.objectId()
# skip entries that have no data
if len(self.dataset[object_id]) < 1:
continue
instant_datetime = None
start_datetime = None
end_datetime = None
if context.isInstantPeriod is True:
instant_datetime = context.instantDatetime - relativedelta(days=1)
else:
start_datetime = context.startDatetime
end_datetime = context.endDatetime - relativedelta(days=1)
label = dict()
dims = context.qnameDims
if len(dims) > 0:
for dimQname in sorted(dims.keys(), key=lambda d: str(d), reverse=True):
dim_value = dims[dimQname]
ko = dim_value.member.label(lang='ko')
ko = re.sub(r'\[.*?\]', '', ko)
en = dim_value.member.label(lang='en')
en = re.sub(r'\[.*?\]', '', en)
label[dimQname] = {
'ko': ko,
'en': en
}
_cls = {
'cls_id': object_id,
'instant_datetime': instant_datetime,
'start_datetime': start_datetime,
'end_datetime': end_datetime,
'label': label
}
cls.append(_cls)
cls.sort(key=lambda x: x.get('instant_datetime') or x.get('start_datetime'), reverse=True)
self._cls = cls
return self._cls
@property
def labels(self):
"""labels 반환"""
if self._labels is None:
arcrole = XbrlConst.parentChild
relationship_set = self._xbrl.relationshipSet(arcrole, self.uri)
root_concept = relationship_set.rootConcepts[0]
labels = get_label_list(relationship_set, root_concept)
self._labels = labels
return self._labels
def to_DataFrame(self, cls=None, lang='ko', start_dt=None, end_dt=None,
label=None, show_abstract=False, show_class=True, show_depth=10,
show_concept=True, separator=True):
""" Pandas DataFrame으로 변환하는 함수
Parameters
----------
cls: dict, optional
classification
lang: str, optional
'ko' 한글 or 'en' 영문
start_dt: str, optional
검색 시작 일자
end_dt: str, optional
검색 종료 일자
label: str, optional
Column Label에 포함될 단어
show_abstract: bool, optional
abtract 표시 여부
show_class: bool, optional
class 표시여부
show_depth: int, optional
class 표시 깊이
show_concept: bool, optional
concept_id 표시 여부
separator: bool, optional
숫자 첫단위 표시 여부
Returns
-------
DataFrame
재무제표 DataFrame
"""
if cls is None:
cls = self.cls_filter(start_dt, end_dt, label)
cls = cls_merge_type(cls)
depth = get_max_depth(self.labels, show_abstract=show_abstract)
depth = depth if depth < show_depth else show_depth
table = self.parent.get_table_by_code('d999004')
unit = get_value_from_dataset(table.cls, table.dataset, 'dart-gcd_EntityReportingCurrencyISOCode')
definition = self.definition + ' (Unit: {})'.format(unit[0])
columns = generate_df_columns(definition, cls, depth, lang,
show_concept=show_concept, show_class=show_class)
if separator:
pd.options.display.float_format = '{:,}'.format
else:
pd.options.display.float_format = '{:}'.format
df = pd.DataFrame(columns=columns)
rows = generate_df_rows(self.labels, cls, self.dataset, depth, lang=lang,
show_abstract=show_abstract, show_concept=show_concept, show_class=show_class)
data = flatten(rows)
for idx, r in enumerate(data):
df.loc[idx] = r
regex_pass = str_to_regex('concept_id OR label_ko OR label_en OR class')
df_count = df.count()
drop_columns = []
for key, count in df_count.items():
if regex_pass.search(' '.join(key[1])):
pass
elif count <= 1:
drop_columns.append(key)
df = df.drop(drop_columns, axis=1)
return df
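    # Minimal sketch of converting this table to a DataFrame (the variable name
    # `table` is a placeholder; the options shown are the ones documented above):
    #
    #   df = table.to_DataFrame(lang='en', show_abstract=False,
    #                           show_concept=False, show_depth=2)
    #   df.head()
    #
    # The resulting columns form a MultiIndex built by generate_df_columns(),
    # one column group per filtered classification (reporting period).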
def get_value_by_concept_id(self, concept_id, start_dt=None, end_dt=None, label=None, lang='en'):
""" concept_id을 이용하여 값을 찾아 주는 함수
Parameters
----------
concept_id: str
재무제표 계정의 concept_id
start_dt: str
검색 시작 일자
end_dt: str
검색 종료 일자
label: str
검색 포함 label
lang: str
'ko' 한글 / 'en' 영문
Returns
-------
dict of (str or float)
{ column 이름 : 값 }
"""
cls = self.cls_filter(start_dt, end_dt, label)
data = get_value_from_dataset(classification=cls, dataset=self.dataset, concept_id=concept_id)
results = dict()
for c, d in zip(cls, data):
title = get_title(c, lang=lang)
results[title] = d
return results
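    # Hedged example (the concept_id and date below are assumptions, not values
    # taken from the library's documentation):
    #
    #   values = table.get_value_by_concept_id('ifrs_Equity', start_dt='20180101')
    #   # -> {'[2018-12-31] ...': 123456789.0, ...}   (shape illustrative only)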
def __repr__(self):
info = {
'code': self.code,
'definition': self.definition
}
return str(info) |
tests/nnapi/specs/V1_2/rsqrt_4D_float_nnfw.mod.py | bogus-sudo/ONE-1 | 255 | 12681141 | # model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
i3 = Output("op3", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
model = model.Operation("RSQRT", i1).To(i3)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[1.0, 36.0, 2.0, 90, 4.0, 16.0, 25.0, 100.0,
23.0, 19.0, 40.0, 256.0, 4.0, 43.0, 8.0, 36.0]}
output0 = {i3: # output 0
[1.0, 0.166667, 0.70710678118, 0.105409, 0.5, 0.25, 0.2, 0.1,
0.208514, 0.229416, 0.158114, 0.0625, 0.5, 0.152499, 0.35355339059, 0.166667]}
# Instantiate an example
Example((input0, output0))
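# Sanity-check sketch (not part of the generated spec): each expected output is
# simply 1 / sqrt(x) of the corresponding input element, e.g.
#
#   import math
#   assert abs(1.0 / math.sqrt(36.0) - 0.166667) < 1e-5
#   assert abs(1.0 / math.sqrt(90.0) - 0.105409) < 1e-5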
|
endless_pagination/tests/integration/test_callbacks.py | bjinwright/django-endless-pagination | 124 | 12681143 |
"""Javascript callbacks integration tests."""
from __future__ import unicode_literals
from endless_pagination.tests.integration import SeleniumTestCase
class CallbacksTest(SeleniumTestCase):
view_name = 'callbacks'
def notifications_loaded(self, driver):
return driver.find_elements_by_id('fragment')
def assertNotificationsEqual(self, notifications):
"""Assert the given *notifications* equal the ones in the DOM."""
self.wait_ajax().until(self.notifications_loaded)
find = self.selenium.find_element_by_id
for key, value in notifications.items():
self.assertEqual(value, find(key).text)
def test_on_click(self):
# Ensure the onClick callback is correctly called.
self.get()
self.click_link(2)
self.assertNotificationsEqual({
'onclick': 'Object 1',
'onclick-label': '2',
'onclick-url': '/callbacks/?page=2',
'onclick-key': 'page',
})
def test_on_completed(self):
# Ensure the onCompleted callback is correctly called.
self.get(page=10)
self.click_link(1)
self.assertNotificationsEqual({
'oncompleted': 'Object 1',
'oncompleted-label': '1',
'oncompleted-url': '/callbacks/',
'oncompleted-key': 'page',
'fragment': 'Object 3',
})
|
src/oci/database_management/models/awr_db_parameter_summary.py | Manny27nyc/oci-python-sdk | 249 | 12681144 |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class AwrDbParameterSummary(object):
"""
The summary of the AWR change history data for a single database parameter.
"""
def __init__(self, **kwargs):
"""
Initializes a new AwrDbParameterSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param name:
The value to assign to the name property of this AwrDbParameterSummary.
:type name: str
:param instance_number:
The value to assign to the instance_number property of this AwrDbParameterSummary.
:type instance_number: int
:param begin_value:
The value to assign to the begin_value property of this AwrDbParameterSummary.
:type begin_value: str
:param end_value:
The value to assign to the end_value property of this AwrDbParameterSummary.
:type end_value: str
:param is_changed:
The value to assign to the is_changed property of this AwrDbParameterSummary.
:type is_changed: bool
:param value_modified:
The value to assign to the value_modified property of this AwrDbParameterSummary.
:type value_modified: str
:param is_default:
The value to assign to the is_default property of this AwrDbParameterSummary.
:type is_default: bool
"""
self.swagger_types = {
'name': 'str',
'instance_number': 'int',
'begin_value': 'str',
'end_value': 'str',
'is_changed': 'bool',
'value_modified': 'str',
'is_default': 'bool'
}
self.attribute_map = {
'name': 'name',
'instance_number': 'instanceNumber',
'begin_value': 'beginValue',
'end_value': 'endValue',
'is_changed': 'isChanged',
'value_modified': 'valueModified',
'is_default': 'isDefault'
}
self._name = None
self._instance_number = None
self._begin_value = None
self._end_value = None
self._is_changed = None
self._value_modified = None
self._is_default = None
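    # Illustrative construction only (all values below are made up): the
    # decorator lets each swagger attribute be passed as a keyword argument.
    #
    #   summary = AwrDbParameterSummary(
    #       name='open_cursors',
    #       instance_number=1,
    #       begin_value='300',
    #       end_value='500',
    #       is_changed=True,
    #       value_modified='SYSTEM_MOD',
    #       is_default=False)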
@property
def name(self):
"""
**[Required]** Gets the name of this AwrDbParameterSummary.
The name of the parameter.
:return: The name of this AwrDbParameterSummary.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this AwrDbParameterSummary.
The name of the parameter.
:param name: The name of this AwrDbParameterSummary.
:type: str
"""
self._name = name
@property
def instance_number(self):
"""
Gets the instance_number of this AwrDbParameterSummary.
The database instance number.
:return: The instance_number of this AwrDbParameterSummary.
:rtype: int
"""
return self._instance_number
@instance_number.setter
def instance_number(self, instance_number):
"""
Sets the instance_number of this AwrDbParameterSummary.
The database instance number.
:param instance_number: The instance_number of this AwrDbParameterSummary.
:type: int
"""
self._instance_number = instance_number
@property
def begin_value(self):
"""
Gets the begin_value of this AwrDbParameterSummary.
The parameter value when the period began.
:return: The begin_value of this AwrDbParameterSummary.
:rtype: str
"""
return self._begin_value
@begin_value.setter
def begin_value(self, begin_value):
"""
Sets the begin_value of this AwrDbParameterSummary.
The parameter value when the period began.
:param begin_value: The begin_value of this AwrDbParameterSummary.
:type: str
"""
self._begin_value = begin_value
@property
def end_value(self):
"""
Gets the end_value of this AwrDbParameterSummary.
The parameter value when the period ended.
:return: The end_value of this AwrDbParameterSummary.
:rtype: str
"""
return self._end_value
@end_value.setter
def end_value(self, end_value):
"""
Sets the end_value of this AwrDbParameterSummary.
The parameter value when the period ended.
:param end_value: The end_value of this AwrDbParameterSummary.
:type: str
"""
self._end_value = end_value
@property
def is_changed(self):
"""
Gets the is_changed of this AwrDbParameterSummary.
Indicates whether the parameter value changed within the period.
:return: The is_changed of this AwrDbParameterSummary.
:rtype: bool
"""
return self._is_changed
@is_changed.setter
def is_changed(self, is_changed):
"""
Sets the is_changed of this AwrDbParameterSummary.
Indicates whether the parameter value changed within the period.
:param is_changed: The is_changed of this AwrDbParameterSummary.
:type: bool
"""
self._is_changed = is_changed
@property
def value_modified(self):
"""
Gets the value_modified of this AwrDbParameterSummary.
Indicates whether the parameter has been modified after instance startup:
- MODIFIED - Parameter has been modified with ALTER SESSION
        - SYSTEM_MOD - Parameter has been modified with ALTER SYSTEM (which causes all the currently logged in sessions' values to be modified)
- FALSE - Parameter has not been modified after instance startup
:return: The value_modified of this AwrDbParameterSummary.
:rtype: str
"""
return self._value_modified
@value_modified.setter
def value_modified(self, value_modified):
"""
Sets the value_modified of this AwrDbParameterSummary.
Indicates whether the parameter has been modified after instance startup:
- MODIFIED - Parameter has been modified with ALTER SESSION
        - SYSTEM_MOD - Parameter has been modified with ALTER SYSTEM (which causes all the currently logged in sessions' values to be modified)
- FALSE - Parameter has not been modified after instance startup
:param value_modified: The value_modified of this AwrDbParameterSummary.
:type: str
"""
self._value_modified = value_modified
@property
def is_default(self):
"""
Gets the is_default of this AwrDbParameterSummary.
Indicates whether the parameter value in the end snapshot is the default.
:return: The is_default of this AwrDbParameterSummary.
:rtype: bool
"""
return self._is_default
@is_default.setter
def is_default(self, is_default):
"""
Sets the is_default of this AwrDbParameterSummary.
Indicates whether the parameter value in the end snapshot is the default.
:param is_default: The is_default of this AwrDbParameterSummary.
:type: bool
"""
self._is_default = is_default
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
powerline_shell/segments/php_version.py | Dakedres/powerline-shell | 2,656 | 12681149 | import subprocess
from ..utils import ThreadedSegment, decode
class Segment(ThreadedSegment):
def run(self):
self.version = None
try:
output = decode(
subprocess.check_output(['php', '-r', 'echo PHP_VERSION;'],
stderr=subprocess.STDOUT))
self.version = output.split('-')[0] if '-' in output else output
except OSError:
self.version = None
def add_to_powerline(self):
self.join()
if not self.version:
return
# FIXME no hard-coded colors
self.powerline.append(" " + self.version + " ", 15, 4)
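    # Illustrative only: what the version parsing in run() does, assuming a
    # Debian-style PHP build string (the exact value varies by system).
    #
    #   >>> output = "7.4.3-4ubuntu2"
    #   >>> output.split('-')[0] if '-' in output else output
    #   '7.4.3'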
|
gslib/commands/hmac.py | stanhu/gsutil | 649 | 12681150 |
# -*- coding: utf-8 -*-
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of HMAC key management command for GCS.
NOTE: Any modification to this file or corresponding HMAC logic
should be submitted in its own PR and release to avoid
concurrency issues in testing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.metrics import LogCommandParams
from gslib.project_id import PopulateProjectId
from gslib.utils.cloud_api_helper import GetCloudApiInstance
from gslib.utils.text_util import InsistAscii
_CREATE_SYNOPSIS = """
gsutil hmac create [-p <project>] <service_account_email>
"""
_DELETE_SYNOPSIS = """
gsutil hmac delete [-p <project>] <access_id>
"""
_GET_SYNOPSIS = """
gsutil hmac get [-p <project>] <access_id>
"""
_LIST_SYNOPSIS = """
gsutil hmac list [-a] [-l] [-p <project>] [-u <service_account_email>]
"""
_UPDATE_SYNOPSIS = """
gsutil hmac update -s (ACTIVE|INACTIVE) [-e <etag>] [-p <project>] <access_id>
"""
_CREATE_DESCRIPTION = """
<B>CREATE</B>
The ``hmac create`` command creates an HMAC key for the specified service
account:
gsutil hmac create <EMAIL>
The secret key material is only available upon creation, so be sure to store
the returned secret along with the access_id.
<B>CREATE OPTIONS</B>
The ``create`` sub-command has the following option
-p <project> Specify the ID or number of the project in which
to create a key.
"""
_DELETE_DESCRIPTION = """
<B>DELETE</B>
The ``hmac delete`` command permanently deletes the specified HMAC key:
gsutil hmac delete GOOG56JBMFZX6PMPTQ62VD2
Note that keys must be updated to be in the ``INACTIVE`` state before they can be
deleted.
<B>DELETE OPTIONS</B>
The ``delete`` sub-command has the following option
-p <project> Specify the ID or number of the project from which to
delete a key.
"""
_GET_DESCRIPTION = """
<B>GET</B>
The ``hmac get`` command retrieves the specified HMAC key's metadata:
gsutil hmac get GOOG56JBMFZX6PMPTQ62VD2
Note that there is no option to retrieve a key's secret material after it has
been created.
<B>GET OPTIONS</B>
The ``get`` sub-command has the following option
-p <project> Specify the ID or number of the project from which to
get a key.
"""
_LIST_DESCRIPTION = """
<B>LIST</B>
The ``hmac list`` command lists the HMAC key metadata for keys in the
specified project. If no project is specified in the command, the default
project is used.
<B>LIST OPTIONS</B>
The ``list`` sub-command has the following options
-a Show all keys, including recently deleted
keys.
-l Use long listing format. Shows each key's full
metadata excluding the secret.
-p <project> Specify the ID or number of the project from
which to list keys.
-u <service_account_email> Filter keys for a single service account.
"""
_UPDATE_DESCRIPTION = """
<B>UPDATE</B>
The ``hmac update`` command sets the state of the specified key:
gsutil hmac update -s INACTIVE -e M42da= GOOG56JBMFZX6PMPTQ62VD2
Valid state arguments are ``ACTIVE`` and ``INACTIVE``. To set a key to state
``DELETED``, use the ``hmac delete`` command on an ``INACTIVE`` key. If an etag
is set in the command, it will only succeed if the provided etag matches the etag
of the stored key.
<B>UPDATE OPTIONS</B>
The ``update`` sub-command has the following options
-s <ACTIVE|INACTIVE> Sets the state of the specified key to either
``ACTIVE`` or ``INACTIVE``.
-e <etag> If provided, the update will only be performed
if the specified etag matches the etag of the
stored key.
-p <project> Specify the ID or number of the project in
which to update a key.
"""
_SYNOPSIS = (_CREATE_SYNOPSIS + _DELETE_SYNOPSIS.lstrip('\n') +
_GET_SYNOPSIS.lstrip('\n') + _LIST_SYNOPSIS.lstrip('\n') +
_UPDATE_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = """
You can use the ``hmac`` command to interact with service account `HMAC keys
<https://cloud.google.com/storage/docs/authentication/hmackeys>`_.
The ``hmac`` command has five sub-commands:
""" + '\n'.join([
_CREATE_DESCRIPTION,
_DELETE_DESCRIPTION,
_GET_DESCRIPTION,
_LIST_DESCRIPTION,
_UPDATE_DESCRIPTION,
])
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_VALID_UPDATE_STATES = ['INACTIVE', 'ACTIVE']
_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
_create_help_text = CreateHelpText(_CREATE_SYNOPSIS, _CREATE_DESCRIPTION)
_delete_help_text = CreateHelpText(_DELETE_SYNOPSIS, _DELETE_DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_list_help_text = CreateHelpText(_LIST_SYNOPSIS, _LIST_DESCRIPTION)
_update_help_text = CreateHelpText(_UPDATE_SYNOPSIS, _UPDATE_DESCRIPTION)
def _AccessIdException(command_name, subcommand, synopsis):
return CommandException(
'%s %s requires an Access ID to be specified as the last argument.\n%s' %
(command_name, subcommand, synopsis))
def _KeyMetadataOutput(metadata):
"""Format the key metadata for printing to the console."""
def FormatInfo(name, value, new_line=True):
"""Format the metadata name-value pair into two aligned columns."""
width = 22
info_str = '\t%-*s %s' % (width, name + ':', value)
if new_line:
info_str += '\n'
return info_str
message = 'Access ID %s:\n' % metadata.accessId
message += FormatInfo('State', metadata.state)
message += FormatInfo('Service Account', metadata.serviceAccountEmail)
message += FormatInfo('Project', metadata.projectId)
message += FormatInfo('Time Created',
metadata.timeCreated.strftime(_TIME_FORMAT))
message += FormatInfo('Time Last Updated',
metadata.updated.strftime(_TIME_FORMAT))
message += FormatInfo('Etag', metadata.etag, new_line=False)
return message
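# Illustrative output of _KeyMetadataOutput() (the access ID, email, project,
# dates, and etag below are fabricated; column alignment is approximate):
#
#   Access ID GOOG1EXAMPLEACCESSID:
#           State:                 ACTIVE
#           Service Account:       sa@my-project.iam.gserviceaccount.com
#           Project:               my-project
#           Time Created:          Mon, 01 Jul 2019 00:00:00 GMT
#           Time Last Updated:     Mon, 01 Jul 2019 00:00:00 GMT
#           Etag:                  M42da=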
class HmacCommand(Command):
"""Implementation of gsutil hmac command."""
command_spec = Command.CreateCommandSpec(
'hmac',
min_args=1,
max_args=8,
supported_sub_args='ae:lp:s:u:',
file_url_ok=True,
urls_start_arg=1,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
usage_synopsis=_SYNOPSIS,
argparse_arguments={
'create': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'delete': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'get': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'list': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'update': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
},
)
help_spec = Command.HelpSpec(
help_name='hmac',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary=('CRUD operations on service account HMAC keys.'),
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'create': _create_help_text,
'delete': _delete_help_text,
'get': _get_help_text,
'list': _list_help_text,
'update': _update_help_text,
})
def _CreateHmacKey(self, thread_state=None):
"""Creates HMAC key for a service account."""
if self.args:
self.service_account_email = self.args[0]
else:
err_msg = ('%s %s requires a service account to be specified as the '
'last argument.\n%s')
raise CommandException(
err_msg %
(self.command_name, self.action_subcommand, _CREATE_SYNOPSIS))
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
response = gsutil_api.CreateHmacKey(self.project_id,
self.service_account_email,
provider='gs')
print('%-12s %s' % ('Access ID:', response.metadata.accessId))
print('%-12s %s' % ('Secret:', response.secret))
def _DeleteHmacKey(self, thread_state=None):
"""Deletes an HMAC key."""
if self.args:
access_id = self.args[0]
else:
raise _AccessIdException(self.command_name, self.action_subcommand,
_DELETE_SYNOPSIS)
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
gsutil_api.DeleteHmacKey(self.project_id, access_id, provider='gs')
def _GetHmacKey(self, thread_state=None):
"""Gets HMAC key from its Access Id."""
if self.args:
access_id = self.args[0]
else:
raise _AccessIdException(self.command_name, self.action_subcommand,
_GET_SYNOPSIS)
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
response = gsutil_api.GetHmacKey(self.project_id, access_id, provider='gs')
print(_KeyMetadataOutput(response))
def _ListHmacKeys(self, thread_state=None):
"""Lists HMAC keys for a project or service account."""
if self.args:
raise CommandException(
'%s %s received unexpected arguments.\n%s' %
(self.command_name, self.action_subcommand, _LIST_SYNOPSIS))
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
response = gsutil_api.ListHmacKeys(self.project_id,
self.service_account_email,
self.show_all,
provider='gs')
short_list_format = '%s\t%-12s %s'
if self.long_list:
for item in response:
print(_KeyMetadataOutput(item))
print()
else:
for item in response:
print(short_list_format %
(item.accessId, item.state, item.serviceAccountEmail))
def _UpdateHmacKey(self, thread_state=None):
"""Update an HMAC key's state."""
if not self.state:
raise CommandException(
'A state flag must be supplied for %s %s\n%s' %
(self.command_name, self.action_subcommand, _UPDATE_SYNOPSIS))
elif self.state not in _VALID_UPDATE_STATES:
raise CommandException('The state flag value must be one of %s' %
', '.join(_VALID_UPDATE_STATES))
if self.args:
access_id = self.args[0]
else:
raise _AccessIdException(self.command_name, self.action_subcommand,
_UPDATE_SYNOPSIS)
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
response = gsutil_api.UpdateHmacKey(self.project_id,
access_id,
self.state,
self.etag,
provider='gs')
print(_KeyMetadataOutput(response))
def RunCommand(self):
"""Command entry point for the hmac command."""
if self.gsutil_api.GetApiSelector(provider='gs') != ApiSelector.JSON:
raise CommandException(
'The "hmac" command can only be used with the GCS JSON API')
self.action_subcommand = self.args.pop(0)
self.ParseSubOpts(check_args=True)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
LogCommandParams(sub_opts=self.sub_opts)
self.service_account_email = None
self.state = None
self.show_all = False
self.long_list = False
self.etag = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-u':
self.service_account_email = a
elif o == '-p':
# Project IDs are sent as header values when using gs and s3 XML APIs.
InsistAscii(a, 'Invalid non-ASCII character found in project ID')
self.project_id = a
elif o == '-s':
self.state = a
elif o == '-a':
self.show_all = True
elif o == '-l':
self.long_list = True
elif o == '-e':
self.etag = a
if not self.project_id:
self.project_id = PopulateProjectId(None)
method_for_arg = {
'create': self._CreateHmacKey,
'delete': self._DeleteHmacKey,
'get': self._GetHmacKey,
'list': self._ListHmacKeys,
'update': self._UpdateHmacKey,
}
if self.action_subcommand not in method_for_arg:
raise CommandException('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help hmac".' %
(self.action_subcommand, self.command_name))
LogCommandParams(subcommands=[self.action_subcommand])
method_for_arg[self.action_subcommand]()
return 0
|
scripts/gen_gcov_files.py | maxvankessel/zephyr | 6,224 | 12681151 | #!/usr/bin/env python3
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""This script will parse the serial console log file and create the required
gcda files.
"""
import argparse
import os
import re
def retrieve_data(input_file):
extracted_coverage_info = {}
capture_data = False
reached_end = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
reached_end = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
# Remove the leading delimiter "*"
file_name = line.split("<")[0][1:]
# Remove the trailing new line char
hex_dump = line.split("<")[1][:-1]
extracted_coverage_info.update({file_name: hex_dump})
if not reached_end:
print("incomplete data captured from %s" % input_file)
return extracted_coverage_info
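# Rough sketch of the console format retrieve_data() expects (the path and hex
# payload below are made up):
#
#   GCOV_COVERAGE_DUMP_START
#   *build/zephyr/main.gcda<adcc30340a000000...
#   GCOV_COVERAGE_DUMP_END
#
# Each captured line is split on '<': the part before it (minus the leading
# '*') is the gcda path, and the part after it is the hex dump that
# create_gcda_files() below writes out as binary.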
def create_gcda_files(extracted_coverage_info):
if args.verbose:
print("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
if args.verbose:
print(filename)
        # If kobject_hash is included in the coverage data, gcovr fails,
        # so skip it. This problem only occurs in gcovr v4.1.
if "kobject_hash" in filename:
filename = filename[:-4] + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input", required=True,
help="Input dump data")
parser.add_argument("-v", "--verbose", action="count", default=0,
help="Verbose Output")
args = parser.parse_args()
def main():
parse_args()
input_file = args.input
extracted_coverage_info = retrieve_data(input_file)
create_gcda_files(extracted_coverage_info)
if __name__ == '__main__':
main()
|
lingvo/core/sendrecv.py | Harshs27/lingvo | 2,611 | 12681160 | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Send/Recv ops.
The following _Send()/_Recv() are adapted from python op wrappers
generated by python_op_gen_main. python_op_gen_main.cc's
PrintAllPythonOps needs to be updated to export internal ops.
"""
from lingvo import compat as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.tf2xla.python import xla
# pylint: enable=g-direct-tensorflow-import
def _TpuCore(device):
"""Returns the TPU core represented by <device>, or -1 if not TPU."""
prefix = "device:TPU_REPLICATED_CORE:"
if prefix in device:
return int(device[len(prefix):])
return -1
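# Small illustration of _TpuCore() (the device strings are examples; the exact
# format depends on how callers name their devices):
#
#   _TpuCore("device:TPU_REPLICATED_CORE:3")   # -> 3
#   _TpuCore("/job:worker/device:CPU:0")       # -> -1 (not a TPU core)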
class Channel:
"""A communication channel to transfer tensors in order."""
def __init__(self, dtype, shape, send_device, recv_device, name=None):
"""Construct a channel.
Args:
dtype: The dtype of tensors sent through the channel.
shape: The shape of tensors sent through the channel. Must be a fully
defined shape for TPUs.
send_device: A fully-specified tensorflow device.
recv_device: A fully-specified tensorflow device.
name: A name for the channel (optional).
"""
current_graph = tf.get_default_graph()
assert current_graph, "A channel is scoped within a tf.Graph"
self._dtype = dtype
self._send_device = send_device
self._recv_device = recv_device
self._name = current_graph.unique_name(name if name else "channel")
assert shape is not None
shape = tf.TensorShape(shape)
self._shape = shape
self._send_tpu_core = _TpuCore(send_device)
self._recv_tpu_core = _TpuCore(recv_device)
self._send_called = False
self._recv_op = None
assert ((self._send_tpu_core == -1) == (self._recv_tpu_core == -1)), (
"Mixing TPU and non-TPU: %s and %s" % (send_device, recv_device))
if self._send_tpu_core >= 0:
assert self._shape.is_fully_defined(), (
"TPU channel must have fully defined shape. Name: %s, shape: %s" %
(self._name, self._shape))
assert self._send_tpu_core != self._recv_tpu_core, (
"TPU send/recv must be cross-core: %s and %s" %
(send_device, recv_device))
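  # Minimal usage sketch (the device names below are placeholders, not a
  # tested configuration):
  #
  #   ch = Channel(tf.float32, shape=[8, 128],
  #                send_device="/job:worker/replica:0/task:0/device:CPU:0",
  #                recv_device="/job:worker/replica:0/task:1/device:CPU:0")
  #   send_op = ch.Send(some_tensor)   # executed on the sending device
  #   received = ch.Recv()             # executed on the receiving device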
def Send(self, tensor):
"""Sends a tensor through the channel."""
assert tensor.dtype == self._dtype
assert not self._send_called, ("Send called multiple times for %s" %
self._name)
self._send_called = True
if self._send_tpu_core == -1:
return tf.raw_ops.Send(
tensor=tensor,
tensor_name=self._name,
send_device=self._send_device,
send_device_incarnation=0,
recv_device=self._recv_device)
else:
with tf.device(self._send_device):
return xla.send(
tensor, tensor_name=self._name, name="Send_" + self._name)
def Recv(self):
"""Receives a tensor from the channel."""
if self._send_tpu_core == -1:
received = tf.raw_ops.Recv(
tensor_type=self._dtype,
tensor_name=self._name,
send_device=self._send_device,
send_device_incarnation=0,
recv_device=self._recv_device)
received.set_shape(self._shape)
return received
else:
with tf.device(self._recv_device):
return xla.recv(
self._dtype,
tensor_name=self._name,
shape=self._shape,
name="Recv_" + self._name)
|
exhale/configs.py | matz-e/exhale | 169 | 12681165 |
# -*- coding: utf8 -*-
########################################################################################
# This file is part of exhale. Copyright (c) 2017-2019, <NAME>. #
# Full BSD 3-Clause license available here: #
# #
# https://github.com/svenevs/exhale/blob/master/LICENSE #
########################################################################################
'''
The ``configs`` module exists to contain the Sphinx Application configurations specific
to this extension. Almost every ``global`` variable defined in this file can be
modified using the ``exhale_args`` in ``conf.py``. The convention for this file is as
follows:
1. Things that are **not** supposed to change, because their value is expected to be
constant, are declared in ``ALL_CAPS``. See
- :data:`~exhale.configs.SECTION_HEADING_CHAR`
- :data:`~exhale.configs.SUB_SECTION_HEADING_CHAR`
- :data:`~exhale.configs.SUB_SUB_SECTION_HEADING_CHAR`
- :data:`~exhale.configs.DEFAULT_DOXYGEN_STDIN_BASE`
2. Internal / private variables that are **not** supposed to changed except for by this
extension are declared as ``_lower_case_with_single_leading_underscore`` as is common
in Python ;).
3. Every other variable is declared as ``camelCase``, indicating that it can be
configured **indirectly** by using it as a key in the arguments to ``exhale_args``
present in your ``conf.py``. For example, one of the *required* arguments for this
extension is :data:`~exhale.configs.containmentFolder`. This means that the key
``"containmentFolder"`` is *expected* to be present in ``exhale_args``.
.. code-block:: py
exhale_args = {
"containmentFolder": "./api",
# ...
}
Read the documentation for the various configs present to see what the various
options are to modify the behavior of Exhale.
'''
from __future__ import unicode_literals
import os
import six
import textwrap
from sphinx.errors import ConfigError, ExtensionError
from sphinx.util import logging
from types import FunctionType, ModuleType
try:
# Python 2 StringIO
from cStringIO import StringIO
except ImportError:
# Python 3 StringIO
from io import StringIO
logger = logging.getLogger(__name__)
"""
The |SphinxLoggerAdapter| for communicating with the sphinx build process.
.. |SphinxLoggerAdapter| replace:: :class:`sphinx:sphinx.util.SphinxLoggerAdapter`
"""
########################################################################################
## #
## Required configurations, these get set indirectly via the dictionary argument #
## given to exhale in your conf.py. #
## #
########################################################################################
containmentFolder = None
'''
**Required**
The location where Exhale is going to generate all of the reStructuredText documents.
**Value in** ``exhale_args`` (str)
The value of key ``"containmentFolder"`` should be a string representing the
(relative or absolute) path to the location where Exhale will be creating all of the
files. **Relative paths are relative to the Sphinx application source directory**,
which is almost always wherever the file ``conf.py`` is.
.. note::
To better help you the user know what Exhale is generating (and therefore safe
to delete), it is a **hard requirement** that ``containmentFolder`` is a
**subdirectory** of the Sphinx Source Directory. AKA the path ``"."`` will be
rejected, but the path ``"./api"`` will be accepted.
The suggested value for ``"containmentFolder"`` is ``"./api"``, or
``"./source/api"`` if you have separate source and build directories with Sphinx.
When the html is eventually generated, this will make for a more human friendly
url being generated.
.. warning::
The verbiage subdirectory means **direct** subdirectory. So the path
``"./library/api"`` will be rejected. This is because I make the assumption that
``containmentFolder`` is "owned" by Exhale / is safe to delete.
'''
rootFileName = None
'''
**Required**
The name of the file that **you** will be linking to from your reStructuredText
documents. Do **not** include the ``containmentFolder`` path in this file name,
Exhale will create the file ``"{contaimentFolder}/{rootFileName}"`` for you.
**Value in** ``exhale_args`` (str)
The value of key ``"rootFileName"`` should be a string representing the name of
the file you will be including in your top-level ``toctree`` directive. In order
for Sphinx to be happy, you should include a ``.rst`` suffix. All of the generated
API uses reStructuredText, and that will not ever change.
For example, if you specify
- ``"containmentFolder" = "./api"``, and
- ``"rootFileName" = "library_root.rst"``
Then exhale will generate the file ``./api/library_root.rst``. You would then
include this file in a ``toctree`` directive (say in ``index.rst``) with:
.. raw:: html
<div class="highlight-rest">
<div class="highlight">
<pre>
.. toctree::
:maxdepth: 2
about
<b>api/library_root</b></pre>
</div>
</div>
'''
rootFileTitle = None
'''
**Required**
The title to be written at the top of ``rootFileName``, which will appear in your
file including it in the ``toctree`` directive.
**Value in** ``exhale_args`` (str)
The value of the key ``"rootFileTitle"`` should be a string that has the title of
the main library root document folder Exhale will be generating. The user is
required to supply this value because its value directly affects the overall
presentation of your documentation. For example, if you are including the Exhale
generated library root file in your ``index.rst`` top-level ``toctree`` directive,
the title you supply here will show up on both your main page, as well as in the
navigation menus.
An example value could be ``"Library API"``.
'''
doxygenStripFromPath = None
'''
**Required**
When building on Read the Docs, there seem to be issues regarding the Doxygen
variable ``STRIP_FROM_PATH`` when built remotely. That is, it isn't stripped at
all. This value enables Exhale to manually strip the path.
**Value in** ``exhale_args`` (str)
The value of the key ``"doxygenStripFromPath"`` should be a string representing the
(relative or absolute) path to be stripped from the final documentation. As with
:data:`~exhale.configs.containmentFolder`, relative paths are relative to the Sphinx
source directory (where ``conf.py`` is). Consider the following directory structure::
my_project/
├───docs/
│ conf.py
│
└───include/
└───my_project/
common.hpp
In this scenario, if you supplied ``"doxygenStripFromPath" = ".."``, then the file
page for ``common.hpp`` would list its declaration as
``include/my_project/common.hpp``. If you instead set it to be ``"../include"``,
then the file page for ``common.hpp`` would list its declaration as just
``my_project/common.hpp``.
As a consequence, modification of this variable directly affects what shows up in
the file view hierarchy. In the previous example, the difference would really just
be whether or not all files are nestled underneath a global ``include`` folder or
not.
.. warning::
It is **your** responsibility to ensure that the value you provide for this
configuration is valid. The file view hierarchy will almost certainly break if
you give nonsense.
.. note::
Depending on your project layout, some links may be broken in the above example
if you use ``"../include"`` that work when you use ``".."``. To get your docs
working, revert to ``".."``. If you're feeling nice, raise an issue on GitHub
and let me know --- I haven't been able to track this one down yet :/
Particularly, this seems to happen with projects that have duplicate filenames
in different folders, e.g.::
include/
└───my_project/
│ common.hpp
│
└───viewing/
common.hpp
'''
########################################################################################
## #
## Additional configurations available to further customize the output of exhale. #
## #
########################################################################################
# Build Process Logging, Colors, and Debugging #
########################################################################################
verboseBuild = False
'''
**Optional**
If you are having a hard time getting documentation to build, or say hierarchies are
not appearing as they should be, set this to ``True``.
**Value in** ``exhale_args`` (bool)
Set the boolean value to be ``True`` to include colorized printing at various stages
of the build process.
.. warning::
There is only one level of verbosity: excessively verbose. **All logging is
written to** ``sys.stderr``. See :data:`~exhale.configs.alwaysColorize`.
.. tip::
Looking at the actual code of Exhale trying to figure out what is going on? All
logging sections have a comment ``# << verboseBuild`` just before the logging
section. So you can ``grep -r '# << verboseBuild' exhale/`` if you're working
with the code locally.
'''
alwaysColorize = True
'''
**Optional**
Exhale prints various messages throughout the build process to both ``sys.stdout``
and ``sys.stderr``. The default behavior is to colorize output always, regardless
of if the output is being directed to a file. This is because you can simply use
``cat`` or ``less -R``. By setting this to ``False``, when redirecting output to
a file the color will not be included.
**Value in** ``exhale_args`` (bool)
The default is ``True`` because I find color to be something developers should
embrace. Simply use ``less -R`` to view colorized output conveniently. While I
have a love of all things color, I understand you may not. So just set this to
``False``.
.. note::
There is not and will never be a way to remove the colorized logging from the
console. This only controls when ``sys.stdout`` and ``sys.stderr`` are being
redirected to a file.
'''
generateBreatheFileDirectives = False
'''
**Optional**
Append the ``.. doxygenfile::`` directive from Breathe for *every* file page
generated in the API.
**Value in** ``exhale_args`` (bool)
If True, then the breathe directive (``doxygenfile``) will be incorporated at the
bottom of the file.
.. danger::
**This feature is not intended for production release of pages, only debugging.**
This feature is "deprecated" in lieu of minimal parsing of the input Doxygen xml
for a given documented file. This feature can be used to help determine if
Exhale has made a mistake in parsing the file level documentation, but usage of
this feature will create **many** duplicate id's and the Sphinx build process
will be littered with complaints.
**Usage of this feature will completely dismantle the links coordinated in all
parts of Exhale**. Because duplicate id's are generated, Sphinx chooses where
to link to. It seems to reliably choose the links generated by the Breathe File
directive, meaning the majority of the navigational setup of Exhale is pretty
much invalidated.
'''
########################################################################################
# Root API Document Customization and Treeview #
########################################################################################
afterTitleDescription = None
'''
**Optional**
Provide a description to appear just after :data:`~exhale.configs.rootFileTitle`.
**Value in** ``exhale_args`` (str)
If you want to provide a brief summary of say the layout of the API, or call
attention to specific classes, functions, etc, use this. For example, if you had
Python bindings but no explicit documentation for the Python side of the API, you
could use something like
.. code-block:: py
exhale_args = {
# ... other required arguments...
"rootFileTitle": "Library API",
"afterTitleDescription": textwrap.dedent(\'\'\'
.. note::
The following documentation presents the C++ API. The Python API
generally mirrors the C++ API, but some methods may not be available in
Python or may perform different actions.
\'\'\')
}
'''
afterHierarchyDescription = None
'''
**Optional**
Provide a description that appears after the Class and File hierarchies, but before
the full (and usually very long) API listing.
**Value in** ``exhale_args`` (str)
Similar to :data:`~exhale.configs.afterTitleDescription`, only it is included in the
middle of the document.
'''
fullApiSubSectionTitle = "Full API"
'''
**Optional**
The title for the subsection that comes after the Class and File hierarchies, just
before the enumeration of the full API.
**Value in** ``exhale_args`` (str)
The default value is simply ``"Full API"``. Change this to be something else if you
so desire.
'''
afterBodySummary = None
'''
**Optional**
Provide a summary to be included at the bottom of the root library file.
**Value in** ``exhale_args`` (str)
Similar to :data:`~exhale.configs.afterTitleDescription`, only it is included at the
bottom of the document.
.. note::
The root library document generated can be quite long, depending on your
framework. Important notes to developers should be included at the top of the
file using :data:`~exhale.configs.afterTitleDescription`, or after the hierarchies
using :data:`~exhale.configs.afterHierarchyDescription`.
'''
fullToctreeMaxDepth = 5
'''
**Optional**
The generated library root document performs ``.. include:: unabridged_api.rst`` at
the bottom, after the Class and File hierarchies. Inside ``unabridged_api.rst``,
every generated file is included using a ``toctree`` directive to prevent Sphinx
from getting upset about documents not being included. This value controls the
``:maxdepth:`` for all of these ``toctree`` directives.
**Value in** ``exhale_args`` (int)
The default value is ``5``, but you may want to give a smaller value depending on
the framework being documented.
.. warning::
This value must be greater than or equal to ``1``. You are advised not to use
a value greater than ``5``.
'''
listingExclude = []
'''
**Optional**
A list of regular expressions to exclude from both the class hierarchy and namespace
page enumerations. This can be useful when you want to keep the listings for the
hierarchy / namespace pages more concise, but **do** ultimately want the excluded
items documented somewhere.
Nodes whose ``name`` (fully qualified, e.g., ``namespace::ClassName``) matches any
regular expression supplied here will:
1. Exclude this item from the class view hierarchy listing.
2. Exclude this item from the defining namespace's listing (where applicable).
3. The "excluded" item will still have it's own documentation **and** be linked in
the "full API listing", as well as from the file page that defined the compound
(if recovered). Otherwise Sphinx will explode with warnings about documents not
being included in any ``toctree`` directives.
This configuration variable is **one size fits all**. It was created as a band-aid
fix for PIMPL frameworks.
.. todo::
More fine-grained control will be available in the pickleable writer API
sometime in Exhale 1.x.
.. note::
If you want to skip documentation of a compound in your framework *entirely*,
this configuration variable is **not** where you do it. See
:ref:`Doxygen PREDEFINED <doxygen_predefined>` for information on excluding
compounds entirely using the doxygen preprocessor.
**Value in** ``exhale_args`` (list)
The list can be of variable types, but each item will be compiled into an internal
list using :func:`python:re.compile`. The arguments for
``re.compile(pattern, flags=0)`` should be specified in order, but for convenience
if no ``flags`` are needed for your use case you can just specify a string. For
example:
.. code-block:: py
exhale_args = {
# These two patterns should be equitable for excluding PIMPL
# objects in a framework that uses the ``XxxImpl`` naming scheme.
"listingExclude": [r".*Impl$", (r".*impl$", re.IGNORECASE)]
}
Each item in ``listingExclude`` may either be a string (the regular expression
pattern), or it may be a length two iterable ``(string pattern, int flags)``.
'''
# Compiled regular expressions from listingExclude
# TODO: moves into config object
_compiled_listing_exclude = []
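# A rough sketch (an assumption, not necessarily Exhale's actual logic) of how
# the ``listingExclude`` entries documented above could be compiled into
# ``_compiled_listing_exclude``:
#
#   import re
#   for item in listingExclude:
#       if isinstance(item, six.string_types):
#           _compiled_listing_exclude.append(re.compile(item))
#       else:
#           pattern, flags = item
#           _compiled_listing_exclude.append(re.compile(pattern, flags))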
unabridgedOrphanKinds = {"dir", "file"}
"""
**Optional**
The list of node kinds to **exclude** from the unabridged API listing beneath the
class and file hierarchies.
**Value in** ``exhale_args`` (list or set of strings)
The list of kinds (see :data:`~exhale.utils.AVAILABLE_KINDS`) that will **not** be
included in the unabridged API listing. The default is to exclude directories and
files (which are already in the file hierarchy). Note that if this variable is
provided, it will overwrite the default ``{"dir", "file"}``, meaning if you want
to exclude something in addition you need to include ``"dir"`` and ``"file"``:
.. code-block:: py
# In conf.py
exhale_args = {
# Case 1: _only_ exclude union
"unabridgedOrphanKinds": {"union"}
# Case 2: exclude union in addition to dir / file.
"unabridgedOrphanKinds": {"dir", "file", "union"}
}
.. tip::
See :data:`~exhale.configs.fullToctreeMaxDepth`, users seeking to reduce the
length of the unabridged API should set this value to ``1``.
.. warning::
If **either** ``"class"`` **or** ``"struct"`` appear in
``unabridgedOrphanKinds`` then **both** will be excluded. The unabridged API
will present classes and structs together.
"""
########################################################################################
# Clickable Hierarchies <3 #
########################################################################################
createTreeView = False
'''
**Optional**
When set to ``True``, clickable hierarchies for the Class and File views will be
generated. **Set this variable to** ``True`` **if you are generating html** output
for much more attractive websites!
**Value in** ``exhale_args`` (bool)
    When set to ``False``, the Class and File hierarchies are just reStructuredText
bullet lists. This is rather unattractive, but the default of ``False`` is to
hopefully enable non-html writers to still be able to use ``exhale``.
.. tip::
Using ``html_theme = "bootstrap"`` (the `Sphinx Bootstrap Theme`__)? Make sure
you set :data:`~exhale.configs.treeViewIsBootstrap` to ``True``!
__ https://ryan-roemer.github.io/sphinx-bootstrap-theme/
'''
minifyTreeView = True
'''
**Optional**
When set to ``True``, the generated html and/or json for the class and file
hierarchy trees will be minified.
**Value in** ``exhale_args`` (bool)
The default value is ``True``, which should help page load times for larger APIs.
Setting to ``False`` should only really be necessary if there is a problem -- the
minified version will be hard to parse as a human.
'''
treeViewIsBootstrap = False
'''
**Optional**
If the generated html website is using ``bootstrap``, make sure to set this to
``True``. The `Bootstrap Treeview`__ library will be used.
__ http://jonmiles.github.io/bootstrap-treeview/
**Value in** ``exhale_args`` (bool)
When set to ``True``, the clickable hierarchies will be generated using a Bootstrap
friendly library.
'''
treeViewBootstrapTextSpanClass = "text-muted"
'''
**Optional**
What **span** class to use for the *qualifying* text after the icon, but before the
hyperlink to the actual documentation page. For example, ``Struct Foo`` in the
hierarchy would have ``Struct`` as the *qualifying* text (controlled by this
variable), and ``Foo`` will be a hyperlink to ``Foo``'s actual documentation.
**Value in** ``exhale_args`` (str)
A valid class to apply to a ``span``. The actual HTML being generated is something
like:
.. code-block:: html
<span class="{span_cls}">{qualifier}</span> {hyperlink text}
So if the value of this input was ``"text-muted"``, and it was the hierarchy element
for ``Struct Foo``, it would be
.. code-block:: html
<span class="text-muted">Struct</span> Foo
The ``Foo`` portion will receive the hyperlink styling elsewhere.
.. tip::
Easy choices to consider are the `contextual classes`__ provided by your
bootstrap theme. Alternatively, add your own custom stylesheet to Sphinx
directly and create a class with the color you want there.
__ https://getbootstrap.com/docs/3.3/css/#helper-classes-colors
.. danger::
No validity checks are performed. If you supply a class that cannot be used,
there is no telling what will happen.
'''
treeViewBootstrapIconMimicColor = "text-muted"
'''
**Optional**
The **paragraph** CSS class to *mimic* for the icon color in the tree view.
**Value in** ``exhale_args`` (str)
This value must be a valid CSS class for a **paragraph**. The way that it is used
is in JavaScript, on page-load, a "fake paragraph" is inserted with the class
specified by this variable. The color is extracted, and then a force-override is
applied to the page's stylesheet. This was necessary to override some aspects of
    what the ``bootstrap-treeview`` library does. Its full usage looks like this:
.. code-block:: js
/* Inspired by very informative answer to get color of links:
https://stackoverflow.com/a/2707837/3814202 */
/* vvvvvvvvvv what you give */
var $fake_p = $('<p class="icon_mimic"></p>').hide().appendTo("body");
/* ^^^^^^^^^^ */
var iconColor = $fake_p.css("color");
$fake_p.remove();
/* later on */
// Part 2: override the style of the glyphicons by injecting some CSS
$('<style type="text/css" id="exhaleTreeviewOverride">' +
' .treeview span[class~=icon] { ' +
' color: ' + iconColor + ' ! important;' +
' }' +
'</style>').appendTo('head');
.. tip::
Easy choices to consider are the `contextual classes`__ provided by your
bootstrap theme. Alternatively, add your own custom stylesheet to Sphinx
directly and create a class with the color you want there.
__ https://getbootstrap.com/docs/3.3/css/#helper-classes-colors
.. danger::
No validity checks are performed. If you supply a class that cannot be used,
there is no telling what will happen.
'''
treeViewBootstrapOnhoverColor = "#F5F5F5"
'''
**Optional**
The hover color for elements in the hierarchy trees. Default color is a light-grey,
as specified by default value of ``bootstrap-treeview``'s `onhoverColor`_.
    **Value in** ``exhale_args`` (str)
Any valid color. See `onhoverColor`_ for information.
.. _onhoverColor: https://github.com/jonmiles/bootstrap-treeview#onhovercolor
'''
treeViewBootstrapUseBadgeTags = True
'''
**Optional**
When set to ``True`` (default), a Badge indicating the number of nested children
will be included **when 1 or more children are present**.
When enabled, each node in the json data generated has it's `tags`_ set, and the
global `showTags`_ option is set to ``true``.
.. _tags: https://github.com/jonmiles/bootstrap-treeview#tags
.. _showTags: https://github.com/jonmiles/bootstrap-treeview#showtags
**Value in** ``exhale_args`` (bool)
Set to ``False`` to exclude the badges. Search for ``Tags as Badges`` on the
`example bootstrap treeview page`__, noting that if a given node does not have any
children, no badge will be added. This is simply because a ``0`` badge is likely
more confusing than helpful.
__ http://jonmiles.github.io/bootstrap-treeview/
'''
treeViewBootstrapExpandIcon = "glyphicon glyphicon-plus"
'''
**Optional**
Global setting for what the "expand" icon is for the bootstrap treeview. The
default value here is the default of the ``bootstrap-treeview`` library.
**Value in** ``exhale_args`` (str)
See the `expandIcon`_ description of ``bootstrap-treeview`` for more information.
.. _expandIcon: https://github.com/jonmiles/bootstrap-treeview#expandicon
.. note::
Exhale handles wrapping this in quotes, you just need to specify the class
(making sure that it has spaces where it should). Exhale does **not** perform
any validity checks on the value of this variable. For example, you could use
something like:
.. code-block:: py
exhale_args = {
# ... required / other optional args ...
# you can set one, both, or neither. just showing both in same example
# set the icon to show it can be expanded
"treeViewBootstrapExpandIcon": "glyphicon glyphicon-chevron-right",
# set the icon to show it can be collapsed
"treeViewBootstrapCollapseIcon": "glyphicon glyphicon-chevron-down"
}
'''
treeViewBootstrapCollapseIcon = "glyphicon glyphicon-minus"
'''
**Optional**
Global setting for what the "collapse" icon is for the bootstrap treeview. The
default value here is the default of the ``bootstrap-treeview`` library.
**Value in** ``exhale_args`` (str)
See the `collapseIcon`_ description of ``bootstrap-treeview`` for more information.
See :data:`~exhale.configs.treeViewBootstrapExpandIcon` for how to specify this
CSS class value.
.. _collapseIcon: https://github.com/jonmiles/bootstrap-treeview#collapseicon
'''
treeViewBootstrapLevels = 1
'''
**Optional**
The default number of levels to expand on page load. Note that the
``bootstrap-treeview`` default `levels`_ value is ``2``. ``1`` seems like a safer
default for Exhale since the value you choose here largely depends on how you have
structured your code.
.. _levels: https://github.com/jonmiles/bootstrap-treeview#levels
**Value in** ``exhale_args`` (int)
An integer representing the number of levels to expand for **both** the Class and
File hierarchies. **This value should be greater than or equal to** ``1``, but
**no validity checks are performed** on your input. Buyer beware.
'''
_class_hierarchy_id = "class-treeView"
'''
The ``id`` attribute of the HTML element associated with the **Class** Hierarchy when
:data:`~exhale.configs.createTreeView` is ``True``.
1. When :data:`~exhale.configs.treeViewIsBootstrap` is ``False``, this ``id`` is attached
to the outer-most ``ul``.
2. For bootstrap, an empty ``div`` is inserted with this ``id``, which will be the
anchor point for the ``bootstrap-treeview`` library.
'''
_file_hierarchy_id = "file-treeView"
'''
The ``id`` attribute of the HTML element associated with the **File** Hierarchy when
:data:`~exhale.configs.createTreeView` is ``True``.
1. When :data:`~exhale.configs.treeViewIsBootstrap` is ``False``, this ``id`` is attached
to the outer-most ``ul``.
2. For bootstrap, an empty ``div`` is inserted with this ``id``, which will be the
anchor point for the ``bootstrap-treeview`` library.
'''
_bstrap_class_hierarchy_fn_data_name = "getClassHierarchyTree"
'''
The name of the JavaScript function that returns the ``json`` data associated with the
**Class** Hierarchy when :data:`~exhale.configs.createTreeView` is ``True`` **and**
:data:`~exhale.configs.treeViewIsBootstrap` is ``True``.
'''
_bstrap_file_hierarchy_fn_data_name = "getFileHierarchyTree"
'''
The name of the JavaScript function that returns the ``json`` data associated with the
**File** Hierarchy when :data:`~exhale.configs.createTreeView` is ``True`` **and**
:data:`~exhale.configs.treeViewIsBootstrap` is ``True``.
'''
########################################################################################
# Page Level Customization #
########################################################################################
includeTemplateParamOrderList = False
'''
**Optional**
For Classes and Structs (only), Exhale can provide a numbered list enumeration
displaying the template parameters in the order they should be specified.
**Value in** ``exhale_args`` (bool)
This feature can be useful when you have template classes that have **many**
template parameters. The Breathe directives **will** include the parameters in the
order they should be given. However, if you have a template class with more than
say 5 parameters, it can become a little hard to read.
.. note::
This configuration is all or nothing, and applies to every template Class /
Struct. Additionally, **no** ``tparam`` documentation is displayed with this
listing. Just the types / names they are declared as (and default values if
provided).
This feature really only exists as a historical accident.
.. warning::
As a consequence of the (hacky) implementation, if you use this feature you commit
to HTML output only. Where applicable, template parameters that generate links to
other items being documented **only** work in HTML.
'''
pageLevelConfigMeta = None
'''
**Optional**
reStructuredText allows you to employ page-level configurations. These are included
at the top of the page, before the title.
**Value in** ``exhale_args`` (str)
An example of one such feature would be ``":tocdepth: 5"``. To be honest, I'm not
sure why you would need this feature. But it's easy to implement, you just need to
make sure that you provide valid reStructuredText or *every* page will produce
errors.
See the `Field Lists`__ guide for more information.
__ https://www.sphinx-doc.org/en/master/usage/restructuredtext/field-lists.html
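    For example, an illustrative sketch that raises the local table of contents depth
    (the value ``5`` is arbitrary):
    .. code-block:: py
        # in conf.py
        exhale_args = {
            # ... required / optional args ...
            "pageLevelConfigMeta": ":tocdepth: 5"
        }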
'''
repoRedirectURL = None
'''
.. todo::
**This feature is NOT implemented yet**! Hopefully soon. It definitely gets under
my skin. It's mostly documented just to show up in the ``todolist`` for me ;)
**Optional**
When using the Sphinx RTD theme, there is a button placed in the top-right saying
something like "Edit this on GitHub". Since the documents are all being generated
dynamically (and not supposed to be tracked by ``git``), the links all go nowhere.
Set this so Exhale can try and fix this.
**Value in** ``exhale_args`` (str)
The url of the repository your documentation is being generated from.
.. warning::
Seriously this isn't implemented. I may not even need this from you. The harder
    part is figuring out how to map a given node's "``def_in_file``" to the correct
URL. I should be able to get the URL from ``git remote`` and construct the
URL from that and ``git branch``. Probably just some path hacking with
``git rev-parse --show-toplevel`` and comparing that to
:data:`~exhale.configs.doxygenStripFromPath`?
Please feel free to `add your input here`__.
__ https://github.com/svenevs/exhale/issues/2
'''
# Using Contents Directives ############################################################
contentsDirectives = True
'''
**Optional**
Include a ``.. contents::`` directive beneath the title on pages that have potential
to link to a decent number of documents.
**Value in** ``exhale_args`` (bool)
By default, Exhale will include a ``.. contents::`` directive on the individual
generated pages for the types specified by
:data:`~exhale.configs.kindsWithContentsDirectives`. Set this to ``False`` to
disable globally.
See the :ref:`using_contents_directives` section for all pieces of the puzzle.
'''
contentsTitle = "Contents"
'''
**Optional**
    The title of the ``.. contents::`` directive for an individual file page, when its
``kind`` is in the list specified by
:data:`~exhale.configs.kindsWithContentsDirectives` **and**
:data:`~exhale.configs.contentsDirectives` is ``True``.
**Value in** ``exhale_args`` (str)
The default (for both Exhale and reStructuredText) is to label this as ``Contents``.
You can choose whatever value you like. If you prefer to have **no title** for the
``.. contents::`` directives, **specify the empty string**.
.. note::
Specifying the empty string only removes the title **when** ``":local:"`` **is
present in** :data:`~exhale.configs.contentsSpecifiers`. See the
:ref:`using_contents_directives` section for more information.
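    For example, an illustrative way to remove the title entirely (assuming ``":local:"``
    remains in :data:`~exhale.configs.contentsSpecifiers`):
    .. code-block:: py
        # in conf.py
        exhale_args = {
            # ... required / optional args ...
            "contentsTitle": ""
        }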
'''
contentsSpecifiers = [":local:", ":backlinks: none"]
'''
**Optional**
The specifications to apply to ``.. contents::`` directives for the individual file
    pages when its ``kind`` is in the list specified by
:data:`~exhale.configs.kindsWithContentsDirectives` **and**
:data:`~exhale.configs.contentsDirectives` is ``True``.
**Value in** ``exhale_args`` (list)
A (one-dimensional) list of strings that will be applied to any ``.. contents::``
directives generated. Provide the **empty list** if you wish to have no specifiers
added to these directives. See the :ref:`using_contents_directives` section for
more information.
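    For example, an illustrative override that removes every specifier:
    .. code-block:: py
        # in conf.py
        exhale_args = {
            # ... required / optional args ...
            "contentsSpecifiers": []
        }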
'''
kindsWithContentsDirectives = ["file", "namespace"]
'''
**Optional**
The kinds of compounds that will include a ``.. contents::`` directive on their
individual library page. The default is to generate one for Files and Namespaces.
Only takes meaning when :data:`~exhale.configs.contentsDirectives` is ``True``.
**Value in** ``exhale_args`` (list)
Provide a (one-dimensional) ``list`` or ``tuple`` of strings of the kinds of
compounds that should include a ``.. contents::`` directive. Each kind given
    must be one of the entries in :data:`~exhale.utils.AVAILABLE_KINDS`.
For example, if you wanted to enable Structs and Classes as well you would do
something like:
.. code-block:: py
# in conf.py
exhale_args = {
# ... required / optional args ...
"kindsWithContentsDirectives": ["file", "namespace", "class", "struct"]
}
.. note::
This is a "full override". So if you want to still keep the defaults of
``"file"`` and ``"namespace"``, **you** must include them yourself.
'''
########################################################################################
# Breathe Customization #
########################################################################################
customSpecificationsMapping = None
'''
**Optional**
See the :ref:`usage_customizing_breathe_output` section for how to use this.
**Value in** ``exhale_args`` (dict)
The dictionary produced by calling
:func:`~exhale.utils.makeCustomSpecificationsMapping` with your custom function.
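    As a rough sketch only (the function name and the chosen specifications below are
    illustrative, not a canonical recipe; see :ref:`usage_customizing_breathe_output`
    for the authoritative walk-through):
    .. code-block:: py
        # in conf.py
        from exhale import utils
        def specificationsForKind(kind):
            # illustrative: show all members for classes and structs, nothing extra otherwise
            if kind == "class" or kind == "struct":
                return [":members:", ":protected-members:", ":undoc-members:"]
            return []
        exhale_args = {
            # ... required / optional args ...
            "customSpecificationsMapping": utils.makeCustomSpecificationsMapping(specificationsForKind)
        }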
'''
_closure_map_sanity_check = "blargh_BLARGH_blargh"
'''
See :func:`~exhale.utils.makeCustomSpecificationsMapping` implementation, this is
inserted to help enforce that Exhale made the dictionary going into
:data:`~exhale.configs.customSpecificationsMapping`.
'''
########################################################################################
# Doxygen Execution and Customization #
########################################################################################
_doxygen_xml_output_directory = None
'''
The absolute path to the root level of the doxygen xml output. If the path to the
``index.xml`` file created by doxygen was ``./doxyoutput/xml/index.xml``, then this
would simply be ``./doxyoutput/xml``.
.. note::
This is the exact same path as ``breathe_projects[breathe_default_project]``, only it
is an absolute path.
'''
exhaleExecutesDoxygen = False
'''
**Optional**
Have Exhale launch Doxygen when you execute ``make html``.
**Value in** ``exhale_args`` (bool)
Set to ``True`` to enable launching Doxygen. You must set either
:data:`~exhale.configs.exhaleUseDoxyfile` or :data:`~exhale.configs.exhaleDoxygenStdin`.
'''
exhaleUseDoxyfile = False
'''
**Optional**
If :data:`~exhale.configs.exhaleExecutesDoxygen` is ``True``, this tells Exhale to
use your own ``Doxyfile``. The encouraged approach is to use
:data:`~exhale.configs.exhaleDoxygenStdin`.
**Value in** ``exhale_args`` (bool)
Set to ``True`` to have Exhale use your ``Doxyfile``.
.. note::
The ``Doxyfile`` must be in the **same** directory as ``conf.py``. Exhale will
change directories to here before launching Doxygen when you have separate source
and build directories for Sphinx configured.
.. warning::
No sanity checks on the ``Doxyfile`` are performed. If you are using this option
you need to verify two parameters in particular:
1. ``OUTPUT_DIRECTORY`` is configured so that
``breathe_projects[breathe_default_project]`` agrees. See the
:ref:`Mapping of Project Names to Doxygen XML Output Paths <breathe_project>`
section.
2. ``STRIP_FROM_PATH`` is configured to be identical to what is specified with
:data:`~exhale.configs.doxygenStripFromPath`.
I have no idea what happens when these conflict, but it likely will never result
in valid documentation.
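A minimal illustrative opt-in (assuming your ``Doxyfile`` already lives next to ``conf.py``):
.. code-block:: py
    # in conf.py
    exhale_args = {
        # ... required / optional args ...
        "exhaleExecutesDoxygen": True,
        "exhaleUseDoxyfile": True
    }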
'''
exhaleDoxygenStdin = None
'''
**Optional**
If :data:`~exhale.configs.exhaleExecutesDoxygen` is ``True``, this tells Exhale to
use the (multiline string) value specified in this argument *in addition to* the
:data:`~exhale.configs.DEFAULT_DOXYGEN_STDIN_BASE`.
**Value in** ``exhale_args`` (str)
This string describes your project's specific Doxygen configurations. At the very
least, it must provide ``INPUT``. See the :ref:`usage_exhale_executes_doxygen`
section for how to use this in conjunction with the default configurations, as well
as how to override them.
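    A minimal illustrative sketch (the ``INPUT`` path is an assumption about your
    project layout):
    .. code-block:: py
        # in conf.py
        exhale_args = {
            # ... required / optional args ...
            "exhaleExecutesDoxygen": True,
            "exhaleDoxygenStdin": "INPUT = ../include"
        }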
'''
DEFAULT_DOXYGEN_STDIN_BASE = textwrap.dedent(r'''
# If you need this to be YES, exhale will probably break.
CREATE_SUBDIRS = NO
    # So that Doxygen does not trim paths, which affects the File hierarchy
FULL_PATH_NAMES = YES
# Nested folders will be ignored without this. You may not need it.
RECURSIVE = YES
# Set to YES if you are debugging or want to compare.
GENERATE_HTML = NO
# Unless you want it...
GENERATE_LATEX = NO
# Both breathe and exhale need the xml.
GENERATE_XML = YES
# Set to NO if you do not want the Doxygen program listing included.
XML_PROGRAMLISTING = YES
# Allow for rst directives and advanced functions e.g. grid tables
ALIASES = "rst=\verbatim embed:rst:leading-asterisk"
ALIASES += "endrst=\endverbatim"
# Enable preprocessing and related preprocessor necessities
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = NO
SKIP_FUNCTION_MACROS = NO
    # extra defs to help with building the _right_ version of the docs
PREDEFINED = DOXYGEN_DOCUMENTATION_BUILD
PREDEFINED += DOXYGEN_SHOULD_SKIP_THIS
''')
'''
These are the default values sent to Doxygen along stdin when
:data:`~exhale.configs.exhaleExecutesDoxygen` is ``True``. This is sent to Doxygen
immediately **before** the :data:`~exhale.configs.exhaleDoxygenStdin` provided to
``exhale_args`` in your ``conf.py``. In this way, you can override any of the specific
defaults shown here.
.. tip::
See the documentation for :data:`~exhale.configs.exhaleDoxygenStdin`, as well as
:data:`~exhale.configs.exhaleUseDoxyfile`. Only **one** may be provided to the
``exhale_args`` in your ``conf.py``.
.. include:: ../DEFAULT_DOXYGEN_STDIN_BASE_value.rst
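For example, an illustrative override of one of these defaults through
:data:`~exhale.configs.exhaleDoxygenStdin` (the ``INPUT`` path is an assumption):
.. code-block:: py
    # in conf.py
    exhale_args = {
        # ... required / optional args ...
        "exhaleExecutesDoxygen": True,
        "exhaleDoxygenStdin": "INPUT = ../include\nXML_PROGRAMLISTING = NO"
    }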
'''
exhaleSilentDoxygen = False
'''
**Optional**
When set to ``True``, the Doxygen output is omitted from the build.
**Value in** ``exhale_args`` (bool)
Documentation generation can be quite verbose, especially when running both Sphinx
and Doxygen in the same process. Use this to silence Doxygen.
.. danger::
You are **heavily** discouraged from setting this to ``True``. Many problems
that may arise through either Exhale or Breathe are because the Doxygen
documentation itself has errors. It will be much more difficult to find these
when you squelch the Doxygen output.
The reason you would do this is for actual limitations on your specific
``stdout`` (e.g. you are getting a buffer maxed out). The likelihood of this
being a problem for you is exceptionally small.
'''
########################################################################################
# Programlisting Customization #
########################################################################################
lexerMapping = {}
'''
**Optional**
When specified, and ``XML_PROGRAMLISTING`` is set to ``YES`` in Doxygen (either via
your ``Doxyfile`` or :data:`exhaleDoxygenStdin <exhale.configs.exhaleDoxygenStdin>`),
this mapping can be used to customize / correct the Pygments lexer used for the
program listing page generated for files. Most projects will **not** need to use
this setting.
**Value in** ``exhale_args`` (dict)
The keys and values are both strings. Each key is a regular expression that will be
used to check with :func:`python:re.match`, noting that the primary difference
between :func:`python:re.match` and :func:`python:re.search` that you should be
aware of is that ``match`` searches from the **beginning** of the string. Each
value should be a **valid** `Pygments lexer <http://pygments.org/docs/lexers/>`_.
Example usage:
.. code-block:: py
        exhale_args = {
# ...
"lexerMapping": {
r".*\.cuh": "cuda",
r"path/to/exact_filename\.ext": "c"
}
}
.. note::
The pattern is used to search the full path of a file, **as represented in
Doxygen**. This is so that duplicate file names in separate folders can be
distinguished if needed. The file path as represented in Doxygen is defined
by the path to the file, with some prefix stripped out. The prefix stripped out
depends entirely on what you provided to
:data:`doxygenStripFromPath <exhale.configs.doxygenStripFromPath>`.
.. tip::
This mapping is used in
:func:`utils.doxygenLanguageToPygmentsLexer <exhale.utils.doxygenLanguageToPygmentsLexer>`,
when provided it is queried first. If you are trying to get program listings for
a file that is otherwise not supported directly by Doxygen, you typically want to
tell Doxygen to interpret the file as a different language. Take the CUDA case.
In my input to :data:`exhaleDoxygenStdin <exhale.configs.exhaleDoxygenStdin>`, I
will want to set both ``FILE_PATTERNS`` and append to ``EXTENSION_MAPPING``:
.. code-block:: make
FILE_PATTERNS = *.hpp *.cuh
EXTENSION_MAPPING += cuh=c++
By setting ``FILE_PATTERNS``, Doxygen will now try and process ``*.cuh`` files.
By *appending* to ``EXTENSION_MAPPING``, it will treat ``*.cuh`` as C++ files.
For CUDA, this is a reasonable choice because Doxygen is generally able to parse
the file as C++ and get everything right in terms of member definitions,
docstrings, etc. **However**, now the XML generated by doxygen looks like this:
.. code-block:: xml
<!-- >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> vvv -->
<compounddef id="bilateral__filter_8cuh" kind="file" language="C++">
    So Exhale would by default put the program listing in a ``.. code-block:: cpp``.
By setting this variable in ``exhale_args``, you can bypass this and get the
desired lexer of your choice.
Some important notes for those not particularly comfortable or familiar with regular
expressions in python:
1. Note that each key defines a *raw* string (prefix with ``r``): ``r"pattern"``.
This is not entirely necessary for this case, but using raw strings makes it so
that you do not have to escape as many things. It's a good practice to adopt,
but for these purposes should not matter all that much.
2. Note the escaped ``.`` character. This means find the literal ``.``, rather than
the regular expression wildcard for *any character*. Observe the difference
with and without:
.. code-block:: pycon
>>> import re
>>> if re.match(r".*.cuh", "some_filecuh.hpp"): print("Oops!")
...
Oops!
>>> if re.match(r".*\.cuh", "some_filecuh.hpp"): print("Oops!")
...
>>>
Without ``\.``, the ``.cuh`` matches ``ecuh`` since ``.`` is a wildcard for *any*
character. You may also want to use ``$`` at the end of the expression if there
are multiple file extensions involved: ``r".*\.cuh$"``. The ``$`` states
"end-of-pattern", which in the usage of Exhale means end of line (the compiled
regular expressions are not compiled with :data:`python:re.MULTILINE`).
3. Take special care at the beginning of your regular expression. The pattern
``r"*\.cuh"`` does **not** compile! You need to use ``r".*\.cuh"``, with the
leading ``.`` being required.
'''
_compiled_lexer_mapping = {}
'''
Internal mapping of compiled regular expression objects to Pygments lexer strings. This
dictionary is created by compiling every key in
:data:`lexerMapping <exhale.configs.lexerMapping>`. See implementation of
:func:`utils.doxygenLanguageToPygmentsLexer <exhale.utils.doxygenLanguageToPygmentsLexer>`
for usage.
'''
########################################################################################
## #
## Utility variables. #
## #
########################################################################################
SECTION_HEADING_CHAR = "="
''' The restructured text H1 heading character used to underline sections. '''
SUB_SECTION_HEADING_CHAR = "-"
''' The restructured text H2 heading character used to underline subsections. '''
SUB_SUB_SECTION_HEADING_CHAR = "*"
''' The restructured text H3 heading character used to underline sub-subsections. '''
MAXIMUM_FILENAME_LENGTH = 255
'''
When a potential filename is longer than ``255``, a sha1 sum is used to shorten. Note
that there is no ubiquitous and reliable way to query this information, as it depends
on both the operating system, filesystem, **and** even the location (directory path) the
file would be generated to (depending on the filesystem). As such, a conservative value
of ``255`` should guarantee that the desired filename can always be created.
'''
MAXIMUM_WINDOWS_PATH_LENGTH = 260
r'''
The file path length on Windows cannot be greater than or equal to ``260`` characters.
Since Windows' pathetically antiquated filesystem cannot handle this, they have enabled
a "magic" prefix they call an *extended-length path*. This is achieved by inserting
the prefix ``\\?\`` which allows you to go up to a maximum path of ``32,767`` characters
**but you may only do this for absolute paths**. See `Maximum Path Length Limitation`__
for more information.
Dear Windows, did you know it is the 21st century?
__ https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file#maximum-path-length-limitation
'''
_the_app = None
''' The Sphinx ``app`` object. Currently unused, saved for availability in future. '''
_app_src_dir = None
'''
**Do not modify**. The location of ``app.srcdir`` of the Sphinx application, once the
build process has begun to execute. Saved to be able to run a few different sanity
checks in different places.
'''
_on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
'''
**Do not modify**. Signals whether or not the build is taking place on ReadTheDocs. If
it is, then colorization of output is disabled, as well as the Doxygen output (where
applicable) is directed to ``/dev/null`` as capturing it can cause the ``subprocess``
buffers to overflow.
'''
########################################################################################
## #
## Secondary Sphinx Entry Point #
## Called from exhale/__init__.py:environment_ready during the sphinx build process. #
## #
########################################################################################
def apply_sphinx_configurations(app):
'''
This method applies the various configurations users place in their ``conf.py``, in
the dictionary ``exhale_args``. The error checking seems to be robust, and
borderline obsessive, but there may very well be some glaring flaws.
When the user requests for the ``treeView`` to be created, this method is also
responsible for adding the various CSS / JavaScript to the Sphinx Application
to support the hierarchical views.
.. danger::
This method is **not** supposed to be called directly. See
``exhale/__init__.py`` for how this function is called indirectly via the Sphinx
API.
**Parameters**
``app`` (:class:`sphinx.application.Sphinx`)
The Sphinx Application running the documentation build.
'''
# Import local to function to prevent circular imports elsewhere in the framework.
from . import deploy
from . import utils
####################################################################################
# Make sure they have the `breathe` configs setup in a way that we can use them. #
####################################################################################
# Breathe allows users to have multiple projects to configure in one `conf.py`
# A dictionary of keys := project names, values := path to Doxygen xml output dir
breathe_projects = app.config.breathe_projects
if not breathe_projects:
raise ConfigError("You must set the `breathe_projects` in `conf.py`.")
elif type(breathe_projects) is not dict:
raise ConfigError("The type of `breathe_projects` in `conf.py` must be a dictionary.")
# The breathe_default_project is required by `exhale` to determine where to look for
# the doxygen xml.
#
# TODO: figure out how to allow multiple breathe projects?
breathe_default_project = app.config.breathe_default_project
if not breathe_default_project:
raise ConfigError("You must set the `breathe_default_project` in `conf.py`.")
elif not isinstance(breathe_default_project, six.string_types):
raise ConfigError("The type of `breathe_default_project` must be a string.")
if breathe_default_project not in breathe_projects:
raise ConfigError(
"The given breathe_default_project='{0}' was not a valid key in `breathe_projects`:\n{1}".format(
breathe_default_project, breathe_projects
)
)
# Grab where the Doxygen xml output is supposed to go, make sure it is a string,
# defer validation of existence until after potentially running Doxygen based on
# the configs given to exhale
doxy_xml_dir = breathe_projects[breathe_default_project]
if not isinstance(doxy_xml_dir, six.string_types):
raise ConfigError(
"The type of `breathe_projects[breathe_default_project]` from `conf.py` was not a string."
)
# Make doxy_xml_dir relative to confdir (where conf.py is)
if not os.path.isabs(doxy_xml_dir):
doxy_xml_dir = os.path.abspath(os.path.join(app.confdir, doxy_xml_dir))
####################################################################################
# Initial sanity-check that we have the arguments needed. #
####################################################################################
exhale_args = app.config.exhale_args
if not exhale_args:
raise ConfigError("You must set the `exhale_args` dictionary in `conf.py`.")
elif type(exhale_args) is not dict:
raise ConfigError("The type of `exhale_args` in `conf.py` must be a dictionary.")
####################################################################################
# In order to be able to loop through things below, we want to grab the globals #
# dictionary (rather than needing to do `global containmentFolder` etc for every #
# setting that is being changed). #
####################################################################################
configs_globals = globals()
# Used for internal verification of available keys
keys_available = []
# At the end of input processing, fail out if unrecognized keys were found.
keys_processed = []
####################################################################################
# Gather the mandatory input for exhale. #
####################################################################################
key_error = "Did not find required key `{key}` in `exhale_args`."
val_error = "The type of the value for key `{key}` must be `{exp}`, but was `{got}`."
req_kv = [
("containmentFolder", six.string_types, True),
("rootFileName", six.string_types, False),
("rootFileTitle", six.string_types, False),
("doxygenStripFromPath", six.string_types, True)
]
for key, expected_type, make_absolute in req_kv:
# Used in error checking later
keys_available.append(key)
# Make sure we have the key
if key not in exhale_args:
raise ConfigError(key_error.format(key=key))
# Make sure the value is at the very least the correct type
val = exhale_args[key]
if not isinstance(val, expected_type):
val_t = type(val)
raise ConfigError(val_error.format(key=key, exp=expected_type, got=val_t))
# Make sure that a value was provided (e.g. no empty strings)
if not val:
raise ConfigError("Non-empty value for key [{0}] required.".format(key))
# If the string represents a path, make it absolute
if make_absolute:
# Directories are made absolute relative to app.confdir (where conf.py is)
if not os.path.isabs(val):
val = os.path.abspath(os.path.join(os.path.abspath(app.confdir), val))
# Set the config for use later
try:
configs_globals[key] = val
keys_processed.append(key)
except Exception as e:
raise ExtensionError(
"Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\n{2}".format(
key, val, e
)
)
####################################################################################
# Validate what can be checked from the required arguments at this time. #
####################################################################################
global _the_app
_the_app = app
# Make sure they know this is a bad idea. The order of these checks is important.
# This assumes the path given was not the empty string (3 will break if it is).
#
# 1. If containmentFolder and app.srcdir are the same, problem.
# 2. If app.srcdir is not at the beginning of containmentFolder, problem.
# 3. If the first two checks have not raised a problem, the final check is to make
# sure that a subdirectory was actually used, as opposed to something that just
# starts with the same path.
#
# Note for the third check lazy evaluation is the only thing that makes checking
# _parts[1] acceptable ;)
_one = containmentFolder == app.srcdir
_two = not containmentFolder.startswith(app.srcdir)
_parts = containmentFolder.split(app.srcdir)
_three = _parts[0] != "" or len(_parts[1].split(os.path.sep)) > 2 or \
os.path.join(app.srcdir, _parts[1].replace(os.path.sep, "", 1)) != containmentFolder # noqa
# If they are equal, containmentFolder points somewhere entirely differently, or the
# relative path (made absolute again) does not have the srcdir
if _one or _two or _three:
raise ConfigError(
"The given `containmentFolder` [{0}] must be a *SUBDIRECTORY* of [{1}].".format(
containmentFolder, app.srcdir
)
)
global _app_src_dir
_app_src_dir = os.path.abspath(app.srcdir)
# We *ONLY* generate reStructuredText, make sure Sphinx is expecting this as well as
# the to-be-generated library root file is correctly suffixed.
if not rootFileName.endswith(".rst"):
raise ConfigError(
"The given `rootFileName` ({0}) did not end with '.rst'; Exhale is reStructuredText only.".format(
rootFileName
)
)
if ".rst" not in app.config.source_suffix:
raise ConfigError(
"Exhale is reStructuredText only, but '.rst' was not found in `source_suffix` list of `conf.py`."
)
# Make sure the doxygen strip path is an exclude-able path
if not os.path.exists(doxygenStripFromPath):
raise ConfigError(
"The path given as `doxygenStripFromPath` ({0}) does not exist!".format(doxygenStripFromPath)
)
####################################################################################
# Gather the optional input for exhale. #
####################################################################################
# TODO: `list` -> `(list, tuple)`, update docs too.
opt_kv = [
# Build Process Logging, Colors, and Debugging
("verboseBuild", bool),
("alwaysColorize", bool),
("generateBreatheFileDirectives", bool),
# Root API Document Customization and Treeview
("afterTitleDescription", six.string_types),
("afterHierarchyDescription", six.string_types),
("fullApiSubSectionTitle", six.string_types),
("afterBodySummary", six.string_types),
("fullToctreeMaxDepth", int),
("listingExclude", list),
("unabridgedOrphanKinds", (list, set)),
# Clickable Hierarchies <3
("createTreeView", bool),
("minifyTreeView", bool),
("treeViewIsBootstrap", bool),
("treeViewBootstrapTextSpanClass", six.string_types),
("treeViewBootstrapIconMimicColor", six.string_types),
("treeViewBootstrapOnhoverColor", six.string_types),
("treeViewBootstrapUseBadgeTags", bool),
("treeViewBootstrapExpandIcon", six.string_types),
("treeViewBootstrapCollapseIcon", six.string_types),
("treeViewBootstrapLevels", int),
# Page Level Customization
("includeTemplateParamOrderList", bool),
("pageLevelConfigMeta", six.string_types),
("repoRedirectURL", six.string_types),
("contentsDirectives", bool),
("contentsTitle", six.string_types),
("contentsSpecifiers", list),
("kindsWithContentsDirectives", list),
# Breathe Customization
("customSpecificationsMapping", dict),
# Doxygen Execution and Customization
("exhaleExecutesDoxygen", bool),
("exhaleUseDoxyfile", bool),
("exhaleDoxygenStdin", six.string_types),
("exhaleSilentDoxygen", bool),
# Programlisting Customization
("lexerMapping", dict)
]
for key, expected_type in opt_kv:
# Used in error checking later
keys_available.append(key)
# Override the default settings if the key was provided
if key in exhale_args:
# Make sure the value is at the very least the correct type
val = exhale_args[key]
if not isinstance(val, expected_type):
val_t = type(val)
raise ConfigError(val_error.format(key=key, exp=expected_type, got=val_t))
# Set the config for use later
try:
configs_globals[key] = val
keys_processed.append(key)
except Exception as e:
raise ExtensionError(
"Critical error: unable to set `global {0}` to `{1}` in exhale.configs:\n{2}".format(
key, val, e
)
)
# These two need to be lists of strings, check to make sure
def _list_of_strings(lst, title):
for spec in lst:
if not isinstance(spec, six.string_types):
raise ConfigError(
"`{title}` must be a list of strings. `{spec}` was of type `{spec_t}`".format(
title=title,
spec=spec,
spec_t=type(spec)
)
)
_list_of_strings( contentsSpecifiers, "contentsSpecifiers")
_list_of_strings(kindsWithContentsDirectives, "kindsWithContentsDirectives")
_list_of_strings( unabridgedOrphanKinds, "unabridgedOrphanKinds")
# Make sure the kinds they specified are valid
unknown = "Unknown kind `{kind}` given in `{config}`. See utils.AVAILABLE_KINDS."
for kind in kindsWithContentsDirectives:
if kind not in utils.AVAILABLE_KINDS:
raise ConfigError(
unknown.format(kind=kind, config="kindsWithContentsDirectives")
)
for kind in unabridgedOrphanKinds:
if kind not in utils.AVAILABLE_KINDS:
raise ConfigError(
unknown.format(kind=kind, config="unabridgedOrphanKinds")
)
    # Make sure the listingExclude is usable
if "listingExclude" in exhale_args:
import re
# TODO: remove this once config objects are in. Reset needed for testing suite.
configs_globals["_compiled_listing_exclude"] = []
# used for error printing, tries to create string out of item otherwise
# returns 'at index {idx}'
def item_or_index(item, idx):
try:
return "`{item}`".format(item=item)
except:
return "at index {idx}".format(idx=idx)
exclusions = exhale_args["listingExclude"]
for idx in range(len(exclusions)):
# Gather the `pattern` and `flags` parameters for `re.compile`
item = exclusions[idx]
if isinstance(item, six.string_types):
pattern = item
flags = 0
else:
try:
pattern, flags = item
except Exception as e:
raise ConfigError(
"listingExclude item {0} cannot be unpacked as `pattern, flags = item`:\n{1}".format(
item_or_index(item, idx), e
)
)
# Compile the regular expression object.
try:
regex = re.compile(pattern, flags)
except Exception as e:
raise ConfigError(
"Unable to compile specified listingExclude {0}:\n{1}".format(
item_or_index(item, idx), e
)
)
configs_globals["_compiled_listing_exclude"].append(regex)
# Make sure the lexerMapping is usable
if "lexerMapping" in exhale_args:
from pygments import lexers
import re
# TODO: remove this once config objects are in. Reset needed for testing suite.
configs_globals["_compiled_lexer_mapping"] = {}
lexer_mapping = exhale_args["lexerMapping"]
for key in lexer_mapping:
val = lexer_mapping[key]
# Make sure both are strings
if not isinstance(key, six.string_types) or not isinstance(val, six.string_types):
raise ConfigError("All keys and values in `lexerMapping` must be strings.")
# Make sure the key is a valid regular expression
try:
regex = re.compile(key)
except Exception as e:
raise ConfigError(
"The `lexerMapping` key [{0}] is not a valid regular expression: {1}".format(key, e)
)
# Make sure the provided lexer is available
try:
lex = lexers.find_lexer_class_by_name(val)
except Exception as e:
raise ConfigError(
"The `lexerMapping` value of [{0}] for key [{1}] is not a valid Pygments lexer.".format(
val, key
)
)
# Everything works, stash for later processing
configs_globals["_compiled_lexer_mapping"][regex] = val
####################################################################################
# Internal consistency check to make sure available keys are accurate. #
####################################################################################
# See naming conventions described at top of file for why this is ok!
keys_expected = []
for key in configs_globals.keys():
val = configs_globals[key]
# Ignore modules and functions
if not isinstance(val, FunctionType) and not isinstance(val, ModuleType):
if key != "logger": # band-aid for logging api with Sphinx prior to config objects
# Ignore specials like __name__ and internal variables like _the_app
if "_" not in key and len(key) > 0: # don't think there can be zero length ones...
first = key[0]
if first.isalpha() and first.islower():
keys_expected.append(key)
keys_expected = set(keys_expected)
keys_available = set(keys_available)
if keys_expected != keys_available:
err = StringIO()
err.write(textwrap.dedent('''
CRITICAL: Exhale encountered an internal error, please raise an Issue on GitHub:
https://github.com/svenevs/exhale/issues
Please paste the following in the issue report:
Expected keys:
'''))
for key in keys_expected:
err.write("- {0}\n".format(key))
err.write(textwrap.dedent('''
Available keys:
'''))
for key in keys_available:
err.write("- {0}\n".format(key))
err.write(textwrap.dedent('''
The Mismatch(es):
'''))
for key in (keys_available ^ keys_expected):
err.write("- {0}\n".format(key))
err_msg = err.getvalue()
err.close()
raise ExtensionError(err_msg)
####################################################################################
# See if unexpected keys were presented. #
####################################################################################
all_keys = set(exhale_args.keys())
keys_processed = set(keys_processed)
if all_keys != keys_processed:
# Much love: https://stackoverflow.com/a/17388505/3814202
from difflib import SequenceMatcher
def similar(a, b):
return SequenceMatcher(None, a, b).ratio() * 100.0
# If there are keys left over after taking the differences of keys_processed
# (which is all keys Exhale expects to see), inform the user of keys they might
# have been trying to provide.
#
# Convert everything to lower case for better matching success
potential_keys = keys_available - keys_processed
potential_keys_lower = {key.lower(): key for key in potential_keys}
extras = all_keys - keys_processed
extra_error = StringIO()
extra_error.write("Exhale found unexpected keys in `exhale_args`:\n")
for key in extras:
extra_error.write(" - Extra key: {0}\n".format(key))
potentials = []
for mate in potential_keys_lower:
similarity = similar(key, mate)
if similarity > 50.0:
# Output results with the non-lower version they should put in exhale_args
potentials.append((similarity, potential_keys_lower[mate]))
if potentials:
potentials = reversed(sorted(potentials))
for rank, mate in potentials:
extra_error.write(" - {0:2.2f}% match with: {1}\n".format(rank, mate))
extra_error_str = extra_error.getvalue()
extra_error.close()
raise ConfigError(extra_error_str)
####################################################################################
# Verify some potentially inconsistent or ignored settings. #
####################################################################################
# treeViewIsBootstrap only takes meaning when createTreeView is True
if not createTreeView and treeViewIsBootstrap:
logger.warning("Exhale: `treeViewIsBootstrap=True` ignored since `createTreeView=False`")
# fullToctreeMaxDepth > 5 may produce other sphinx issues unrelated to exhale
if fullToctreeMaxDepth > 5:
        logger.warning(
            "Exhale: `fullToctreeMaxDepth={0}` is greater than 5 and may cause build errors for non-html.".format(
fullToctreeMaxDepth
)
)
# Make sure that we received a valid mapping created by utils.makeCustomSpecificationsMapping
sanity = _closure_map_sanity_check
insane = "`customSpecificationsMapping` *MUST* be made using exhale.utils.makeCustomSpecificationsMapping"
if customSpecificationsMapping:
# Sanity check to make sure exhale made this mapping
if sanity not in customSpecificationsMapping:
raise ConfigError(insane)
elif customSpecificationsMapping[sanity] != sanity: # LOL
raise ConfigError(insane)
# Sanity check #2: enforce no new additions were made
expected_keys = set([sanity]) | set(utils.AVAILABLE_KINDS)
provided_keys = set(customSpecificationsMapping.keys())
diff = provided_keys - expected_keys
if diff:
raise ConfigError("Found extra keys in `customSpecificationsMapping`: {0}".format(diff))
# Sanity check #3: make sure the return values are all strings
for key in customSpecificationsMapping:
val_t = type(customSpecificationsMapping[key])
if not isinstance(key, six.string_types):
raise ConfigError(
"`customSpecificationsMapping` key `{key}` gave value type `{val_t}` (need `str`).".format(
key=key, val_t=val_t
)
)
# Specify where the doxygen output should be going
global _doxygen_xml_output_directory
_doxygen_xml_output_directory = doxy_xml_dir
# If requested, the time is nigh for executing doxygen. The strategy:
# 1. Execute doxygen if requested
# 2. Verify that the expected doxy_xml_dir (specified to `breathe`) was created
# 3. Assuming everything went to plan, let exhale take over and create all of the .rst docs
if exhaleExecutesDoxygen:
# Cannot use both, only one or the other
if exhaleUseDoxyfile and (exhaleDoxygenStdin is not None):
raise ConfigError("You must choose one of `exhaleUseDoxyfile` or `exhaleDoxygenStdin`, not both.")
# The Doxyfile *must* be at the same level as conf.py
# This is done so that when separate source / build directories are being used,
# we can guarantee where the Doxyfile is.
if exhaleUseDoxyfile:
doxyfile_path = os.path.abspath(os.path.join(app.confdir, "Doxyfile"))
if not os.path.exists(doxyfile_path):
raise ConfigError("The file [{0}] does not exist".format(doxyfile_path))
here = os.path.abspath(os.curdir)
if here == app.confdir:
returnPath = None
else:
returnPath = here
# All necessary information ready, go to where the Doxyfile is, run Doxygen
# and then return back (where applicable) so sphinx can continue
start = utils.get_time()
if returnPath:
logger.info(utils.info(
"Exhale: changing directories to [{0}] to execute Doxygen.".format(app.confdir)
))
os.chdir(app.confdir)
logger.info(utils.info("Exhale: executing doxygen."))
status = deploy.generateDoxygenXML()
# Being overly-careful to put sphinx back where it was before potentially erroring out
if returnPath:
logger.info(utils.info(
"Exhale: changing directories back to [{0}] after Doxygen.".format(returnPath)
))
os.chdir(returnPath)
if status:
raise ExtensionError(status)
else:
end = utils.get_time()
logger.info(utils.progress(
"Exhale: doxygen ran successfully in {0}.".format(utils.time_string(start, end))
))
else:
if exhaleUseDoxyfile:
logger.warning("Exhale: `exhaleUseDoxyfile` ignored since `exhaleExecutesDoxygen=False`")
if exhaleDoxygenStdin is not None:
logger.warning("Exhale: `exhaleDoxygenStdin` ignored since `exhaleExecutesDoxygen=False`")
if exhaleSilentDoxygen:
logger.warning("Exhale: `exhaleSilentDoxygen=True` ignored since `exhaleExecutesDoxygen=False`")
# Either Doxygen was run prior to this being called, or we just finished running it.
# Make sure that the files we need are actually there.
if not os.path.isdir(doxy_xml_dir):
raise ConfigError(
"Exhale: the specified folder [{0}] does not exist. Has Doxygen been run?".format(doxy_xml_dir)
)
index = os.path.join(doxy_xml_dir, "index.xml")
if not os.path.isfile(index):
raise ConfigError("Exhale: the file [{0}] does not exist. Has Doxygen been run?".format(index))
# Legacy / debugging feature, warn of its purpose
if generateBreatheFileDirectives:
logger.warning("Exhale: `generateBreatheFileDirectives` is a debugging feature not intended for production.")
####################################################################################
# If using a fancy treeView, add the necessary frontend files. #
####################################################################################
if createTreeView:
if treeViewIsBootstrap:
tree_data_static_base = "treeView-bootstrap"
tree_data_css = [os.path.join("bootstrap-treeview", "bootstrap-treeview.min.css")]
tree_data_js = [
os.path.join("bootstrap-treeview", "bootstrap-treeview.min.js"),
# os.path.join("bootstrap-treeview", "apply-bootstrap-treview.js")
]
tree_data_ext = []
else:
tree_data_static_base = "treeView"
tree_data_css = [os.path.join("collapsible-lists", "css", "tree_view.css")]
tree_data_js = [
os.path.join("collapsible-lists", "js", "CollapsibleLists.compressed.js"),
os.path.join("collapsible-lists", "js", "apply-collapsible-lists.js")
]
# The tree_view.css file uses these
tree_data_ext = [
os.path.join("collapsible-lists", "css", "button-closed.png"),
os.path.join("collapsible-lists", "css", "button-open.png"),
os.path.join("collapsible-lists", "css", "button.png"),
os.path.join("collapsible-lists", "css", "list-item-contents.png"),
os.path.join("collapsible-lists", "css", "list-item-last-open.png"),
os.path.join("collapsible-lists", "css", "list-item-last.png"),
os.path.join("collapsible-lists", "css", "list-item-open.png"),
os.path.join("collapsible-lists", "css", "list-item.png"),
os.path.join("collapsible-lists", "css", "list-item-root.png"),
]
# Make sure we have everything we need
collapse_data = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data", tree_data_static_base)
if not os.path.isdir(collapse_data):
raise ExtensionError(
"Exhale: the path to [{0}] was not found, possible installation error.".format(collapse_data)
)
else:
all_files = tree_data_css + tree_data_js + tree_data_ext
missing = []
for file in all_files:
path = os.path.join(collapse_data, file)
if not os.path.isfile(path):
missing.append(path)
if missing:
raise ExtensionError(
"Exhale: the path(s) {0} were not found, possible installation error.".format(missing)
)
# We have all the files we need, the extra files will be copied automatically by
# sphinx to the correct _static/ location, but stylesheets and javascript need
# to be added explicitly
logger.info(utils.info("Exhale: adding tree view css / javascript."))
app.config.html_static_path.append(collapse_data)
# In Sphinx 1.8+ these have been renamed.
# - app.add_stylesheet -> app.add_css_file
# - app.add_javascript -> app.add_js_file
#
# RemovedInSphinx40Warning:
# - The app.add_stylesheet() is deprecated. Please use app.add_css_file() instead.
# - The app.add_javascript() is deprecated. Please use app.add_js_file() instead.
#
# So we'll need to keep this funky `getattr` chain for a little while ;)
# Or else pin min sphinx version to 1.8 or higher. Probably when 2.0 is out?
add_css_file = getattr(app, "add_css_file", getattr(app, "add_stylesheet", None))
add_js_file = getattr(app, "add_js_file", getattr(app, "add_javascript", None))
# Add the stylesheets
for css in tree_data_css:
add_css_file(css)
# Add the javascript
for js in tree_data_js:
add_js_file(js)
logger.info(utils.progress("Exhale: added tree view css / javascript."))
|
scudcloud/downloader.py | p-mongo/scudcloud | 1,480 | 12681191 | from urllib import request
from PyQt5.QtCore import QThread
class Downloader(QThread):
def __init__(self, wrapper, icon, path):
QThread.__init__(self)
self.wrapper = wrapper
self.icon = icon
self.path = path
def run(self):
try:
file_name, headers = request.urlretrieve(self.icon, self.path)
self.wrapper.icon = file_name
except:
pass |
opendeep/utils/midi/MidiInFile.py | vitruvianscience/OpenDeep | 252 | 12681201 | <filename>opendeep/utils/midi/MidiInFile.py
# -*- coding: ISO-8859-1 -*-
from __future__ import absolute_import
from .RawInstreamFile import RawInstreamFile
from .MidiFileParser import MidiFileParser
class MidiInFile:
"""
Parses a midi file, and triggers the midi events on the outStream
object.
Get example data from a minimal midi file, generated with cubase.
>>> test_file = 'minimal-cubase-type0.mid'
Do parsing, and generate events with MidiToText,
so we can see what a minimal midi file contains
>>> from opendeep.utils.midi.MidiToText import MidiToText
>>> midi_in = MidiInFile(MidiToText(), test_file)
>>> midi_in.read()
format: 0, nTracks: 1, division: 480
----------------------------------
<BLANKLINE>
Start - track #0
sequence_name: Type 0
tempo: 500000
time_signature: 4 2 24 8
note_on - ch:00, note:48, vel:64 time:0
note_off - ch:00, note:48, vel:40 time:480
End of track
<BLANKLINE>
End of file
"""
def __init__(self, outStream, infile):
# these could also have been mixins, would that be better? Nah!
self.raw_in = RawInstreamFile(infile)
self.parser = MidiFileParser(self.raw_in, outStream)
def read(self):
"Start parsing the file"
p = self.parser
p.parseMThdChunk()
p.parseMTrkChunks()
def setData(self, data=''):
"Sets the data from a plain string"
self.raw_in.setData(data)
|
src/bindings/python/tests/test_inference_engine/test_output_const_node.py | opencv/dldt | 1,127 | 12681207 | <filename>src/bindings/python/tests/test_inference_engine/test_output_const_node.py
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
from ..conftest import model_path
import openvino.runtime.opset8 as ops
from openvino.runtime import (
ConstOutput,
Shape,
PartialShape,
Type,
Output,
RTMap,
OVAny,
Core,
)
is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
def test_const_output_type(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
node = exec_net.input(0)
assert isinstance(node, ConstOutput)
def test_const_output_docs(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
node = exec_net.input(0)
    expected_string = "openvino.runtime.ConstOutput represents port/node output."
    assert node.__doc__ == expected_string
def test_const_output_get_index(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
node = exec_net.input("data")
assert node.get_index() == 0
assert node.index == 0
def test_const_output_get_element_type(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
node = exec_net.input("data")
assert node.get_element_type() == Type.f32
assert node.element_type == Type.f32
def test_const_output_get_shape(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
node = exec_net.input("data")
expected_shape = Shape([1, 3, 32, 32])
assert str(node.get_shape()) == str(expected_shape)
assert str(node.shape) == str(expected_shape)
def test_const_output_get_partial_shape(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
node = exec_net.input("data")
expected_partial_shape = PartialShape([1, 3, 32, 32])
assert node.get_partial_shape() == expected_partial_shape
assert node.partial_shape == expected_partial_shape
def test_const_output_get_target_inputs(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
outputs = exec_net.outputs
for node in outputs:
assert isinstance(node.get_target_inputs(), set)
assert isinstance(node.target_inputs, set)
def test_const_output_get_names(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
input_name = "data"
node = exec_net.input(input_name)
expected_names = set()
expected_names.add(input_name)
assert node.get_names() == expected_names
assert node.names == expected_names
assert node.get_any_name() == input_name
assert node.any_name == input_name
def test_const_get_rf_info(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
output_node = exec_net.output(0)
rt_info = output_node.get_rt_info()
assert isinstance(rt_info, RTMap)
def test_const_output_runtime_info(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
input_name = "data"
output_node = exec_net.input(input_name)
rt_info = output_node.rt_info
assert isinstance(rt_info, RTMap)
def test_update_rt_info(device):
relu = ops.relu(5)
output_node = Output._from_node(relu)
rt = output_node.get_rt_info()
rt["test12345"] = "test"
for key, value in output_node.get_rt_info().items():
assert key == "test12345"
assert isinstance(value, OVAny)
def test_operations():
data = ops.parameter([2])
split = ops.split(data, 0, 2)
outputs = split.outputs()
assert outputs[0] < outputs[1]
assert outputs[0] == split.output(0)
assert hash(outputs[0]) == hash(split.output(0))
assert hash(outputs[0]) != hash(outputs[0].node)
|
batchflow/batch_image.py | analysiscenter/dataset | 101 | 12681228 | """ Contains Batch classes for images """
import os
import warnings
from numbers import Number
import numpy as np
import PIL
import PIL.ImageOps
import PIL.ImageChops
import PIL.ImageFilter
import PIL.ImageEnhance
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
from .batch import Batch
from .decorators import action, apply_parallel, inbatch_parallel
from .dsindex import FilesIndex
class BaseImagesBatch(Batch):
""" Batch class for 2D images.
    Note that if a class method is wrapped with the `@apply_parallel` decorator,
    then inner calls (i.e. from other class methods) should use the underscored
    version of that method (for example, if there is a decorated `method`, you
    need to call `_method_` from inside `other_method`).
    The same applies to all child classes of :class:`batch.Batch`.
"""
components = "images", "labels", "masks"
# Class-specific defaults for :meth:`.Batch.apply_parallel`
apply_defaults = dict(target='for',
post='_assemble',
src='images',
dst='images',
)
def _make_path(self, ix, src=None):
""" Compose path.
Parameters
----------
ix : str
element's index (filename)
src : str
Path to folder with images. Used if `self.index` is not `FilesIndex`.
Returns
-------
path : str
Full path to an element.
"""
if isinstance(src, FilesIndex):
path = src.get_fullpath(ix)
elif isinstance(self.index, FilesIndex):
path = self.index.get_fullpath(ix)
else:
path = os.path.join(src, str(ix))
return path
def _load_image(self, ix, src=None, fmt=None, dst="images"):
""" Loads image.
.. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here.
Parameters
----------
src : str, dataset.FilesIndex, None
path to the folder with an image. If src is None then it is determined from the index.
dst : str
Component to write images to.
fmt : str
            Format of an image
Raises
------
NotImplementedError
If this method is not defined in a child class
"""
_ = self, ix, src, dst, fmt
raise NotImplementedError("Must be implemented in a child class")
@action
def load(self, *args, src=None, fmt=None, dst=None, **kwargs):
""" Load data.
        .. note:: if `fmt='image'` then ``components`` must be a single component (str).
.. note:: All parameters must be named only.
Parameters
----------
src : str, None
Path to the folder with data. If src is None then path is determined from the index.
fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'}
Format of the file to download.
dst : str, sequence
components to download.
"""
if fmt == 'image':
return self._load_image(src, fmt=fmt, dst=dst)
return super().load(src=src, fmt=fmt, dst=dst, *args, **kwargs)
def _dump_image(self, ix, src='images', dst=None, fmt=None):
""" Saves image to dst.
.. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here.
Parameters
----------
src : str
Component to get images from.
dst : str
Folder where to dump. If dst is None then it is determined from index.
Raises
------
NotImplementedError
If this method is not defined in a child class
"""
_ = self, ix, src, dst, fmt
raise NotImplementedError("Must be implemented in a child class")
@action
def dump(self, *args, dst=None, fmt=None, components="images", **kwargs):
""" Dump data.
        .. note:: If `fmt='image'` then ``dst`` must be a single component (str).
.. note:: All parameters must be named only.
Parameters
----------
dst : str, None
Path to the folder where to dump. If dst is None then path is determined from the index.
fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'}
Format of the file to save.
components : str, sequence
Components to save.
ext: str
Format to save images to.
Returns
-------
self
"""
if fmt == 'image':
return self._dump_image(components, dst, fmt=kwargs.pop('ext'))
return super().dump(dst=dst, fmt=fmt, components=components, *args, **kwargs)
class ImagesBatch(BaseImagesBatch):
""" Batch class for 2D images.
Images are stored as numpy arrays of PIL.Image.
PIL.Image has the following system of coordinates::
X
0 -------------- >
|
|
| images's pixels
|
|
Y v
Pixel's position is defined as (x, y)
    Note that if a class method is wrapped with the `@apply_parallel` decorator,
    then inner calls (i.e. from other class methods) should use the underscored
    version of that method (for example, if there is a decorated `method`, you
    need to call `_method_` from inside `other_method`).
    The same applies to all child classes of :class:`batch.Batch`.
"""
@classmethod
def _get_image_shape(cls, image):
if isinstance(image, PIL.Image.Image):
return image.size
return image.shape[:2]
@property
def image_shape(self):
""": tuple - shape of the image"""
_, shapes_count = np.unique([image.size for image in self.images], return_counts=True, axis=0)
if len(shapes_count) == 1:
if isinstance(self.images[0], PIL.Image.Image):
return (*self.images[0].size, len(self.images[0].getbands()))
return self.images[0].shape
raise RuntimeError('Images have different shapes')
@inbatch_parallel(init='indices', post='_assemble')
def _load_image(self, ix, src=None, fmt=None, dst="images"):
""" Loads image
.. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here.
Parameters
----------
src : str, dataset.FilesIndex, None
Path to the folder with an image. If src is None then it is determined from the index.
dst : str
Component to write images to.
fmt : str
Format of an image.
"""
return PIL.Image.open(self._make_path(ix, src))
@inbatch_parallel(init='indices')
def _dump_image(self, ix, src='images', dst=None, fmt=None):
""" Saves image to dst.
.. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here.
Parameters
----------
src : str
Component to get images from.
dst : str
Folder where to dump.
fmt : str
Format of saved image.
"""
if dst is None:
raise RuntimeError('You must specify `dst`')
image = self.get(ix, src)
ix = str(ix) + '.' + fmt if fmt is not None else str(ix)
image.save(os.path.join(dst, ix))
def _assemble_component(self, result, *args, component='images', **kwargs):
""" Assemble one component after parallel execution.
Parameters
----------
result : sequence, array_like
Results after inbatch_parallel.
component : str
component to assemble
"""
_ = args, kwargs
if isinstance(result[0], PIL.Image.Image):
setattr(self, component, np.asarray(result, dtype=object))
else:
try:
setattr(self, component, np.stack(result))
except ValueError:
array_result = np.empty(len(result), dtype=object)
array_result[:] = result
setattr(self, component, array_result)
@apply_parallel
def to_pil(self, image, mode=None):
"""converts images in Batch to PIL format
Parameters
----------
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
"""
if isinstance(image, PIL.Image.Image):
return image
if mode is None:
if len(image.shape) == 2:
mode = 'L'
elif len(image.shape) == 3:
if image.shape[-1] == 3:
mode = 'RGB'
elif image.shape[-1] == 1:
mode = 'L'
image = image[:, :, 0]
elif image.shape[-1] == 2:
mode = 'LA'
elif image.shape[-1] == 4:
mode = 'RGBA'
else:
raise ValueError('Unknown image type as image has', image.shape[-1], 'channels')
elif mode == 'L' and len(image.shape) == 3:
image = image[..., 0]
return PIL.Image.fromarray(image, mode)
def _calc_origin(self, image_shape, origin, background_shape):
""" Calculate coordinate of the input image with respect to the background.
Parameters
----------
image_shape : sequence
shape of the input image.
origin : array_like, sequence, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'}
Position of the input image with respect to the background. Can be one of:
- 'center' - place the center of the input image on the center of the background and crop
the input image accordingly.
- 'top_left' - place the upper-left corner of the input image on the upper-left of the background
and crop the input image accordingly.
- 'top_right' - crop an image such that upper-right corners of
an image and the cropping box coincide
- 'bottom_left' - crop an image such that lower-left corners of
an image and the cropping box coincide
- 'bottom_right' - crop an image such that lower-right corners of
an image and the cropping box coincide
- 'random' - place the upper-left corner of the input image on the randomly sampled position
in the background. Position is sampled uniformly such that there is no need for cropping.
- other - sequence of ints or sequence of floats in [0, 1) interval;
place the upper-left corner of the input image on the given position in the background.
If `origin` is a sequence of floats in [0, 1), it defines a relative position of
the origin in a valid region of image.
background_shape : sequence
shape of the background image.
Returns
-------
sequence : calculated origin in the form (column, row)
"""
if isinstance(origin, str):
if origin == 'top_left':
origin = 0, 0
elif origin == 'top_right':
origin = (background_shape[0]-image_shape[0]+1, 0)
elif origin == 'bottom_left':
origin = (0, background_shape[1]-image_shape[1]+1)
elif origin == 'bottom_right':
origin = (background_shape[0]-image_shape[0]+1,
background_shape[1]-image_shape[1]+1)
elif origin == 'center':
origin = np.maximum(0, np.asarray(background_shape) - image_shape) // 2
elif origin == 'random':
origin = (np.random.randint(background_shape[0]-image_shape[0]+1),
np.random.randint(background_shape[1]-image_shape[1]+1))
else:
raise ValueError("If string, origin should be one of ['center', 'top_left', 'top_right', "
"'bottom_left', 'bottom_right', 'random']. Got '{}'.".format(origin))
elif all(0 <= elem < 1 for elem in origin):
region = ((background_shape[0]-image_shape[0]+1),
(background_shape[1]-image_shape[1]+1))
origin = np.asarray(origin) * region
elif not all(isinstance(elem, int) for elem in origin):
raise ValueError('If not a string, origin should be either a sequence of ints or sequence of '
'floats in [0, 1) interval. Got {}'.format(origin))
return np.asarray(origin, dtype=np.int)
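# A worked example of the origin calculation above (illustrative shapes):
# for image_shape=(10, 10) and background_shape=(100, 100),
#   origin='center'   -> ((100 - 10) // 2, (100 - 10) // 2) = (45, 45)
#   origin='top_left' -> (0, 0)
#   origin=(0.5, 0.5) -> 0.5 * (100 - 10 + 1) per axis, truncated to int = (45, 45)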
@apply_parallel
def scale(self, image, factor, preserve_shape=False, origin='center', resample=0):
""" Scale the content of each image in the batch.
Resulting shape is obtained as original_shape * factor.
Parameters
-----------
factor : float, sequence
resulting shape is obtained as original_shape * factor
- float - scale all axes with the given factor
- sequence (factor_1, factor_2, ...) - scale each axis with the given factor separately
preserve_shape : bool
whether to preserve the shape of the image after scaling
origin : array-like, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'}
Relevant only if `preserve_shape` is True.
If `scale` < 1, defines position of the scaled image with respect to the original one's shape.
If `scale` > 1, defines position of cropping box.
Can be one of:
- 'center' - place the center of the input image on the center of the background and crop
the input image accordingly.
- 'top_left' - place the upper-left corner of the input image on the upper-left of the background
and crop the input image accordingly.
- 'top_right' - crop an image such that upper-right corners of
an image and the cropping box coincide
- 'bottom_left' - crop an image such that lower-left corners of
an image and the cropping box coincide
- 'bottom_right' - crop an image such that lower-right corners of
an image and the cropping box coincide
- 'random' - place the upper-left corner of the input image on the randomly sampled position
in the background. Position is sampled uniformly such that there is no need for cropping.
- array_like - sequence of ints or sequence of floats in [0, 1) interval;
place the upper-left corner of the input image on the given position in the background.
If `origin` is a sequence of floats in [0, 1), it defines a relative position
of the origin in a valid region of image.
resample: int
Parameter passed to PIL.Image.resize. Interpolation order
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
Notes
-----
Using 'random' option for origin with `src` as list with multiple elements will not result in same crop for each
element, as origin will be sampled independently for each `src` element.
To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.
Returns
-------
self
"""
original_shape = self._get_image_shape(image)
rescaled_shape = list(np.int32(np.ceil(np.asarray(original_shape)*factor)))
rescaled_image = image.resize(rescaled_shape, resample=resample)
if preserve_shape:
rescaled_image = self._preserve_shape(original_shape, rescaled_image, origin)
return rescaled_image
@apply_parallel
def crop(self, image, origin, shape, crop_boundaries=False):
""" Crop an image.
Extract image data from the window of the size given by `shape` and placed at `origin`.
Parameters
----------
origin : sequence, str
Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.
shape : sequence
crop size in the form of (rows, columns)
crop_boundaries : bool
If `True` then the crop is taken only from the image's area. The shape of the crop may then differ from the passed one
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
Notes
-----
Using 'random' origin with `src` as list with multiple elements will not result in same crop for each
element, as origin will be sampled independently for each `src` element.
To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.
"""
origin = self._calc_origin(shape, origin, image.size)
right_bottom = origin + shape
if crop_boundaries:
out_of_boundaries = origin < 0
origin[out_of_boundaries] = 0
image_shape = np.asarray(image.size)
out_of_boundaries = right_bottom > image_shape
right_bottom[out_of_boundaries] = image_shape[out_of_boundaries]
return image.crop((*origin, *right_bottom))
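# A small sketch of the boundary handling above (illustrative numbers): for a
# 100x100 image, origin=(90, 90) and shape=(20, 20) with crop_boundaries=True
# clamps the box to (90, 90, 100, 100), so the returned crop is 10x10 rather
# than the requested 20x20.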
@apply_parallel
def put_on_background(self, image, background, origin, mask=None):
""" Put an image on a background at given origin
Parameters
----------
background : PIL.Image, np.ndarray of np.uint8
Blank background to put image on.
origin : sequence, str
Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.
mask : None, PIL.Image, np.ndarray of np.uint8
mask passed to PIL.Image.paste
Notes
-----
Using 'random' origin with `src` as list with multiple elements will not result in same crop for each
element, as origin will be sampled independently for each `src` element.
To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.
"""
if not isinstance(background, PIL.Image.Image):
background = PIL.Image.fromarray(background)
else:
background = background.copy()
if not isinstance(mask, PIL.Image.Image):
mask = PIL.Image.fromarray(mask) if mask is not None else None
origin = list(self._calc_origin(self._get_image_shape(image), origin,
self._get_image_shape(background)))
background.paste(image, origin, mask)
return background
def _preserve_shape(self, original_shape, transformed_image, origin='center'):
""" Change the transformed image's shape by cropping and adding empty pixels to fit the shape of original image.
Parameters
----------
original_shape : sequence
transformed_image : np.ndarray
input_origin : array-like, {'center', 'top_left', 'random'}
Position of the scaled image with respect to the original one's shape.
- 'center' - place the center of the input image on the center of the background and crop
the input image accordingly.
- 'top_left' - place the upper-left corner of the input image on the upper-left of the background
and crop the input image accordingly.
- 'top_right' - crop an image such that upper-right corners of
an image and the cropping box coincide
- 'bottom_left' - crop an image such that lower-left corners of
an image and the cropping box coincide
- 'bottom_right' - crop an image such that lower-right corners of
an image and the cropping box coincide
- 'random' - place the upper-left corner of the input image on the randomly sampled position
in the background. Position is sampled uniformly such that there is no need for cropping.
- array_like - sequence of ints or sequence of floats in [0, 1) interval;
place the upper-left corner of the input image on the given position in the background.
If `origin` is a sequence of floats in [0, 1), it defines a relative position
of the origin in a valid region of image.
crop_origin: array-like, {'center', 'top_left', 'random'}
Position of crop from transformed image.
Has same values as `input_origin`.
Returns
-------
np.ndarray : image after described actions
"""
transformed_shape = self._get_image_shape(transformed_image)
if np.any(np.array(transformed_shape) < np.array(original_shape)):
n_channels = len(transformed_image.getbands())
if n_channels == 1:
background = np.zeros(original_shape, dtype=np.uint8)
else:
background = np.zeros((*original_shape, n_channels), dtype=np.uint8)
return self._put_on_background_(transformed_image, background, origin)
return self._crop_(transformed_image, origin, original_shape, True)
@apply_parallel
def filter(self, image, mode, *args, **kwargs):
""" Filters an image. Calls ``image.filter(getattr(PIL.ImageFilter, mode)(*args, **kwargs))``.
For more details see `ImageFilter <http://pillow.readthedocs.io/en/stable/reference/ImageFilter.html>_`.
Parameters
----------
mode : str
Name of the filter.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
return image.filter(getattr(PIL.ImageFilter, mode)(*args, **kwargs))
@apply_parallel
def transform(self, image, *args, **kwargs):
""" Calls ``image.transform(*args, **kwargs)``.
For more information see
`<http://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transform>_`.
Parameters
----------
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
size = kwargs.pop('size', self._get_image_shape(image))
return image.transform(*args, size=size, **kwargs)
@apply_parallel
def resize(self, image, size, *args, **kwargs):
""" Calls ``image.resize(*args, **kwargs)``.
For more details see `<https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.resize>_`.
Parameters
----------
size : tuple
the resulting size of the image. If one of the components of tuple is None,
corresponding dimension will be proportionally resized.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
if size[0] is None and size[1] is None:
raise ValueError('At least one component of the parameter "size" must be a number.')
if size[0] is None:
new_size = (int(image.size[0] * size[1] / image.size[1]), size[1])
elif size[1] is None:
new_size = (size[0], int(image.size[1] * size[0] / image.size[0]))
else:
new_size = size
return image.resize(new_size, *args, **kwargs)
@apply_parallel
def shift(self, image, offset, mode='const'):
""" Shifts an image.
Parameters
----------
offset : (Number, Number)
mode : {'const', 'wrap'}
How to fill borders
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
if mode == 'const':
image = image.transform(size=image.size,
method=PIL.Image.AFFINE,
data=(1, 0, -offset[0], 0, 1, -offset[1]))
elif mode == 'wrap':
image = PIL.ImageChops.offset(image, *offset)
else:
raise ValueError("mode must be one of ['const', 'wrap']")
return image
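# For reference on the 'const' branch above: PIL's AFFINE transform maps each
# output pixel (x, y) to input (a*x + b*y + c, d*x + e*y + f), so the 6-tuple
# (1, 0, -offset[0], 0, 1, -offset[1]) samples the input at (x - dx, y - dy),
# i.e. the image content moves by +offset with constant fill.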
@apply_parallel
def pad(self, image, *args, **kwargs):
""" Calls ``PIL.ImageOps.expand``.
For more details see `<http://pillow.readthedocs.io/en/stable/reference/ImageOps.html#PIL.ImageOps.expand>`_.
Parameters
----------
offset : sequence
Size of the borders in pixels. The order is (left, top, right, bottom).
mode : {'const', 'wrap'}
Filling mode
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
return PIL.ImageOps.expand(image, *args, **kwargs)
@apply_parallel
def rotate(self, image, *args, **kwargs):
""" Rotates an image.
kwargs are passed to PIL.Image.rotate
Parameters
----------
angle: Number
In degrees counter clockwise.
resample: int
Interpolation order
expand: bool
Whether to expand the output to hold the whole image. Default is False.
center: (Number, Number)
Center of rotation. Default is the center of the image.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
return image.rotate(*args, **kwargs)
@apply_parallel
def flip(self, image, mode='lr'):
""" Flips image.
Parameters
----------
mode : {'lr', 'ud'}
- 'lr' - apply the left/right flip
- 'ud' - apply the upside/down flip
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
if mode == 'lr':
return PIL.ImageOps.mirror(image)
return PIL.ImageOps.flip(image)
@apply_parallel
def invert(self, image, channels='all'):
""" Invert givn channels.
Parameters
----------
channels : int, sequence
Indices of the channels to invert.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
if channels == 'all':
image = PIL.ImageChops.invert(image)
else:
bands = list(image.split())
channels = (channels,) if isinstance(channels, Number) else channels
for channel in channels:
bands[channel] = PIL.ImageChops.invert(bands[channel])
image = PIL.Image.merge('RGB', bands)
return image
@apply_parallel
def salt(self, image, p_noise=.015, color=255, size=(1, 1)):
""" Set random pixel on image to givan value.
Every pixel will be set to ``color`` value with probability ``p_noise``.
Parameters
----------
p_noise : float
Probability of salting a pixel.
color : float, int, sequence, callable
Color's value.
- int, float, sequence -- value of color
- callable -- color is sampled for every chosen pixel (rules are the same as for int, float and sequence)
size : int, sequence of int, callable
Size of salt
- int -- square salt with side ``size``
- sequence -- rectangular salt in the form (rows, columns)
- callable -- size is sampled for every chosen pixel (rules are the same as for int and sequence)
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
mask_size = np.asarray(self._get_image_shape(image))
mask_salt = np.random.binomial(1, p_noise, size=mask_size).astype(bool)
image = np.array(image)
if isinstance(size, (tuple, int)) and size in [1, (1, 1)] and not callable(color):
image[mask_salt] = color
else:
size_lambda = size if callable(size) else lambda: size
color_lambda = color if callable(color) else lambda: color
mask_salt = np.where(mask_salt)
for i in range(len(mask_salt[0])):
current_size = size_lambda()
current_size = (current_size, current_size) if isinstance(current_size, Number) else current_size
left_top = np.asarray((mask_salt[0][i], mask_salt[1][i]))
right_bottom = np.minimum(left_top + current_size, self._get_image_shape(image))
image[left_top[0]:right_bottom[0], left_top[1]:right_bottom[1]] = color_lambda()
return PIL.Image.fromarray(image)
@apply_parallel
def clip(self, image, low=0, high=255):
""" Truncate image's pixels.
Parameters
----------
low : int, float, sequence
Actual pixel's value is equal max(value, low). If sequence is given, then its length must coincide
with the number of channels in an image and each channel is thresholded separately
high : int, float, sequence
Actual pixel's value is equal min(value, high). If sequence is given, then its length must coincide
with the number of channels in an image and each channel is thresholded separately
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
if isinstance(low, Number):
low = tuple([low]*3)
if isinstance(high, Number):
high = tuple([high]*3)
high = PIL.Image.new('RGB', image.size, high)
low = PIL.Image.new('RGB', image.size, low)
return PIL.ImageChops.lighter(PIL.ImageChops.darker(image, high), low)
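# Sketch of how the ImageChops trick above clips values (illustrative numbers):
# with low=10 and high=200, `darker(image, high)` takes the per-pixel minimum
# against a constant 200-image (the upper bound), and `lighter(..., low)` takes
# the per-pixel maximum against a constant 10-image (the lower bound).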
@apply_parallel
def enhance(self, image, layout='hcbs', factor=(1, 1, 1, 1)):
""" Apply enhancements from PIL.ImageEnhance to the image.
Parameters
----------
layout : str
defines layout of operations, default is `hcbs`:
h - color
c - contrast
b - brightness
s - sharpness
factor : float or tuple of float
factor of enhancement for each operation listed in `layout`.
"""
enhancements = {
'h': 'Color',
'c': 'Contrast',
'b': 'Brightness',
's': 'Sharpness'
}
if isinstance(factor, float):
factor = (factor,) * len(layout)
if len(layout) != len(factor):
raise ValueError("'layout' and 'factor' should be of same length!")
for alias, multiplier in zip(layout, factor):
enhancement = enhancements.get(alias)
if enhancement is None:
raise ValueError('Unknown enhancement alias: ', alias)
image = getattr(PIL.ImageEnhance, enhancement)(image).enhance(multiplier)
return image
@apply_parallel
def multiply(self, image, multiplier=1., clip=False, preserve_type=False):
""" Multiply each pixel by the given multiplier.
Parameters
----------
multiplier : float, sequence
clip : bool
whether to force image's pixels to be in [0, 255] or [0, 1.]
preserve_type : bool
Whether to preserve ``dtype`` of transformed images.
If ``False`` is given then the resulting type will be ``np.float``.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
multiplier = np.float32(multiplier)
if isinstance(image, PIL.Image.Image):
if preserve_type is False:
warnings.warn("Note that some info might be lost during `multiply` transformation since PIL.image "
"stores data as `np.uint8`. To suppress this warning, use `preserve_type=True` or "
"consider using `to_array` action before multiplication.")
return PIL.Image.fromarray(np.clip(multiplier*np.asarray(image), 0, 255).astype(np.uint8))
dtype = image.dtype if preserve_type else np.float
if clip:
image = np.clip(multiplier*image, 0, 255 if dtype == np.uint8 else 1.)
else:
image = multiplier * image
return image.astype(dtype)
@apply_parallel
def add(self, image, term=1., clip=False, preserve_type=False):
""" Add term to each pixel.
Parameters
----------
term : float, sequence
clip : bool
whether to force image's pixels to be in [0, 255] or [0, 1.]
preserve_type : bool
Whether to preserve ``dtype`` of transformed images.
If ``False`` is given then the resulting type will be ``np.float``.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
term = np.float32(term)
if isinstance(image, PIL.Image.Image):
return PIL.Image.fromarray(np.clip(term+np.asarray(image), 0, 255).astype(np.uint8))
dtype = image.dtype if preserve_type else np.float
if clip:
image = np.clip(term+image, 0, 255 if dtype == np.uint8 else 1.)
else:
image = term + image
return image.astype(dtype)
@apply_parallel
def pil_convert(self, image, mode="L"):
""" Convert image. Actually calls ``image.convert(mode)``.
Parameters
----------
mode : str
Pass 'L' to convert to grayscale
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
return image.convert(mode)
@apply_parallel
def posterize(self, image, bits=4):
""" Posterizes image.
More concretely, it quantizes pixels' values so that they have ``2^bits`` colors.
Parameters
----------
bits : int
Number of bits used to store a color's component.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
return PIL.ImageOps.posterize(image, bits)
@apply_parallel
def cutout(self, image, origin, shape, color):
""" Fills given areas with color
.. note:: It is assumed that ``origins``, ``shapes`` and ``colors`` have the same length.
Parameters
----------
origin : sequence, str
Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.
shape : sequence, int
Shape of a filled box. Can be one of:
- sequence - crop size in the form of (rows, columns)
- int - shape has squared form
color : sequence, number
Color of a filled box. Can be one of:
- sequence - (r,g,b) form
- number - grayscale
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
Notes
-----
Using 'random' origin with `src` as list with multiple elements will not result in same crop for each
element, as origin will be sampled independently for each `src` element.
To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.
"""
image = image.copy()
shape = (shape, shape) if isinstance(shape, Number) else shape
origin = self._calc_origin(shape, origin, self._get_image_shape(image))
color = (color, color, color) if isinstance(color, Number) else color
image.paste(PIL.Image.new('RGB', tuple(shape), tuple(color)), tuple(origin))
return image
def _assemble_patches(self, patches, *args, dst, **kwargs):
""" Assembles patches after parallel execution.
Parameters
----------
patches : sequence
Patches to gather. patches.shape must be like (batch.size, patches_i, patch_height, patch_width, n_channels)
dst : str
Component to put patches in.
"""
_ = args, kwargs
new_items = np.concatenate(patches)
setattr(self, dst, new_items)
return self
@action
@inbatch_parallel(init='indices', post='_assemble_patches')
def split_to_patches(self, ix, patch_shape, stride=1, drop_last=False, src='images', dst=None):
""" Splits image to patches.
Small images with the same shape (``patch_shape``) are cropped from the original one with stride ``stride``.
Parameters
----------
patch_shape : int, sequence
Patch's shape in the form (rows, columns). If int is given then patches have square shape.
stride : int, square
Step of the moving window from which patches are cropped. If int is given then the window has square shape.
drop_last : bool
Whether to drop patches whose window covers area out of the image.
If False is passed then these patches are cropped from the edge of an image. See more in tutorials.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
_ = dst
image = self.get(ix, src)
image_shape = self._get_image_shape(image)
image = np.array(image)
stride = (stride, stride) if isinstance(stride, Number) else stride
patch_shape = (patch_shape, patch_shape) if isinstance(patch_shape, Number) else patch_shape
patches = []
def _iterate_columns(row_from, row_to):
column = 0
while column < image_shape[1]-patch_shape[1]+1:
patches.append(PIL.Image.fromarray(image[column:column+patch_shape[1], row_from:row_to]))
column += stride[1]
if not drop_last and column + patch_shape[1] != image_shape[1]:
patches.append(PIL.Image.fromarray(image[image_shape[1]-patch_shape[1]:image_shape[1],
row_from:row_to]))
row = 0
while row < image_shape[0]-patch_shape[0]+1:
_iterate_columns(row, row+patch_shape[0])
row += stride[0]
if not drop_last and row + patch_shape[0] != image_shape[0]:
_iterate_columns(image_shape[0]-patch_shape[0], image_shape[0])
array = np.empty(len(patches), dtype=object)
for i, patch in enumerate(patches):
array[i] = patch
return array
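# A quick count for the sliding window above (illustrative numbers): for a
# 100x100 image with patch_shape=30 and stride=20, the window starts at
# 0, 20, 40, 60 along each axis; with drop_last=False one extra edge window at
# 70 is appended, giving 5 positions per axis and 25 patches in total.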
@apply_parallel
def additive_noise(self, image, noise, clip=False, preserve_type=False):
""" Add additive noise to an image.
Parameters
----------
noise : callable
Distribution. Must have ``size`` parameter.
clip : bool
whether to force image's pixels to be in [0, 255] or [0, 1.]
preserve_type : bool
Whether to preserve ``dtype`` of transformed images.
If ``False`` is given then the resulting type will be ``np.float``.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
noise = noise(size=(*image.size, len(image.getbands())) if isinstance(image, PIL.Image.Image) else image.shape)
return self._add_(image, noise, clip, preserve_type)
@apply_parallel
def multiplicative_noise(self, image, noise, clip=False, preserve_type=False):
""" Add multiplicative noise to an image.
Parameters
----------
noise : callable
Distribution. Must have ``size`` parameter.
clip : bool
whether to force image's pixels to be in [0, 255] or [0, 1.]
preserve_type : bool
Whether to preserve ``dtype`` of transformed images.
If ``False`` is given then the resulting type will be ``np.float``.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
noise = noise(size=(*image.size, len(image.getbands())) if isinstance(image, PIL.Image.Image) else image.shape)
return self._multiply_(image, noise, clip, preserve_type)
@apply_parallel
def elastic_transform(self, image, alpha, sigma, **kwargs):
""" Deformation of images as described by Simard, Steinkraus and Platt, `Best Practices for Convolutional
Neural Networks applied to Visual Document Analysis <http://cognitivemedium.com/assets/rmnist/Simard.pdf>_`.
Code slightly differs from `<https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`_.
Parameters
----------
alpha : number
maximum of vectors' norms.
sigma : number
Smooth factor.
src : str
Component to get images from. Default is 'images'.
dst : str
Component to write images to. Default is 'images'.
p : float
Probability of applying the transform. Default is 1.
"""
image = np.array(image)
# full shape is needed
shape = image.shape
if len(shape) == 2:
image = image[..., None]
shape = image.shape
kwargs.setdefault('mode', 'constant')
kwargs.setdefault('cval', 0)
column_shift = gaussian_filter(np.random.uniform(-1, 1, size=shape), sigma, **kwargs) * alpha
row_shift = gaussian_filter(np.random.uniform(-1, 1, size=shape), sigma, **kwargs) * alpha
row, column, channel = np.meshgrid(range(shape[0]), range(shape[1]), range(shape[2]))
indices = (column + column_shift, row + row_shift, channel)
distorted_image = map_coordinates(image, indices, order=1, mode='reflect')
if shape[-1] == 1:
return PIL.Image.fromarray(np.uint8(distorted_image.reshape(image.shape))[..., 0])
return PIL.Image.fromarray(np.uint8(distorted_image.reshape(image.shape)))
|
FWCore/Services/test/test_resource_succeed_cfg.py | ckamtsikis/cmssw | 852 | 12681237 | <filename>FWCore/Services/test/test_resource_succeed_cfg.py
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.source = cms.Source("EmptySource")
process.add_(cms.Service("ResourceEnforcer",
maxVSize = cms.untracked.double(1.0),
maxRSS = cms.untracked.double(1.0),
maxTime = cms.untracked.double(1.0)))
process.thing = cms.EDProducer("ThingProducer")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))
process.p = cms.Path(process.thing)
|
pyNastran/dev/bdf_vectorized/cards/elements/solid/ctetra4.py | luzpaz/pyNastran | 293 | 12681241 | <reponame>luzpaz/pyNastran<gh_stars>100-1000
from itertools import count
import numpy as np
from numpy import zeros, arange, dot, cross, searchsorted, array, eye, ones
#from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.bdf_interface.assign_type import integer
from pyNastran.dev.bdf_vectorized.cards.elements.solid.solid_element import SolidElement
def volume4(xyz1, xyz2, xyz3, xyz4):
r"""
Gets the volume, :math:`V`, of the tetrahedron.
.. math:: V = \frac{(a-d) \cdot \left( (b-d) \times (c-d) \right) }{6}
"""
V = -dot((xyz1 - xyz4), cross(xyz2 - xyz4, xyz3 - xyz4)) / 6.
#V = 1/6. * np.det(
#np.hstack(
#[1., 1., 1., 1.],
#np.vstack(n1, n2, n3, n4).T,
#),
#)
return V
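# A minimal check of volume4 (illustrative values): the unit tetrahedron spanned
# by (0,0,0), (1,0,0), (0,1,0) and (0,0,1) has volume 1/6.
#
#   volume4(np.array([0., 0., 0.]), np.array([1., 0., 0.]),
#           np.array([0., 1., 0.]), np.array([0., 0., 1.]))  # -> 0.1666...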
class CTETRA4(SolidElement):
type = 'CTETRA4'
nnodes = 4
def __init__(self, model):
"""
Defines the CTETRA object.
Parameters
----------
model : BDF
the BDF object
"""
SolidElement.__init__(self, model)
def add_card(self, card, comment=''):
i = self.i
eid = integer(card, 1, 'element_id')
if comment:
self.set_comment(eid, comment)
#: Element ID
self.element_id[i] = eid
#: Property ID
self.property_id[i] = integer(card, 2, 'property_id')
#: Node IDs
nids = array([
integer(card, 3, 'node_id_1'),
integer(card, 4, 'node_id_2'),
integer(card, 5, 'node_id_3'),
integer(card, 6, 'node_id_4'),
], dtype='int32')
assert 0 not in nids, '%s\n%s' % (nids, card)
self.node_ids[i, :] = nids
assert len(card) == 7, 'len(CTETRA4 card) = %i\ncard=%s' % (len(card), card)
self.i += 1
def update(self, maps):
"""
maps = {
'node_id' : nid_map,
'property' : pid_map,
}
"""
if self.n:
eid_map = maps['element']
nid_map = maps['node']
pid_map = maps['property']
for i, (eid, pid, nids) in enumerate(zip(self.element_id, self.property_id, self.node_ids)):
print(self.print_card(i))
self.element_id[i] = eid_map[eid]
self.property_id[i] = pid_map[pid]
self.node_ids[i, 0] = nid_map[nids[0]]
self.node_ids[i, 1] = nid_map[nids[1]]
self.node_ids[i, 2] = nid_map[nids[2]]
self.node_ids[i, 3] = nid_map[nids[3]]
def get_mass_matrix(self, i, model, positions, index0s):
r"""
A mass matrix is a discrete representation of a continuous mass distribution.
To compute our mass matrix for a tetrahedral element with linear shape
functions we need the formula (pp. 266 in Cook)
\int_V N_1^a N_2^b N_3^c N_4^d dV = 6V \frac{a! \, b! \, c! \, d!}{(3 + a + b + c + d)!}    (**)
A consistent element mass matrix (pp. 376 Cook) is defined as
m = \int_V \rho N^T N dV (***)
This equation can be derived from work balance, the details of which are unimportant
here (see Cook pp. 375-376 for details).
Assuming \rho is constant over each tetrahedral element and using the linear shape
functions the above definition (***) results in
|N_1|
m = \rho \int_V |N_2| |N_1 N_2 N_3 N_4| dV
|N_3|
|N_4|
|(N_1 N_1) (N_1 N_2) (N_1 N_3) (N_1 N_4)|
m = \rho \int_V |(N_2 N_1) (N_2 N_2) (N_2 N_3) (N_2 N_4)| dV
|(N_3 N_1) (N_3 N_2) (N_3 N_3) (N_3 N_4)|
|(N_4 N_1) (N_4 N_2) (N_4 N_3) (N_4 N_4)|
by (**)
| 2 1 1 1|
m = \rho V/20 | 1 2 1 1| (****)
| 1 1 2 1|
| 1 1 1 2|
V
m_ij = \rho --- (1+delta_ij)
20
in 3D this means that for the tetrahedral element
| 2 2 2 1 1 1 1 1 1 1 1 1 |
| 2 2 2 1 1 1 1 1 1 1 1 1 |
| 2 2 2 1 1 1 1 1 1 1 1 1 |
| |
| 1 1 1 2 2 2 1 1 1 1 1 1 |
| 1 1 1 2 2 2 1 1 1 1 1 1 |
V | 1 1 1 2 2 2 1 1 1 1 1 1 |
Me = \rho --- | |
20 | 1 1 1 1 1 1 2 2 2 1 1 1 |
| 1 1 1 1 1 1 2 2 2 1 1 1 |
| 1 1 1 1 1 1 2 2 2 1 1 1 |
| |
| 1 1 1 1 1 1 1 1 1 2 2 2 |
| 1 1 1 1 1 1 1 1 1 2 2 2 |
| 1 1 1 1 1 1 1 1 1 2 2 2 |
Notice that in order to obtain the global/system mass matrix an assembly similar to the
stiffness matrix assembly must be carried out. Further, the global M matrix will
have the same sub-block pattern as the global K matrix.
A consistent mass matrix is often not used in computer graphics. Instead an
ad-hoc approach named ``lumped'' mass matrix is applied.
The lumped mass matrix is obtained by placing particle masses at the nodes.
This corresponds to shifting all the masses in the rows of (****) onto the
diagonal. In 3D this yields the element mass matrix
| 1 0 0 0 0 0 0 0 0 0 0 0 |
| 0 1 0 0 0 0 0 0 0 0 0 0 |
| 0 0 1 0 0 0 0 0 0 0 0 0 |
| |
| 0 0 0 1 0 0 0 0 0 0 0 0 |
| 0 0 0 0 1 0 0 0 0 0 0 0 |
V | 0 0 0 0 0 1 0 0 0 0 0 0 |
Me = \rho --- | |
4 | 0 0 0 0 0 0 1 0 0 0 0 0 |
| 0 0 0 0 0 0 0 1 0 0 0 0 |
| 0 0 0 0 0 0 0 0 1 0 0 0 |
| |
| 0 0 0 0 0 0 0 0 0 1 0 0 |
| 0 0 0 0 0 0 0 0 0 0 1 0 |
| 0 0 0 0 0 0 0 0 0 0 0 1 |
Thus a lumped mass matrix is diagonal whereas a consistent mass matrix
is not. Observe that the global mass matrix would also be diagonal and the
assembly is simplified to an iteration over all tetrahedra, while
incrementing the nodal mass by one fourth of the tetrahedral mass.
for each node n
mass(n) = 0
next n
for each tetrahedron e
mass(n_i) += \rho_e Ve / 4
mass(n_j) += \rho_e Ve / 4
mass(n_k) += \rho_e Ve / 4
mass(n_m) += \rho_e Ve / 4
next e
where n_i,n_j,n_k and n_m are the four nodes of the e'th tetrahedron.
The advantage of lumping is less storage and higher performance. On the downside,
lumping introduces a discontinuity in the displacement field.
Obrien and Shen state that the errors introduced by lumping are negligible for the
small-size, coarse meshes used in computer graphics. However, for finer meshes the
errors become noticeable.
There do exist other approaches for computing mass matrices, even methods which
combine other methods. We refer the interested reader to Cook for more details. Here
we have limited ourselves to the two most common methods.
It is worthwhile to notice that under the reasonable assumptions that V and \rho are
positive for all elements both the element mass matrices and the global mass matrices
are symmetric positive definite matrices.
http://image.diku.dk/svn/OpenTissue/archieve/silcowitz/OpenTissue/dynamics/fem/fem_compute_mass.h
"""
is_lumped = True
is_consistent = False
nnodes = 4
ndof = 3 * nnodes
pid = self.property_id[i]
rho = self.model.elements.properties_solid.psolid.get_density_by_property_id(pid)[0]
n0, n1, n2, n3 = self.node_ids[i, :]
V = volume4(positions[self.node_ids[i, 0]],
positions[self.node_ids[i, 1]],
positions[self.node_ids[i, 2]],
positions[self.node_ids[i, 3]])
mass = rho * V
if is_lumped:
mi = mass / 4.
nnodes = 4
M = eye(ndof, dtype='float32')
else:
mi = mass / 20.
M = ones((ndof, ndof), dtype='float32')
for i in range(nnodes):
j = i * 3
M[j:j+3, j:j+3] = 2.
M *= mi
dofs, nijv = self.get_dofs_nijv(index0s, n0, n1, n2, n3)
return M, dofs, nijv
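# A numeric sketch of the lumped branch above (hypothetical rho and V): with
# rho = 1.0 and V = 1.0 the element mass is 1.0, mi = 0.25, and M reduces to
# 0.25 * eye(12) -- one quarter of the element mass on each translational DOF,
# which is exactly the row-lumping rule described in the docstring.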
def get_stiffness_matrices(self, model, positions, index0s):
out = []
# volume coordinates
# FEM: Volume I (Zienkiewicz) p.186
# reference shape functions only -- a1..d3, x, y, z and volume are not defined
# in this developmental routine, so they are kept commented out
#volume6 = volume * 6
#L1 = (a1 + b1 * x + c1 * y + d1 * z) / volume6
#L2 = (a2 + b2 * x + c2 * y + d2 * z) / volume6
#L3 = (a3 + b3 * x + c3 * y + d3 * z) / volume6
# FEM: Volume I (Zienkiewicz) p.186
#x = L1*x1 + L2*x2 + L3*x3 + L4*x4
#y = L1*y1 + L2*y2 + L3*y3 + L4*y4
#z = L1*z1 + L2*z2 + L3*z3 + L4*z4
#1 = L1 + L2 + L3 + L4
for i in range(self.n):
K, dofs, nijv = self.get_stiffness_matrix(
i, model, positions, index0s)
out.append((K, dofs, nijv))
self.add_stiffness(K, dofs, nijv)
def get_stiffness_matrix(self, i, model, positions, index0s):
nnodes = 4
ndof = 3 * nnodes
pid = self.property_id[i]
prop = self.model.elements.properties_solid.psolid
rho = prop.get_density_by_property_id(pid)[0]
n0, n1, n2, n3 = self.node_ids[i, :]
xyz1 = positions[self.node_ids[i, 0]]
xyz2 = positions[self.node_ids[i, 1]]
xyz3 = positions[self.node_ids[i, 2]]
xyz4 = positions[self.node_ids[i, 3]]
vol = volume4(xyz1, xyz2, xyz3, xyz4)
stiffness = rho * vol
ki = stiffness / 4.
nnodes = 4
K = eye(ndof, dtype='float32') # not done...
u = 0.
v = 0.
#wts = [-0.57735, 0.57735]
#for u in wts:
#for v in wts:
Ji = array([
[v - 1.0, -v + 1.0, v + 1.0, -v - 1.0],
[u - 1.0, -u - 1.0, u + 1.0, -u + 1.0],
]) / 4.
#J = Ji @ xy
#Jinv = np.linalg.inv(J)
#det_j = np.linalg.det(J)
#darea = det_j
#B1 = Jinv @ Ji
#print('B1 =\n', B1)
#N1x, N2x, N3x, N4x = B1[0, :]
#N1y, N2y, N3y, N4y = B1[1, :]
#print('Nix =', B1[0, :])
vol_matrix = np.hstack([
np.ones((4, 1)),
np.vstack([xyz1, xyz2, xyz3, xyz4]),
])
ivol_matrix = np.linalg.inv(vol_matrix)
a1, b1, c1 = ivol_matrix[0, 1:]
a2, b2, c2 = ivol_matrix[1, 1:]
a3, b3, c3 = ivol_matrix[2, 1:]
a4, b4, c4 = ivol_matrix[3, 1:]
#N1x, N2x, N3x, N4x = v - 1.0, -v + 1.0, v + 1.0, -v - 1.0
#N1y, N2y, N3y, N4y = u - 1.0, -u - 1.0, u + 1.0, -u + 1.0
B = array([
[a1, 0., 0., a2, 0., 0., a3, 0., 0., a4, 0., 0.],
[0., b1, 0., 0., b2, 0., 0., b3, 0., 0., b4, 0.],
[0., 0., c1, 0., 0., c2, 0., 0., c3, 0., 0., c4],
[b1, a1, 0., b2, a2, 0., b3, a3, 0., b4, a4, 0.],
[0., c1, b1, 0., c2, b2, 0., c3, b3, 0., c4, b4],
[c1, 0., a1, c2, 0., a2, c3, 0., a3, c4, 0., a4],
]) / (6 * vol)
#N = array([
#[N1, 0., 0., N2, 0., 0., N3, 0., N4, 0., 0.],
#[0., N1, 0., 0., N2, 0., 0., N3, 0., N4, 0.],
#[0., 0., N1, 0., 0., N2, 0., 0., N3, 0., N4],
#])
#print('B =\n', B)
#E = 1.0
#nu = 0.25
mid1 = prop.material_id[0]
mat = self.model.materials.get_solid_material(mid1)
print(mat)
E = mat.E[0]
nu = mat.nu[0]
G = mat.G[0]
# [sigma] = [C] * [epsilon]
#denom = 1 - nu**2
#C = np.zeros((6, 6), dtype='float64')
#outside = E / ((1 + nu) * (1 - 2 * nu))
#C[0, 0] = C[1, 1] = C[2, 2] = (1 - nu) * outside
#C[3, 3] = C[4, 4] = C[5, 5] = (0.5 - nu) * outside
if 0:
## [stress] = [E] [strain]
#emat = np.zeros((5, 5), dtype='float64')
#emat[0, 0] = emat[1, 1] = E / denom
#emat[1, 0] = emat[0, 1] = (E * nu) / denom
#emat[2, 2] = emat[3, 3] = emat[4, 4] = G
## [M] = [D] * [bending]
#dmat = np.zeros((5, 5), dtype='float64')
#D = E * h**3 / (12 * denom)
#dmat[0, 0] = dmat[1, 1] = D
#dmat[1, 0] = dmat[0, 1] = D * nu
#dmat[2, 2] = D * (1. - nu) / 2.
#dmat[3, 3] = emat[4, 4] = G * h
# FEM: Volume I (Zienkiewicz) p.132
dmat2 = np.zeros((6, 6), dtype='float64')
dmat2[0, 0] = dmat2[1, 1] = dmat2[2, 2] = 1 - nu
dmat2[0, 1] = dmat2[0, 2] = dmat2[1, 0] = dmat2[2, 0] = nu
dmat2[3, 3] = dmat2[4, 4] = dmat2[5, 5] = (1 - 2 * nu) / 2.
dmat2 *= E / ((1 + nu) * (1 - 2 * nu))
#print('C =\n', C)
#print('thickness =', thickness)
Ki = B.T @ C @ B
#print('Ki(%s,%s) =%s\n' % (u, v, Ki))
#print('Ki(%s,%s) =\n%s\n' % (u, v, list_print(Ki, '%.4e')))
K += Ki
#K *= ki
dofs, nijv = self.get_dofs_nijv(index0s, n0, n1, n2, n3)
return K, dofs, nijv
def get_dofs_nijv(self, index0s, n0, n1, n2, n3):
i0 = index0s[n0]
i1 = index0s[n1]
i2 = index0s[n2]
i3 = index0s[n3]
dofs = array([
i0, i0+1, i0+2,
i1, i1+1, i1+2,
i2, i2+1, i2+2,
i3, i3+1, i3+2,
], 'int32')
nijv = [
# translation
(n0, 1), (n0, 2), (n0, 3),
(n1, 1), (n1, 2), (n1, 3),
(n2, 1), (n2, 2), (n2, 3),
(n3, 1), (n3, 2), (n3, 3),
]
return dofs, nijv
def _verify(self, xref=True):
eid = self.eid
pid = self.Pid()
nids = self.node_ids
assert isinstance(eid, int)
assert isinstance(pid, int)
for i, nid in enumerate(nids):
assert isinstance(nid, int), 'nid%i is not an integer; nid=%s' %(i, nid)
if xref:
c = self.centroid()
v = self.volume()
assert isinstance(v, float)
for i in range(3):
assert isinstance(c[i], float)
def get_node_indicies(self, i=None):
if i is None:
i1 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 0])
i2 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 1])
i3 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 2])
i4 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 3])
else:
i1 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 0])
i2 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 1])
i3 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 2])
i4 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 3])
return i1, i2, i3, i4
def _get_node_locations_by_index(self, i, xyz_cid0):
"""
:param i: None or an array of node IDs
:param xyz_cid0: the node positions as a dictionary
"""
grid = self.model.grid
get_node_index_by_node_id = self.model.grid.get_node_index_by_node_id
node_ids = self.node_ids
#msg = ', which is required by %s' % self.type
i1, i2, i3, i4 = self.get_node_indicies(i)
n1 = xyz_cid0[i1, :]
n2 = xyz_cid0[i2, :]
n3 = xyz_cid0[i3, :]
n4 = xyz_cid0[i4, :]
return n1, n2, n3, n4
def get_volume_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the volume for one or more elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None -> all
the elements to consider
xyz_cid0 : dict[int node_id] : (3, ) float ndarray xyz (default=None -> auto)
the positions of the GRIDs in CID=0
total : bool; default=False
should the volume be summed
"""
n1, n2, n3, n4 = self._get_node_locations_by_element_id(element_id, xyz_cid0)
V = zeros(n1.shape[0], self.model.float_fmt)
for i, n1i, n2i, n3i, n4i in zip(count(), n1, n2, n3, n4):
V[i] = volume4(n1i, n2i, n3i, n4i)
return V
def get_mass_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the mass for one or more CTETRA elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None -> all
the elements to consider
xyz_cid0 : dict[int node_id] : (3, ) float ndarray xyz (default=None -> auto)
the positions of the GRIDs in CID=0
total : bool; default=False
should the centroid be summed
"""
if element_id is None:
element_id = self.element_id
if xyz_cid0 is None:
xyz_cid0 = self.model.grid.get_position_by_node_index()
V = self.get_volume_by_element_id(element_id, xyz_cid0)
mid = self.model.properties_solid.get_material_id_by_property_id(self.property_id)
rho = self.model.materials.get_density_by_material_id(mid)
mass = V * rho
if total:
mass = mass.sum()
return mass
def get_centroid_volume(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the centroid and volume for one or more elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None -> all
the elements to consider
xyz_cid0 : dict[int node_id] : (3, ) float ndarray xyz (default=None -> auto)
the positions of the GRIDs in CID=0
:param total: should the volume be summed; centroid be averaged (default=False)
.. seealso:: CTETRA4.volume() and CTETRA4.centroid for more information.
"""
n1, n2, n3, n4 = self._get_node_locations_by_element_id(element_id, xyz_cid0)
n = len(element_id)
volume = zeros(n, self.model.float_fmt)
i = 0
for n1i, n2i, n3i, n4i in zip(n1, n2, n3, n4):
volume[i] = volume4(n1i, n2i, n3i, n4i)
i += 1
centroid = (n1 + n2 + n3 + n4) / 4.0
if total:
centroid = centroid.mean()
volume = abs(volume).sum()
else:
volume = abs(volume)
assert volume.min() > 0.0, 'volume.min() = %f' % volume.min()
return centroid, volume
def get_centroid_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the centroid for one or more elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None -> all
the elements to consider
xyz_cid0 : dict[int node_id] : (3, ) float ndarray xyz (default=None -> auto)
the positions of the GRIDs in CID=0
total : bool; default=False
should the centroid be averaged
"""
n1, n2, n3, n4 = self._get_node_locations_by_element_id(element_id, xyz_cid0)
centroid = (n1 + n2 + n3 + n4) / 4.0
if total:
centroid = centroid.mean(axis=0)
return centroid
#def get_face_nodes(self, nid, nid_opposite):
#raise NotImplementedError()
#nids = self.node_ids[:4]
#indx = nids.index(nid_opposite)
#nids.pop(indx)
#return nids
def write_card(self, bdf_file, size=8, element_id=None):
if self.n:
if element_id is None:
i = arange(self.n)
else:
i = searchsorted(self.element_id, element_id)
if size == 16 or max(self.element_id[i].max(), self.property_id[i].max(),
self.node_ids[i, :].max()) > 1000000000:
msg = ('CTETRA %16i%16i%16i%16i\n'
' %16i%16i\n')
for (eid, pid, n) in zip(self.element_id[i], self.property_id[i], self.node_ids[i, :]):
if eid in self._comments:
bdf_file.write(self._comments[eid])
data = [eid, pid] + list(n)
bdf_file.write(msg % tuple(data))
else:
msg = 'CTETRA %8i%8i%8i%8i%8i%8i\n'
for (eid, pid, n) in zip(self.element_id[i], self.property_id[i], self.node_ids[i, :]):
if eid in self._comments:
bdf_file.write(self._comments[eid])
data = [eid, pid] + list(n)
bdf_file.write(msg % tuple(data))
|
picotui/screen.py | timeopochin/picotui | 739 | 12681253 | import os
import signal
class Screen:
@staticmethod
def wr(s):
# TODO: When Python is 3.5, update this to use only bytes
if isinstance(s, str):
s = bytes(s, "utf-8")
os.write(1, s)
@staticmethod
def wr_fixedw(s, width):
# Write string in a fixed-width field
s = s[:width]
Screen.wr(s)
Screen.wr(" " * (width - len(s)))
# Doesn't work here, as it doesn't advance cursor
#Screen.clear_num_pos(width - len(s))
@staticmethod
def cls():
Screen.wr(b"\x1b[2J")
@staticmethod
def goto(x, y):
# TODO: When Python is 3.5, update this to use bytes
Screen.wr("\x1b[%d;%dH" % (y + 1, x + 1))
@staticmethod
def clear_to_eol():
Screen.wr(b"\x1b[0K")
# Clear specified number of positions
@staticmethod
def clear_num_pos(num):
if num > 0:
Screen.wr("\x1b[%dX" % num)
@staticmethod
def attr_color(fg, bg=-1):
if bg == -1:
bg = fg >> 4
fg &= 0xf
# TODO: Switch to b"%d" % foo when py3.5 is everywhere
if bg is None:
if (fg > 8):
Screen.wr("\x1b[%d;1m" % (fg + 30 - 8))
else:
Screen.wr("\x1b[%dm" % (fg + 30))
else:
assert bg <= 8
if (fg > 8):
Screen.wr("\x1b[%d;%d;1m" % (fg + 30 - 8, bg + 40))
else:
Screen.wr("\x1b[0;%d;%dm" % (fg + 30, bg + 40))
@staticmethod
def attr_reset():
Screen.wr(b"\x1b[0m")
@staticmethod
def cursor(onoff):
if onoff:
Screen.wr(b"\x1b[?25h")
else:
Screen.wr(b"\x1b[?25l")
def draw_box(self, left, top, width, height):
# Use http://www.utf8-chartable.de/unicode-utf8-table.pl
# for utf-8 pseudographic reference
bottom = top + height - 1
self.goto(left, top)
# "┌"
self.wr(b"\xe2\x94\x8c")
# "─"
hor = b"\xe2\x94\x80" * (width - 2)
self.wr(hor)
# "┐"
self.wr(b"\xe2\x94\x90")
self.goto(left, bottom)
# "└"
self.wr(b"\xe2\x94\x94")
self.wr(hor)
# "┘"
self.wr(b"\xe2\x94\x98")
top += 1
while top < bottom:
# "│"
self.goto(left, top)
self.wr(b"\xe2\x94\x82")
self.goto(left + width - 1, top)
self.wr(b"\xe2\x94\x82")
top += 1
def clear_box(self, left, top, width, height):
# doesn't work
#self.wr("\x1b[%s;%s;%s;%s$z" % (top + 1, left + 1, top + height, left + width))
s = b" " * width
bottom = top + height
while top < bottom:
self.goto(left, top)
self.wr(s)
top += 1
def dialog_box(self, left, top, width, height, title=""):
self.clear_box(left + 1, top + 1, width - 2, height - 2)
self.draw_box(left, top, width, height)
if title:
#pos = (width - len(title)) / 2
pos = 1
self.goto(left + pos, top)
self.wr(title)
@classmethod
def init_tty(cls):
import tty, termios
cls.org_termios = termios.tcgetattr(0)
tty.setraw(0)
@classmethod
def deinit_tty(cls):
import termios
termios.tcsetattr(0, termios.TCSANOW, cls.org_termios)
@classmethod
def enable_mouse(cls):
# Mouse reporting - X10 compatibility mode
cls.wr(b"\x1b[?1000h")
@classmethod
def disable_mouse(cls):
# Mouse reporting - X10 compatibility mode
cls.wr(b"\x1b[?1000l")
@classmethod
def screen_size(cls):
import select
cls.wr(b"\x1b[18t")
res = select.select([0], [], [], 0.2)[0]
if not res:
return (80, 24)
resp = os.read(0, 32)
assert resp.startswith(b"\x1b[8;") and resp[-1:] == b"t"
vals = resp[:-1].split(b";")
return (int(vals[2]), int(vals[1]))
# Set function to redraw an entire (client) screen
# This is called to restore original screen, as we don't save it.
@classmethod
def set_screen_redraw(cls, handler):
cls.screen_redraw = handler
@classmethod
def set_screen_resize(cls, handler):
signal.signal(signal.SIGWINCH, lambda sig, stk: handler(cls))
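# A minimal usage sketch, assuming an interactive VT100-style terminal on
# stdin/stdout; the box geometry and text below are arbitrary demo values.
if __name__ == "__main__":
    Screen.init_tty()
    try:
        Screen.cursor(False)
        Screen.cls()
        Screen.attr_color(7, 4)
        Screen().dialog_box(5, 3, 40, 10, title=" demo ")
        Screen.goto(7, 5)
        Screen.wr("Hello from Screen - press any key")
        Screen.attr_reset()
        os.read(0, 1)
    finally:
        Screen.cursor(True)
        Screen.deinit_tty()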
|
src/genie/libs/parser/ios/tests/ShowNtpAssociations/cli/equal/golden_output_1_expected.py | balmasea/genieparser | 204 | 12681325 | <reponame>balmasea/genieparser<gh_stars>100-1000
expected_output = {
"clock_state": {
"system_status": {
"associations_address": "10.16.2.2",
"associations_local_mode": "client",
"clock_offset": 27.027,
"clock_refid": "127.127.1.1",
"clock_state": "synchronized",
"clock_stratum": 3,
"root_delay": 5.61,
}
},
"peer": {
"10.16.2.2": {
"local_mode": {
"client": {
"delay": 5.61,
"jitter": 3.342,
"mode": "synchronized",
"offset": 27.027,
"poll": 64,
"reach": 7,
"receive_time": 25,
"refid": "127.127.1.1",
"remote": "10.16.2.2",
"stratum": 3,
"configured": True,
"local_mode": "client",
}
}
},
"10.36.3.3": {
"local_mode": {
"client": {
"delay": 0.0,
"jitter": 15937.0,
"mode": "unsynchronized",
"offset": 0.0,
"poll": 512,
"reach": 0,
"receive_time": "-",
"refid": ".STEP.",
"remote": "10.36.3.3",
"stratum": 16,
"configured": True,
"local_mode": "client",
}
}
},
},
}
|
bcbio/hla/groups.py | a113n/bcbio-nextgen | 418 | 12681330 | <reponame>a113n/bcbio-nextgen
"""Place HLA calls into group for validation and presentation.
Uses p-groups with identical protein sequences in the antigen binding domains:
http://hla.alleles.org/alleles/p_groups.html
HLA allele nomenclature:
https://www.ebi.ac.uk/ipd/imgt/hla/
https://github.com/jrob119/IMGTHLA
HLA sequences are from the 1000 genomes build 38 reference:
ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/
based on IMGT/HLA-3.18.0
"""
import toolz as tz
def hla_protein(name, data):
group = tz.get_in([data["genome_build"], name], pgroups)
if group:
base = name.split("*")[0]
group = "%s*%s" % (base, group)
else:
group = _from_name(name)
return group
def _from_name(name):
"""Parse the HLA base name (group + protein) from a full name.
Separates out synonymous and non-coding indicators.
http://hla.alleles.org/nomenclature/naming.html
"""
return ":".join(name.split(":")[:2])
pgroups = {}
pgroups["hg38"] = \
{'HLA-A*01:01:01:01': '01:01P',
'HLA-A*01:01:01:02N': '',
'HLA-A*01:01:38L': '01:01P',
'HLA-A*01:02': '',
'HLA-A*01:03': '',
'HLA-A*01:04N': '',
'HLA-A*01:09': '',
'HLA-A*01:11N': '',
'HLA-A*01:14': '',
'HLA-A*01:16N': '',
'HLA-A*01:20': '',
'HLA-A*02:01:01:01': '02:01P',
'HLA-A*02:01:01:02L': '02:01P',
'HLA-A*02:01:01:03': '02:01P',
'HLA-A*02:01:01:04': '02:01P',
'HLA-A*02:02:01': '',
'HLA-A*02:03:01': '02:03P',
'HLA-A*02:03:03': '02:03P',
'HLA-A*02:05:01': '02:05P',
'HLA-A*02:06:01': '',
'HLA-A*02:07:01': '02:07P',
'HLA-A*02:10': '02:10P',
'HLA-A*02:251': '',
'HLA-A*02:259': '',
'HLA-A*02:264': '02:03P',
'HLA-A*02:265': '02:07P',
'HLA-A*02:266': '02:01P',
'HLA-A*02:269': '',
'HLA-A*02:279': '',
'HLA-A*02:32N': '',
'HLA-A*02:376': '',
'HLA-A*02:43N': '',
'HLA-A*02:455': '02:01P',
'HLA-A*02:48': '',
'HLA-A*02:51': '',
'HLA-A*02:533': '',
'HLA-A*02:53N': '',
'HLA-A*02:57': '',
'HLA-A*02:60:01': '02:60P',
'HLA-A*02:65': '02:65P',
'HLA-A*02:68': '',
'HLA-A*02:77': '',
'HLA-A*02:81': '02:81P',
'HLA-A*02:89': '02:01P',
'HLA-A*02:95': '',
'HLA-A*03:01:01:01': '03:01P',
'HLA-A*03:01:01:02N': '',
'HLA-A*03:01:01:03': '03:01P',
'HLA-A*03:02:01': '03:02P',
'HLA-A*03:11N': '',
'HLA-A*03:21N': '',
'HLA-A*03:36N': '',
'HLA-A*11:01:01': '',
'HLA-A*11:01:18': '11:01P',
'HLA-A*11:02:01': '11:02P',
'HLA-A*11:05': '',
'HLA-A*11:110': '11:02P',
'HLA-A*11:25': '',
'HLA-A*11:50Q': '',
'HLA-A*11:60': '',
'HLA-A*11:69N': '',
'HLA-A*11:74': '',
'HLA-A*11:75': '',
'HLA-A*11:77': '11:02P',
'HLA-A*23:01:01': '23:01P',
'HLA-A*23:09': '',
'HLA-A*23:38N': '',
'HLA-A*24:02:01:01': '24:02P',
'HLA-A*24:02:01:02L': '24:02P',
'HLA-A*24:02:01:03': '24:02P',
'HLA-A*24:02:03Q': '24:02P',
'HLA-A*24:02:10': '24:02P',
'HLA-A*24:03:01': '24:03P',
'HLA-A*24:07:01': '24:07P',
'HLA-A*24:08': '',
'HLA-A*24:09N': '',
'HLA-A*24:10:01': '24:10P',
'HLA-A*24:11N': '',
'HLA-A*24:152': '',
'HLA-A*24:20': '',
'HLA-A*24:215': '',
'HLA-A*24:61': '',
'HLA-A*24:86N': '',
'HLA-A*25:01:01': '25:01P',
'HLA-A*26:01:01': '',
'HLA-A*26:11N': '',
'HLA-A*26:15': '',
'HLA-A*26:50': '',
'HLA-A*29:01:01:01': '29:01P',
'HLA-A*29:01:01:02N': '',
'HLA-A*29:02:01:01': '29:02P',
'HLA-A*29:02:01:02': '29:02P',
'HLA-A*29:46': '29:02P',
'HLA-A*30:01:01': '30:01P',
'HLA-A*30:02:01:01': '30:02P',
'HLA-A*30:02:01:02': '30:02P',
'HLA-A*30:04:01': '30:04P',
'HLA-A*30:89': '',
'HLA-A*31:01:02': '',
'HLA-A*31:01:23': '31:01P',
'HLA-A*31:04': '',
'HLA-A*31:14N': '',
'HLA-A*31:46': '31:01P',
'HLA-A*32:01:01': '32:01P',
'HLA-A*32:06': '',
'HLA-A*33:01:01': '33:01P',
'HLA-A*33:03:01': '33:03P',
'HLA-A*33:07': '',
'HLA-A*34:01:01': '34:01P',
'HLA-A*34:02:01': '34:02P',
'HLA-A*36:01': '',
'HLA-A*43:01': '',
'HLA-A*66:01:01': '66:01P',
'HLA-A*66:17': '66:01P',
'HLA-A*68:01:01:01': '68:01P',
'HLA-A*68:01:01:02': '68:01P',
'HLA-A*68:01:02:01': '68:01P',
'HLA-A*68:01:02:02': '68:01P',
'HLA-A*68:02:01:01': '68:02P',
'HLA-A*68:02:01:02': '68:02P',
'HLA-A*68:02:01:03': '68:02P',
'HLA-A*68:02:02': '68:02P',
'HLA-A*68:03:01': '68:03P',
'HLA-A*68:08:01': '68:08P',
'HLA-A*68:113': '',
'HLA-A*68:17': '',
'HLA-A*68:18N': '',
'HLA-A*68:22': '',
'HLA-A*68:71': '',
'HLA-A*69:01': '',
'HLA-A*74:01': '',
'HLA-A*74:02:01:01': '74:01P',
'HLA-A*74:02:01:02': '74:01P',
'HLA-A*80:01:01:01': '80:01P',
'HLA-A*80:01:01:02': '80:01P',
'HLA-B*07:02:01': '07:02P',
'HLA-B*07:05:01': '07:05P',
'HLA-B*07:06': '07:05P',
'HLA-B*07:156': '07:02P',
'HLA-B*07:33:01': '07:33P',
'HLA-B*07:41': '',
'HLA-B*07:44': '07:02P',
'HLA-B*07:50': '',
'HLA-B*08:01:01': '08:01P',
'HLA-B*08:08N': '',
'HLA-B*08:132': '',
'HLA-B*08:134': '',
'HLA-B*08:19N': '',
'HLA-B*08:20': '',
'HLA-B*08:33': '',
'HLA-B*08:79': '',
'HLA-B*13:01:01': '13:01P',
'HLA-B*13:02:01': '13:02P',
'HLA-B*13:02:03': '13:02P',
'HLA-B*13:02:09': '13:02P',
'HLA-B*13:08': '',
'HLA-B*13:15': '',
'HLA-B*13:25': '',
'HLA-B*14:01:01': '14:01P',
'HLA-B*14:02:01': '',
'HLA-B*14:07N': '',
'HLA-B*15:01:01:01': '15:01P',
'HLA-B*15:01:01:02N': '',
'HLA-B*15:01:01:03': '15:01P',
'HLA-B*15:02:01': '15:02P',
'HLA-B*15:03:01': '',
'HLA-B*15:04:01': '15:04P',
'HLA-B*15:07:01': '15:07P',
'HLA-B*15:108': '',
'HLA-B*15:10:01': '15:10P',
'HLA-B*15:11:01': '15:11P',
'HLA-B*15:13:01': '15:13P',
'HLA-B*15:16:01': '15:16P',
'HLA-B*15:17:01:01': '15:17P',
'HLA-B*15:17:01:02': '15:17P',
'HLA-B*15:18:01': '15:18P',
'HLA-B*15:220': '15:03P',
'HLA-B*15:25:01': '15:25P',
'HLA-B*15:27:01': '15:27P',
'HLA-B*15:32:01': '15:32P',
'HLA-B*15:42': '',
'HLA-B*15:58': '',
'HLA-B*15:66': '',
'HLA-B*15:77': '',
'HLA-B*15:83': '',
'HLA-B*18:01:01:01': '18:01P',
'HLA-B*18:01:01:02': '18:01P',
'HLA-B*18:02': '',
'HLA-B*18:03': '',
'HLA-B*18:17N': '',
'HLA-B*18:26': '',
'HLA-B*18:94N': '',
'HLA-B*27:04:01': '27:04P',
'HLA-B*27:05:02': '27:05P',
'HLA-B*27:05:18': '27:05P',
'HLA-B*27:06': '',
'HLA-B*27:07:01': '27:07P',
'HLA-B*27:131': '',
'HLA-B*27:24': '',
'HLA-B*27:25': '',
'HLA-B*27:32': '',
'HLA-B*35:01:01:01': '35:01P',
'HLA-B*35:01:01:02': '35:01P',
'HLA-B*35:01:22': '35:01P',
'HLA-B*35:02:01': '35:02P',
'HLA-B*35:03:01': '35:03P',
'HLA-B*35:05:01': '35:05P',
'HLA-B*35:08:01': '35:08P',
'HLA-B*35:14:02': '35:14P',
'HLA-B*35:241': '35:01P',
'HLA-B*35:41': '',
'HLA-B*37:01:01': '37:01P',
'HLA-B*37:01:05': '37:01P',
'HLA-B*38:01:01': '38:01P',
'HLA-B*38:02:01': '38:02P',
'HLA-B*38:14': '',
'HLA-B*39:01:01:01': '39:01P',
'HLA-B*39:01:01:02L': '39:01P',
'HLA-B*39:01:01:03': '39:01P',
'HLA-B*39:01:03': '39:01P',
'HLA-B*39:01:16': '39:01P',
'HLA-B*39:01:21': '39:01P',
'HLA-B*39:05:01': '39:05P',
'HLA-B*39:06:02': '39:06P',
'HLA-B*39:10:01': '39:10P',
'HLA-B*39:13:02': '39:13P',
'HLA-B*39:14': '',
'HLA-B*39:34': '',
'HLA-B*39:38Q': '',
'HLA-B*40:01:01': '40:01P',
'HLA-B*40:01:02': '40:01P',
'HLA-B*40:02:01': '40:02P',
'HLA-B*40:03': '40:03P',
'HLA-B*40:06:01:01': '40:06P',
'HLA-B*40:06:01:02': '40:06P',
'HLA-B*40:10:01': '',
'HLA-B*40:150': '40:01P',
'HLA-B*40:40': '40:40P',
'HLA-B*40:72:01': '40:72P',
'HLA-B*40:79': '',
'HLA-B*41:01:01': '41:01P',
'HLA-B*41:02:01': '41:02P',
'HLA-B*42:01:01': '42:01P',
'HLA-B*42:02': '',
'HLA-B*42:08': '',
'HLA-B*44:02:01:01': '44:02P',
'HLA-B*44:02:01:02S': '44:02P',
'HLA-B*44:02:01:03': '44:02P',
'HLA-B*44:02:17': '44:02P',
'HLA-B*44:02:27': '44:02P',
'HLA-B*44:03:01': '',
'HLA-B*44:03:02': '44:03P',
'HLA-B*44:04': '',
'HLA-B*44:09': '',
'HLA-B*44:138Q': '',
'HLA-B*44:150': '',
'HLA-B*44:23N': '',
'HLA-B*44:26': '',
'HLA-B*44:46': '',
'HLA-B*44:49': '',
'HLA-B*44:56N': '',
'HLA-B*45:01:01': '45:01P',
'HLA-B*45:04': '',
'HLA-B*46:01:01': '46:01P',
'HLA-B*46:01:05': '46:01P',
'HLA-B*47:01:01:01': '47:01P',
'HLA-B*47:01:01:02': '47:01P',
'HLA-B*48:01:01': '48:01P',
'HLA-B*48:03:01': '48:03P',
'HLA-B*48:04': '',
'HLA-B*48:08': '',
'HLA-B*49:01:01': '49:01P',
'HLA-B*49:32': '',
'HLA-B*50:01:01': '50:01P',
'HLA-B*51:01:01': '',
'HLA-B*51:01:02': '51:01P',
'HLA-B*51:02:01': '51:02P',
'HLA-B*51:07:01': '51:07P',
'HLA-B*51:42': '',
'HLA-B*52:01:01:01': '52:01P',
'HLA-B*52:01:01:02': '52:01P',
'HLA-B*52:01:01:03': '52:01P',
'HLA-B*52:01:02': '52:01P',
'HLA-B*53:01:01': '53:01P',
'HLA-B*53:11': '',
'HLA-B*54:01:01': '54:01P',
'HLA-B*54:18': '',
'HLA-B*55:01:01': '55:01P',
'HLA-B*55:01:03': '55:01P',
'HLA-B*55:02:01': '',
'HLA-B*55:12': '',
'HLA-B*55:24': '',
'HLA-B*55:48': '',
'HLA-B*56:01:01': '',
'HLA-B*56:03': '',
'HLA-B*56:04': '',
'HLA-B*57:01:01': '57:01P',
'HLA-B*57:03:01': '57:03P',
'HLA-B*57:06': '',
'HLA-B*57:11': '',
'HLA-B*57:29': '57:01P',
'HLA-B*58:01:01': '',
'HLA-B*58:31N': '',
'HLA-B*59:01:01:01': '59:01P',
'HLA-B*59:01:01:02': '59:01P',
'HLA-B*67:01:01': '67:01P',
'HLA-B*67:01:02': '67:01P',
'HLA-B*67:02': '',
'HLA-B*73:01': '',
'HLA-B*78:01:01': '78:01P',
'HLA-B*81:01': '81:01P',
'HLA-B*82:02:01': '82:02P',
'HLA-C*01:02:01': '01:02P',
'HLA-C*01:02:11': '01:02P',
'HLA-C*01:02:29': '01:02P',
'HLA-C*01:02:30': '01:02P',
'HLA-C*01:03': '01:03P',
'HLA-C*01:06': '',
'HLA-C*01:08': '',
'HLA-C*01:14': '',
'HLA-C*01:21': '',
'HLA-C*01:30': '',
'HLA-C*01:40': '01:02P',
'HLA-C*02:02:02:01': '02:02P',
'HLA-C*02:02:02:02': '02:02P',
'HLA-C*02:10': '02:02P',
'HLA-C*02:11': '',
'HLA-C*02:16:02': '02:16P',
'HLA-C*02:69': '02:02P',
'HLA-C*02:85': '',
'HLA-C*02:86': '',
'HLA-C*02:87': '',
'HLA-C*03:02:01': '03:02P',
'HLA-C*03:02:02:01': '03:02P',
'HLA-C*03:02:02:02': '03:02P',
'HLA-C*03:02:02:03': '03:02P',
'HLA-C*03:03:01': '03:03P',
'HLA-C*03:04:01:01': '03:04P',
'HLA-C*03:04:01:02': '03:04P',
'HLA-C*03:04:02': '03:04P',
'HLA-C*03:04:04': '03:04P',
'HLA-C*03:05': '',
'HLA-C*03:06': '',
'HLA-C*03:100': '03:04P',
'HLA-C*03:13:01': '03:13P',
'HLA-C*03:20N': '',
'HLA-C*03:219': '03:04P',
'HLA-C*03:261': '',
'HLA-C*03:40:01': '03:40P',
'HLA-C*03:41:02': '03:41P',
'HLA-C*03:46': '',
'HLA-C*03:61': '',
'HLA-C*04:01:01:01': '04:01P',
'HLA-C*04:01:01:02': '04:01P',
'HLA-C*04:01:01:03': '04:01P',
'HLA-C*04:01:01:04': '04:01P',
'HLA-C*04:01:01:05': '04:01P',
'HLA-C*04:01:62': '04:01P',
'HLA-C*04:03:01': '04:03P',
'HLA-C*04:06': '',
'HLA-C*04:09N': '',
'HLA-C*04:128': '',
'HLA-C*04:161': '04:01P',
'HLA-C*04:177': '',
'HLA-C*04:70': '',
'HLA-C*04:71': '',
'HLA-C*05:01:01:01': '05:01P',
'HLA-C*05:01:01:02': '05:01P',
'HLA-C*05:08': '',
'HLA-C*05:09:01': '05:09P',
'HLA-C*05:93': '05:01P',
'HLA-C*06:02:01:01': '06:02P',
'HLA-C*06:02:01:02': '06:02P',
'HLA-C*06:02:01:03': '06:02P',
'HLA-C*06:23': '',
'HLA-C*06:24': '',
'HLA-C*06:46N': '',
'HLA-C*07:01:01:01': '07:01P',
'HLA-C*07:01:01:02': '07:01P',
'HLA-C*07:01:02': '07:01P',
'HLA-C*07:01:19': '07:01P',
'HLA-C*07:01:27': '07:01P',
'HLA-C*07:01:45': '07:01P',
'HLA-C*07:02:01:01': '07:02P',
'HLA-C*07:02:01:02': '07:02P',
'HLA-C*07:02:01:03': '07:02P',
'HLA-C*07:02:01:04': '07:02P',
'HLA-C*07:02:01:05': '07:02P',
'HLA-C*07:02:05': '07:02P',
'HLA-C*07:02:06': '07:02P',
'HLA-C*07:02:64': '07:02P',
'HLA-C*07:04:01': '07:04P',
'HLA-C*07:04:02': '07:04P',
'HLA-C*07:06': '07:01P',
'HLA-C*07:149': '',
'HLA-C*07:18': '07:01P',
'HLA-C*07:19': '',
'HLA-C*07:26': '',
'HLA-C*07:30': '',
'HLA-C*07:32N': '',
'HLA-C*07:384': '',
'HLA-C*07:385': '',
'HLA-C*07:386': '',
'HLA-C*07:391': '',
'HLA-C*07:392': '',
'HLA-C*07:49': '',
'HLA-C*07:56:02': '07:56P',
'HLA-C*07:66': '07:02P',
'HLA-C*07:67': '',
'HLA-C*08:01:01': '08:01P',
'HLA-C*08:01:03': '08:01P',
'HLA-C*08:02:01:01': '08:02P',
'HLA-C*08:02:01:02': '08:02P',
'HLA-C*08:03:01': '08:03P',
'HLA-C*08:04:01': '08:04P',
'HLA-C*08:112': '',
'HLA-C*08:20': '08:01P',
'HLA-C*08:21': '',
'HLA-C*08:22': '08:01P',
'HLA-C*08:24': '08:01P',
'HLA-C*08:27': '',
'HLA-C*08:36N': '',
'HLA-C*08:40': '08:03P',
'HLA-C*08:41': '',
'HLA-C*08:62': '',
'HLA-C*12:02:02': '12:02P',
'HLA-C*12:03:01:01': '12:03P',
'HLA-C*12:03:01:02': '12:03P',
'HLA-C*12:08': '',
'HLA-C*12:13': '',
'HLA-C*12:19': '',
'HLA-C*12:22': '',
'HLA-C*12:99': '',
'HLA-C*14:02:01': '14:02P',
'HLA-C*14:03': '',
'HLA-C*14:21N': '',
'HLA-C*14:23': '14:02P',
'HLA-C*15:02:01': '',
'HLA-C*15:05:01': '15:05P',
'HLA-C*15:05:02': '15:05P',
'HLA-C*15:13': '15:02P',
'HLA-C*15:16': '',
'HLA-C*15:17': '',
'HLA-C*15:96Q': '',
'HLA-C*16:01:01': '',
'HLA-C*16:02:01': '16:02P',
'HLA-C*16:04:01': '16:04P',
'HLA-C*17:01:01:01': '17:01P',
'HLA-C*17:01:01:02': '17:01P',
'HLA-C*17:01:01:03': '17:01P',
'HLA-C*17:03': '17:01P',
'HLA-C*18:01': '18:01P',
'HLA-DQA1*01:01:02': '01:01P',
'HLA-DQA1*01:02:01:01': '01:02P',
'HLA-DQA1*01:02:01:02': '01:02P',
'HLA-DQA1*01:02:01:03': '01:02P',
'HLA-DQA1*01:02:01:04': '01:02P',
'HLA-DQA1*01:03:01:01': '01:03P',
'HLA-DQA1*01:03:01:02': '01:03P',
'HLA-DQA1*01:04:01:01': '01:01P',
'HLA-DQA1*01:04:01:02': '01:01P',
'HLA-DQA1*01:05:01': '01:01P',
'HLA-DQA1*01:07': '',
'HLA-DQA1*01:10': '',
'HLA-DQA1*01:11': '01:02P',
'HLA-DQA1*02:01': '',
'HLA-DQA1*03:01:01': '03:01P',
'HLA-DQA1*03:02': '03:01P',
'HLA-DQA1*03:03:01': '03:01P',
'HLA-DQA1*04:01:02:01': '04:01P',
'HLA-DQA1*04:01:02:02': '04:01P',
'HLA-DQA1*04:02': '04:01P',
'HLA-DQA1*05:01:01:01': '05:01P',
'HLA-DQA1*05:01:01:02': '05:01P',
'HLA-DQA1*05:03': '05:01P',
'HLA-DQA1*05:05:01:01': '05:01P',
'HLA-DQA1*05:05:01:02': '05:01P',
'HLA-DQA1*05:05:01:03': '05:01P',
'HLA-DQA1*05:11': '05:01P',
'HLA-DQA1*06:01:01': '06:01P',
'HLA-DQB1*02:01:01': '02:01P',
'HLA-DQB1*02:02:01': '',
'HLA-DQB1*03:01:01:01': '03:01P',
'HLA-DQB1*03:01:01:02': '03:01P',
'HLA-DQB1*03:01:01:03': '03:01P',
'HLA-DQB1*03:02:01': '03:02P',
'HLA-DQB1*03:03:02:01': '03:03P',
'HLA-DQB1*03:03:02:02': '03:03P',
'HLA-DQB1*03:03:02:03': '03:03P',
'HLA-DQB1*03:05:01': '03:05P',
'HLA-DQB1*05:01:01:01': '05:01P',
'HLA-DQB1*05:01:01:02': '05:01P',
'HLA-DQB1*05:03:01:01': '05:03P',
'HLA-DQB1*05:03:01:02': '05:03P',
'HLA-DQB1*06:01:01': '06:01P',
'HLA-DQB1*06:02:01': '06:02P',
'HLA-DQB1*06:03:01': '06:03P',
'HLA-DQB1*06:09:01': '06:09P',
'HLA-DRB1*01:01:01': '01:01P',
'HLA-DRB1*01:02:01': '01:02P',
'HLA-DRB1*03:01:01:01': '03:01P',
'HLA-DRB1*03:01:01:02': '03:01P',
'HLA-DRB1*04:03:01': '04:03P',
'HLA-DRB1*07:01:01:01': '07:01P',
'HLA-DRB1*07:01:01:02': '07:01P',
'HLA-DRB1*08:03:02': '08:03P',
'HLA-DRB1*09:21': '09:01P',
'HLA-DRB1*10:01:01': '10:01P',
'HLA-DRB1*11:01:01': '11:01P',
'HLA-DRB1*11:01:02': '11:01P',
'HLA-DRB1*11:04:01': '11:04P',
'HLA-DRB1*12:01:01': '12:01P',
'HLA-DRB1*12:17': '12:01P',
'HLA-DRB1*13:01:01': '13:01P',
'HLA-DRB1*13:02:01': '13:02P',
'HLA-DRB1*14:05:01': '14:05P',
'HLA-DRB1*14:54:01': '14:01P',
'HLA-DRB1*15:01:01:01': '15:01P',
'HLA-DRB1*15:01:01:02': '15:01P',
'HLA-DRB1*15:01:01:03': '15:01P',
'HLA-DRB1*15:01:01:04': '15:01P',
'HLA-DRB1*15:02:01': '15:02P',
'HLA-DRB1*15:03:01:01': '15:03P',
'HLA-DRB1*15:03:01:02': '15:03P',
'HLA-DRB1*16:02:01': '16:02P'}
|
desktop/core/ext-py/opentracing-2.2.0/opentracing/mocktracer/tracer.py | yetsun/hue | 5,079 | 12681333 |
# Copyright (c) The OpenTracing Authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from threading import Lock
import time
import opentracing
from opentracing import Format, Tracer
from opentracing import UnsupportedFormatException
from opentracing.scope_managers import ThreadLocalScopeManager
from .context import SpanContext
from .span import MockSpan
class MockTracer(Tracer):
"""MockTracer makes it easy to test the semantics of OpenTracing
instrumentation.
By using a MockTracer as a :class:`~opentracing.Tracer` implementation
for tests, a developer can assert that :class:`~opentracing.Span`
properties and relationships with other
**Spans** are defined as expected by instrumentation code.
By default, MockTracer registers propagators for :attr:`Format.TEXT_MAP`,
:attr:`Format.HTTP_HEADERS` and :attr:`Format.BINARY`. The user should
call :func:`register_propagator()` for each additional inject/extract
format.
"""
def __init__(self, scope_manager=None):
"""Initialize a MockTracer instance."""
scope_manager = ThreadLocalScopeManager() \
if scope_manager is None else scope_manager
super(MockTracer, self).__init__(scope_manager)
self._propagators = {}
self._finished_spans = []
self._spans_lock = Lock()
# Simple-as-possible (consecutive for repeatability) id generation.
self._next_id = 0
self._next_id_lock = Lock()
self._register_required_propagators()
def register_propagator(self, format, propagator):
"""Register a propagator with this MockTracer.
:param string format: a :class:`~opentracing.Format`
identifier like :attr:`~opentracing.Format.TEXT_MAP`
:param **Propagator** propagator: a **Propagator** instance to handle
inject/extract calls involving `format`
"""
self._propagators[format] = propagator
def _register_required_propagators(self):
from .text_propagator import TextPropagator
from .binary_propagator import BinaryPropagator
self.register_propagator(Format.TEXT_MAP, TextPropagator())
self.register_propagator(Format.HTTP_HEADERS, TextPropagator())
self.register_propagator(Format.BINARY, BinaryPropagator())
def finished_spans(self):
"""Return a copy of all finished **Spans** started by this MockTracer
(since construction or the last call to :meth:`~MockTracer.reset()`)
:rtype: list
:return: a copy of the finished **Spans**.
"""
with self._spans_lock:
return list(self._finished_spans)
def reset(self):
"""Clear the finished **Spans** queue.
Note that this does **not** have any effect on **Spans** created by
MockTracer that have not finished yet; those
will still be enqueued in :meth:`~MockTracer.finished_spans()`
when they :func:`finish()`.
"""
with self._spans_lock:
self._finished_spans = []
def _append_finished_span(self, span):
with self._spans_lock:
self._finished_spans.append(span)
def _generate_id(self):
with self._next_id_lock:
self._next_id += 1
return self._next_id
def start_active_span(self,
operation_name,
child_of=None,
references=None,
tags=None,
start_time=None,
ignore_active_span=False,
finish_on_close=True):
# create a new Span
span = self.start_span(
operation_name=operation_name,
child_of=child_of,
references=references,
tags=tags,
start_time=start_time,
ignore_active_span=ignore_active_span,
)
return self.scope_manager.activate(span, finish_on_close)
def start_span(self,
operation_name=None,
child_of=None,
references=None,
tags=None,
start_time=None,
ignore_active_span=False):
start_time = time.time() if start_time is None else start_time
# See if we have a parent_ctx in `references`
parent_ctx = None
if child_of is not None:
parent_ctx = (
child_of if isinstance(child_of, opentracing.SpanContext)
else child_of.context)
elif references is not None and len(references) > 0:
# TODO only the first reference is currently used
parent_ctx = references[0].referenced_context
# retrieve the active SpanContext
if not ignore_active_span and parent_ctx is None:
scope = self.scope_manager.active
if scope is not None:
parent_ctx = scope.span.context
# Assemble the child ctx
ctx = SpanContext(span_id=self._generate_id())
if parent_ctx is not None:
if parent_ctx._baggage is not None:
ctx._baggage = parent_ctx._baggage.copy()
ctx.trace_id = parent_ctx.trace_id
else:
ctx.trace_id = self._generate_id()
# Tie it all together
return MockSpan(
self,
operation_name=operation_name,
context=ctx,
parent_id=(None if parent_ctx is None else parent_ctx.span_id),
tags=tags,
start_time=start_time)
def inject(self, span_context, format, carrier):
if format in self._propagators:
self._propagators[format].inject(span_context, carrier)
else:
raise UnsupportedFormatException()
def extract(self, format, carrier):
if format in self._propagators:
return self._propagators[format].extract(carrier)
else:
raise UnsupportedFormatException()
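# Usage sketch: a test can drive MockTracer through the ordinary OpenTracing
# API and then assert on the recorded spans; the operation name below is an
# arbitrary example, not something the module defines.
if __name__ == '__main__':
    tracer = MockTracer()
    with tracer.start_active_span('my_operation'):
        pass
    spans = tracer.finished_spans()
    assert spans[0].operation_name == 'my_operation'
    tracer.reset()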
|
strategies/gekko-japonicus-master/promoterz/webServer/graphs.py | tobby2002/tradyai-api | 229 | 12681336 |
#!/bin/python
import dash_core_components as dcc
from evaluation.gekko.statistics import epochStatisticsNames, periodicStatisticsNames
def updateWorldGraph(app, WORLD):
environmentData = [
{
}
]
populationGroupData = [
{
'x': [locale.position[0]],
'y': [locale.position[1]],
'type': 'scatter',
'name': locale.name,
'showscale': False,
'mode': 'markers',
'marker': {
'symbol': 'square'
}
} for locale in WORLD.locales
]
fig = {
'data': populationGroupData,
'layout': {
'title': "World Topology: 2D MAP"
}
}
G = dcc.Graph(id="WorldGraph", figure=fig)
#app.layout.get("WorldGraphContainer").children = [G]
app.WorldGraph = G
return G
def updateLocaleGraph(app, LOCALE):
GraphName = LOCALE.name
print('Loading %s' % GraphName)
Statistics = LOCALE.EvolutionStatistics
ID = [s for s in GraphName if s.isdigit()]
annotations = []
oldLocaleGraph = None
for lidx, localeGraph in enumerate(app.LocaleGraphs):
if localeGraph.id == LOCALE.name:
oldLocaleGraph = lidx
break
statisticsNames = {}
statisticsNames.update(epochStatisticsNames)
# statisticsNames.update(periodicStatisticsNames)
annotationFontDescription = {
'family': 'Arial',
'size': 12,
'color': 'rgb(37,37,37)'
}
"""
for Statistic in Statistics:
if 'dateRange' in Statistic.keys():
if Statistic['dateRange']:
for R, dateRange in enumerate(Statistic['dateRange']):
if dateRange is not None:
annotations.append(
{
'xref': 'axis',
'yref': 'paper',
'xanchor': 'left',
'yanchor': 'bottom',
'font': annotationFontDescription,
'x': R,
'y': 1 if not len(annotations) %
2 else 0.93, # avoid label overlap;
'text': dateRange,
}
)
"""
colorSequence = [
(188, 189, 34),
(100, 11, 182),
(186, 3, 34),
(45, 111, 45),
(66, 128, 66),
(128, 66, 66),
]
statNames = [
'avg', 'std', 'min',
'max',
#'evaluationScore',
#'evaluationScoreOnSecondary'
]
DATA = [
{
'x': [Statistic['id'] for Statistic in Statistics],
'y': [Statistic[statNames[S]] for Statistic in Statistics],
'type': 'line',
'name': statisticsNames[statNames[S]],
'line': {'color': 'rgb%s' % str(colorSequence[S])},
}
for S in range(len(statNames))
]
fig = {
'data': DATA,
'layout': {
'title': 'Evolution at %s' % GraphName,
'annotations': annotations
},
}
G = dcc.Graph(figure=fig, id=LOCALE.name)
if oldLocaleGraph is not None:
app.LocaleGraphs[oldLocaleGraph] = G
else:
app.LocaleGraphs.append(G)
return G
def updateEvalbreakGraph(app, EvaluationSummary):
K = ["evaluation", "secondary"]
GES = dict([(k, []) for k in K])
for E in EvaluationSummary:
for k in K:
if k in E.keys():
GES[k].append(E[k])
else:
GES[k].append(None)
DATA = [
{
'x': list(range(len(GES[KEY]))),
'y': GES[KEY],
'type': 'line',
'name': KEY.upper()
} for KEY in GES.keys()
]
figure = {
'data': DATA,
'layout': {
'title': "Evaluation Breaks"
}
}
G = dcc.Graph(figure=figure, id="EvaluationBreaksGraph")
app.EvalBreakGraph = G
return G
|
peregrinearb/utils/__init__.py | kecheon/peregrine | 954 | 12681348 |
from .drawing import *
from .general import *
from .multi_exchange import create_multi_exchange_graph, create_weighted_multi_exchange_digraph, \
multi_graph_to_log_graph
from .single_exchange import load_exchange_graph, create_exchange_graph, FeesNotAvailable
from .misc import last_index_in_list, next_to_each_other
from .data_structures import StackSet, PrioritySet, Collections
from .graph_utils import get_greatest_edge_in_bunch, get_least_edge_in_bunch
from .wss_graph_builder import *
|
django/contrib/localflavor/hr/hr_choices.py | kix/django | 790 | 12681350 |
# -*- coding: utf-8 -*-
"""
Sources:
Croatian Counties: http://en.wikipedia.org/wiki/ISO_3166-2:HR
Croatia doesn't have official abbreviations for counties.
The ones provided are in common use.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
HR_COUNTY_CHOICES = (
('GZG', _('Grad Zagreb')),
('BBŽ', _('Bjelovarsko-bilogorska županija')),
('BPŽ', _('Brodsko-posavska županija')),
('DNŽ', _('Dubrovačko-neretvanska županija')),
('IŽ', _('Istarska županija')),
('KŽ', _('Karlovačka županija')),
('KKŽ', _('Koprivničko-križevačka županija')),
('KZŽ', _('Krapinsko-zagorska županija')),
('LSŽ', _('Ličko-senjska županija')),
('MŽ', _('Međimurska županija')),
('OBŽ', _('Osječko-baranjska županija')),
('PSŽ', _('Požeško-slavonska županija')),
('PGŽ', _('Primorsko-goranska županija')),
('SMŽ', _('Sisačko-moslavačka županija')),
('SDŽ', _('Splitsko-dalmatinska županija')),
('ŠKŽ', _('Šibensko-kninska županija')),
('VŽ', _('Varaždinska županija')),
('VPŽ', _('Virovitičko-podravska županija')),
('VSŽ', _('Vukovarsko-srijemska županija')),
('ZDŽ', _('Zadarska županija')),
('ZGŽ', _('Zagrebačka županija')),
)
"""
Sources:
http://hr.wikipedia.org/wiki/Dodatak:Popis_registracijskih_oznaka_za_cestovna_vozila_u_Hrvatskoj
Only common license plate prefixes are provided. Special cases and obsolete prefixes are omitted.
"""
HR_LICENSE_PLATE_PREFIX_CHOICES = (
('BJ', 'BJ'),
('BM', 'BM'),
('ČK', 'ČK'),
('DA', 'DA'),
('DE', 'DE'),
('DJ', 'DJ'),
('DU', 'DU'),
('GS', 'GS'),
('IM', 'IM'),
('KA', 'KA'),
('KC', 'KC'),
('KR', 'KR'),
('KT', 'KT'),
('KŽ', 'KŽ'),
('MA', 'MA'),
('NA', 'NA'),
('NG', 'NG'),
('OG', 'OG'),
('OS', 'OS'),
('PU', 'PU'),
('PŽ', 'PŽ'),
('RI', 'RI'),
('SB', 'SB'),
('SK', 'SK'),
('SL', 'SL'),
('ST', 'ST'),
('ŠI', 'ŠI'),
('VK', 'VK'),
('VT', 'VT'),
('VU', 'VU'),
('VŽ', 'VŽ'),
('ZD', 'ZD'),
('ZG', 'ZG'),
('ŽU', 'ŽU'),
)
"""
The list includes county and cellular network phone number prefixes.
"""
HR_PHONE_NUMBER_PREFIX_CHOICES = (
('1', '01'),
('20', '020'),
('21', '021'),
('22', '022'),
('23', '023'),
('31', '031'),
('32', '032'),
('33', '033'),
('34', '034'),
('35', '035'),
('40', '040'),
('42', '042'),
('43', '043'),
('44', '044'),
('47', '047'),
('48', '048'),
('49', '049'),
('51', '051'),
('52', '052'),
('53', '053'),
('91', '091'),
('92', '092'),
('95', '095'),
('97', '097'),
('98', '098'),
('99', '099'),
)
|
OnePy/custom_module/trade_log_analysis.py | Chandlercjy/OnePyfx | 321 | 12681352 |
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import pandas as pd
import plotly
from dash.dependencies import Input, Output, State
from plotly import graph_objs as go
from OnePy.sys_module.metabase_env import OnePyEnvBase
TRADE_LOG = OnePyEnvBase.full_trade_log
APP = dash.Dash()
APP.scripts.config.serve_locally = True
APP.layout = html.Div([
html.H4('OnePy Trade Log Analysis'),
dt.DataTable(
rows=TRADE_LOG.to_dict('records'),
row_selectable=True,
filterable=True,
sortable=True,
selected_row_indices=[],
id='trade_log'
),
dcc.Graph(
id='drawdown_pnl'
),
dcc.Graph(
id='run_up_pnl'
),
], className="container")
@APP.callback(
Output('trade_log', 'selected_row_indices'),
[Input('drawdown_pnl', 'clickData')],
[State('trade_log', 'selected_row_indices')])
def update_selected_row_indices(clickData, selected_row_indices):
if clickData:
for point in clickData['points']:
if point['pointNumber'] in selected_row_indices:
selected_row_indices.remove(point['pointNumber'])
else:
selected_row_indices.append(point['pointNumber'])
return selected_row_indices
@APP.callback(
Output('drawdown_pnl', 'figure'),
[Input('trade_log', 'rows'),
Input('trade_log', 'selected_row_indices')])
def update_run_up_figure(rows, selected_row_indices):
dff = pd.DataFrame(rows)
profit_diff = dff.loc[dff.returns_diff > 0]
loss_diff = dff.loc[dff.returns_diff < 0]
fig = plotly.tools.make_subplots(
rows=1, cols=1,
shared_xaxes=True)
fig['layout'].update(dict(title='Profit & Loss vs Run-up'))
fig['layout']['xaxis'].update(dict(title='Run-up(%)'))
fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))
fig.append_trace({
'x': profit_diff['run_up']*100,
'y': profit_diff['returns_diff']*100,
'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,
'type': 'scatter',
'marker': dict(color='black'),
'mode': 'markers',
'name': 'win',
'line': {'width': 1}
}, 1, 1)
fig.append_trace({
'x': loss_diff['run_up']*100,
'y': -loss_diff['returns_diff']*100,
'type': 'scatter',
'text': loss_diff.entry_date + ' to ' + loss_diff.exit_date,
'marker': dict(color='red'),
'mode': 'markers',
'name': 'lose',
'line': {'width': 1}
}, 1, 1)
fig.append_trace({
'x': [0, 10],
'y': [0, 10],
'type': 'scatter',
'mode': 'lines',
'name': 'Win diagonal',
'line': {'width': 1}
}, 1, 1)
return fig
@APP.callback(
Output('run_up_pnl', 'figure'),
[Input('trade_log', 'rows'),
Input('trade_log', 'selected_row_indices')])
def update__drawdown_figure(rows, selected_row_indices):
dff = pd.DataFrame(rows)
profit_diff = dff.loc[dff.returns_diff > 0]
loss_diff = dff.loc[dff.returns_diff < 0]
fig = plotly.tools.make_subplots(
rows=1, cols=1,
shared_xaxes=True)
fig['layout'].update(dict(title='Profit & Loss vs Drawdown'))
fig['layout']['xaxis'].update(dict(title='Drawdown(%)'))
fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))
fig.append_trace({
'x': profit_diff['drawdown']*100,
'y': profit_diff['returns_diff']*100,
'type': 'scatter',
'marker': dict(color='black'),
'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,
'mode': 'markers',
'name': 'win',
'line': {'width': 1}
}, 1, 1)
fig.append_trace({
'x': loss_diff['drawdown']*100,
'y': -loss_diff['returns_diff']*100,
'text': loss_diff.entry_date + ' to ' + loss_diff.exit_date,
'type': 'scatter',
'marker': dict(color='red'),
'mode': 'markers',
'name': 'lose',
'line': {'width': 1}
}, 1, 1)
fig.append_trace({
'x': [0, 10],
'y': [0, 10],
'type': 'scatter',
'mode': 'lines',
'name': 'Loss diagonal',
'line': {'width': 1}
}, 1, 1)
return fig
if __name__ == '__main__':
APP.run_server(debug=True)
|
python/tests/test_ir.py | clayne/gtirb | 230 | 12681353 |
import os
import tempfile
import unittest
import gtirb
IR_FILE = tempfile.mktemp(suffix=".gtirb")
class IRTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ir = gtirb.IR()
m = gtirb.Module(
binary_path="binary_path",
file_format=gtirb.Module.FileFormat.RAW,
isa=gtirb.Module.ISA.ValidButUnsupported,
name="name",
preferred_addr=1,
rebase_delta=2,
ir=ir,
)
s = gtirb.Section(
name="name",
flags=(
gtirb.Section.Flag.Executable,
gtirb.Section.Flag.Readable,
gtirb.Section.Flag.Loaded,
gtirb.Section.Flag.Initialized,
),
module=m,
)
bi = gtirb.ByteInterval(
address=0, size=10, contents=b"abcd", section=s
)
cb = gtirb.CodeBlock(size=4, offset=0, decode_mode=1, byte_interval=bi)
_ = gtirb.DataBlock(size=6, offset=4, byte_interval=bi)
sym = gtirb.Symbol(name="name", payload=cb, module=m)
sac = gtirb.SymAddrConst(
0, sym, {gtirb.SymbolicExpression.Attribute.Part1}
)
bi.symbolic_expressions[2] = sac
p = gtirb.ProxyBlock(module=m)
ir.cfg.add(
gtirb.Edge(
cb,
p,
gtirb.Edge.Label(
type=gtirb.Edge.Type.Branch, conditional=False, direct=True
),
)
)
ir.cfg.add(gtirb.Edge(p, p))
m.aux_data["key"] = gtirb.AuxData(gtirb.Offset(s, 777), "Offset")
ir.aux_data["key"] = gtirb.AuxData("value", "string")
self.ir = ir
def setUp(self):
self.ir.save_protobuf(IR_FILE)
def tearDown(self):
os.remove(IR_FILE)
def test_ir_protobuf_load(self):
new_ir = gtirb.IR.load_protobuf(IR_FILE)
self.assertTrue(self.ir.deep_eq(new_ir))
self.assertNotEqual(
self.ir.modules[0].aux_data["key"].data,
new_ir.modules[0].aux_data["key"].data,
)
if __name__ == "__main__":
unittest.main()
|
devito/builtins/arithmetic.py | reguly/devito | 204 | 12681354 |
import numpy as np
import devito as dv
from devito.builtins.utils import MPIReduction
__all__ = ['norm', 'sumall', 'inner', 'mmin', 'mmax']
@dv.switchconfig(log_level='ERROR')
def norm(f, order=2):
"""
Compute the norm of a Function.
Parameters
----------
f : Function
Input Function.
order : int, optional
The order of the norm. Defaults to 2.
"""
Pow = dv.finite_differences.differentiable.Pow
kwargs = {}
if f.is_TimeFunction and f._time_buffering:
kwargs[f.time_dim.max_name] = f._time_size - 1
# Protect SparseFunctions from accessing duplicated (out-of-domain) data,
# otherwise we would eventually be summing more than expected
p, eqns = f.guard() if f.is_SparseFunction else (f, [])
s = dv.types.Symbol(name='sum', dtype=f.dtype)
with MPIReduction(f) as mr:
op = dv.Operator([dv.Eq(s, 0.0)] +
eqns +
[dv.Inc(s, dv.Abs(Pow(p, order))), dv.Eq(mr.n[0], s)],
name='norm%d' % order)
op.apply(**kwargs)
v = np.power(mr.v, 1/order)
return f.dtype(v)
def sumall(f):
"""
Compute the sum of all Function data.
Parameters
----------
f : Function
Input Function.
"""
kwargs = {}
if f.is_TimeFunction and f._time_buffering:
kwargs[f.time_dim.max_name] = f._time_size - 1
# Protect SparseFunctions from accessing duplicated (out-of-domain) data,
# otherwise we would eventually be summing more than expected
p, eqns = f.guard() if f.is_SparseFunction else (f, [])
s = dv.types.Symbol(name='sum', dtype=f.dtype)
with MPIReduction(f) as mr:
op = dv.Operator([dv.Eq(s, 0.0)] +
eqns +
[dv.Inc(s, p), dv.Eq(mr.n[0], s)],
name='sum')
op.apply(**kwargs)
return f.dtype(mr.v)
def inner(f, g):
"""
Inner product of two Functions.
Parameters
----------
f : Function
First input operand
g : Function
Second input operand
Raises
------
ValueError
If the two input Functions are defined over different grids, or have
different dimensionality, or their dimension-wise sizes don't match.
        If the two inputs are SparseFunctions and their coordinates don't match,
        the exception is raised.
Notes
-----
The inner product is the sum of all dimension-wise products. For 1D Functions,
the inner product corresponds to the dot product.
"""
# Input check
if f.is_TimeFunction and f._time_buffering != g._time_buffering:
raise ValueError("Cannot compute `inner` between save/nosave TimeFunctions")
if f.shape != g.shape:
raise ValueError("`f` and `g` must have same shape")
if f._data is None or g._data is None:
raise ValueError("Uninitialized input")
if f.is_SparseFunction and not np.all(f.coordinates_data == g.coordinates_data):
raise ValueError("Non-matching coordinates")
kwargs = {}
if f.is_TimeFunction and f._time_buffering:
kwargs[f.time_dim.max_name] = f._time_size - 1
# Protect SparseFunctions from accessing duplicated (out-of-domain) data,
# otherwise we would eventually be summing more than expected
rhs, eqns = f.guard(f*g) if f.is_SparseFunction else (f*g, [])
s = dv.types.Symbol(name='sum', dtype=f.dtype)
with MPIReduction(f, g) as mr:
op = dv.Operator([dv.Eq(s, 0.0)] +
eqns +
[dv.Inc(s, rhs), dv.Eq(mr.n[0], s)],
name='inner')
op.apply(**kwargs)
return f.dtype(mr.v)
def mmin(f):
"""
Retrieve the minimum.
Parameters
----------
f : array_like or Function
Input operand.
"""
if isinstance(f, dv.Constant):
return f.data
elif isinstance(f, dv.types.dense.DiscreteFunction):
with MPIReduction(f, op=dv.mpi.MPI.MIN) as mr:
mr.n.data[0] = np.min(f.data_ro_domain).item()
return mr.v.item()
else:
raise ValueError("Expected Function, not `%s`" % type(f))
def mmax(f):
"""
Retrieve the maximum.
Parameters
----------
f : array_like or Function
Input operand.
"""
if isinstance(f, dv.Constant):
return f.data
elif isinstance(f, dv.types.dense.DiscreteFunction):
with MPIReduction(f, op=dv.mpi.MPI.MAX) as mr:
mr.n.data[0] = np.max(f.data_ro_domain).item()
return mr.v.item()
else:
raise ValueError("Expected Function, not `%s`" % type(f))
|
ppcls/loss/pairwisecosface.py | TxT1212/PaddleClas | 3,763 | 12681355 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class PairwiseCosface(nn.Layer):
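    """Pairwise CosFace-style loss on L2-normalized embeddings: for each anchor
    in the batch, cosine similarities to same-label samples (positives) and
    different-label samples (negatives) are compared, and the softplus of
    log-sum-exp terms below penalizes negative similarities that come within
    `margin` of (or exceed) a positive similarity, with `gamma` as the scale.
    """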
def __init__(self, margin, gamma):
super(PairwiseCosface, self).__init__()
self.margin = margin
self.gamma = gamma
def forward(self, embedding, targets):
if isinstance(embedding, dict):
embedding = embedding['features']
# Normalize embedding features
embedding = F.normalize(embedding, axis=1)
dist_mat = paddle.matmul(embedding, embedding, transpose_y=True)
N = dist_mat.shape[0]
is_pos = targets.reshape([N,1]).expand([N,N]).equal(paddle.t(targets.reshape([N,1]).expand([N,N]))).astype('float')
is_neg = targets.reshape([N,1]).expand([N,N]).not_equal(paddle.t(targets.reshape([N,1]).expand([N,N]))).astype('float')
# Mask scores related to itself
is_pos = is_pos - paddle.eye(N, N)
s_p = dist_mat * is_pos
s_n = dist_mat * is_neg
logit_p = -self.gamma * s_p + (-99999999.) * (1 - is_pos)
logit_n = self.gamma * (s_n + self.margin) + (-99999999.) * (1 - is_neg)
loss = F.softplus(paddle.logsumexp(logit_p, axis=1) + paddle.logsumexp(logit_n, axis=1)).mean()
return {"PairwiseCosface": loss}
|
python/src/main/python/cmd_helpers.py | KishkinJ10/graphicsfuzz | 519 | 12681367 |
# Copyright 2018 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
HERE = os.path.abspath(__file__)
path = os.path.join
def get_tool_path():
return path(get_bin_jar_dirs()[1], "tool-1.0.jar")
def get_bin_jar_dirs():
def try_get_jar_bin_dirs(install_root):
bin_dir = path(install_root, "bin")
jar_dir = path(install_root, "jar")
if os.path.isdir(bin_dir) and os.path.isdir(jar_dir):
return os.path.abspath(bin_dir), os.path.abspath(jar_dir)
return None
# Perhaps we are running from the IDE. Check this first, since the deployed files are likely also present if
# running from the IDE.
res = try_get_jar_bin_dirs(path(os.path.dirname(HERE), os.pardir, os.pardir, os.pardir, os.pardir, "graphicsfuzz",
"target", "graphicsfuzz"))
if res is not None:
return res
# Perhaps we are running from the zip.
res = try_get_jar_bin_dirs(path(os.path.dirname(HERE), os.pardir))
if res is not None:
return res
raise Exception("Could not find bin and jar directories")
def get_shaders_dir():
# Perhaps we are running from the IDE. Check this first, since the deployed files are likely also present if
# running from the IDE.
res = path(os.path.dirname(HERE), os.pardir, os.pardir, os.pardir, os.pardir, "shaders", "src", "main", "glsl")
if os.path.isdir(res):
return os.path.abspath(res)
# Perhaps we are running from the zip.
res = path(os.path.dirname(HERE), os.pardir, "shaders")
if os.path.isdir(res):
return os.path.abspath(res)
raise Exception("Could not find shaders directory")
def execute(cmd, verbose):
if verbose:
print("Validator command: " + " ".join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
validator_stdout, validator_stderr = proc.communicate()
assert (proc.returncode is not None)
return {"returncode": proc.returncode,
"stdout": validator_stdout,
"stderr": validator_stderr}
def validate_frag(frag_file, validator, verbose):
cmd = [validator, frag_file]
return execute(cmd, verbose)
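# Usage sketch: the shader path and validator binary below are illustrative
# placeholders, not values shipped with this module.
if __name__ == '__main__':
    result = validate_frag('shader.frag', 'glslangValidator', verbose=True)
    print(result['returncode'])
    print(result['stdout'])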
|
tests/runner.py | feedhq/feedhq | 361 | 12681381 |
from django.conf import settings
from django.core.management import call_command
from django.test.runner import DiscoverRunner
from elasticsearch.exceptions import NotFoundError
from feedhq import es
class ESTestSuiteRunner(DiscoverRunner):
def setup_test_environment(self):
super().setup_test_environment()
try:
es.client.indices.delete(settings.ES_INDEX)
except NotFoundError:
pass
call_command('create_index')
es.wait_for_yellow()
def teardown_test_environment(self):
super().teardown_test_environment()
es.client.indices.delete(settings.ES_INDEX)
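# Usage sketch: Django picks this runner up via the TEST_RUNNER setting; the
# dotted path below assumes this module stays at tests/runner.py.
#   TEST_RUNNER = 'tests.runner.ESTestSuiteRunner'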
|
todo/views/list_detail.py | Sowmya-1998/https-github.com-shacker-django-todo | 567 | 12681390 | import bleach
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils import timezone
from todo.forms import AddEditTaskForm
from todo.models import Task, TaskList
from todo.utils import send_notify_mail, staff_check
@login_required
@user_passes_test(staff_check)
def list_detail(request, list_id=None, list_slug=None, view_completed=False) -> HttpResponse:
"""Display and manage tasks in a todo list.
"""
# Defaults
task_list = None
form = None
# Which tasks to show on this list view?
if list_slug == "mine":
tasks = Task.objects.filter(assigned_to=request.user)
else:
# Show a specific list, ensuring permissions.
task_list = get_object_or_404(TaskList, id=list_id)
if task_list.group not in request.user.groups.all() and not request.user.is_superuser:
raise PermissionDenied
tasks = Task.objects.filter(task_list=task_list.id)
# Additional filtering
if view_completed:
tasks = tasks.filter(completed=True)
else:
tasks = tasks.filter(completed=False)
# ######################
# Add New Task Form
# ######################
if request.POST.getlist("add_edit_task"):
form = AddEditTaskForm(
request.user,
request.POST,
initial={"assigned_to": request.user.id, "priority": 999, "task_list": task_list},
)
if form.is_valid():
new_task = form.save(commit=False)
new_task.created_by = request.user
new_task.note = bleach.clean(form.cleaned_data["note"], strip=True)
form.save()
# Send email alert only if Notify checkbox is checked AND assignee is not same as the submitter
if (
"notify" in request.POST
and new_task.assigned_to
and new_task.assigned_to != request.user
):
send_notify_mail(new_task)
messages.success(request, 'New task "{t}" has been added.'.format(t=new_task.title))
return redirect(request.path)
else:
# Don't allow adding new tasks on some views
if list_slug not in ["mine", "recent-add", "recent-complete"]:
form = AddEditTaskForm(
request.user,
initial={"assigned_to": request.user.id, "priority": 999, "task_list": task_list},
)
context = {
"list_id": list_id,
"list_slug": list_slug,
"task_list": task_list,
"form": form,
"tasks": tasks,
"view_completed": view_completed,
}
return render(request, "todo/list_detail.html", context)
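# Routing sketch: the URL patterns and names below are illustrative only, not
# the package's canonical urls.py.
#   path("mine/", list_detail, {"list_slug": "mine"}, name="mine"),
#   path("<int:list_id>/<str:list_slug>/", list_detail, name="list_detail"),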
|
scripts/rmg2to3.py | tza0035/RMG-Py | 250 | 12681397 | #!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2021 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This script is to help users transition from RMG-Py 2.4.x to RMG-Py 3.x by
automatically detecting variables, functions, attributes, methods, and arguments
which have been renamed. It works by using basic regex searches, so it is not
syntax aware and should therefore be used with caution. However, one benefit to
this approach is that this script can be used on any text file, including
IPython notebooks.
There are a number of available options. The only required arguments are the
files to analyze and the stage of changes to apply. The name changes are roughly
categorized into stage 1 with commonly used functionality and stage 2 with less
commonly used or private names. In addition, there is a list of dangerous names
which have increased risk of resulting in undesired replacements. They are
not replaced by default, but can be included using the `-x` argument.
By default, names are matched as individual words, i.e. surrounded by non-word
characters. Additionally, argument replacements check for an equal sign after
the name, while attribute and method replacements check for a period before
the name. All of these additional requirements can be disabled if it is desired
to do a naive search for all matches, although it is not recommended.
Also by default, only a diff of the potential changes are displayed to stdout.
The changes can be written to file by providing the `-w` argument. A backup of
the file is saved by default if writing to file, which can be disabled by
providing the `-n` argument.
A suggested option set for transitioning code which depends on RMG is
python rmg2to3.py -0wn filename
which is equivalent to
python rmg2to3.py --both-stages --write --no-backups filename
The script also accepts multiple filenames by manual specification, e.g.
python rmg2to3.py -0wn file1 file2 file3
or using glob patterns, e.g.
python rmg2to3.py -0wn *.py
Always be sure to double-check the result!
"""
import argparse
import difflib
import os
import re
import shutil
import sys
from tqdm import tqdm
# Module names
MODULES = {
'canteraModel': 'canteramodel',
'canteraTest': 'canteramodelTest',
'extractInfoFromckcsv': 'ckcsvparser',
'diff_models': 'diffmodels',
'diff_modelsTest': 'diffmodelsTest',
'fluxtest': 'fluxdiagramTest',
'generate_reactions': 'generatereactions',
'testGenerateReactions': 'generatereactionsTest',
'merge_models': 'mergemodels',
'merge_modelsTest': 'mergemodelsTest',
'observablesRegression': 'observablesregression',
}
# Global variables and functions
GLOBALS1 = {
# Arkane
'jobList': 'job_list',
'transitionStateDict': 'transition_state_dict',
# rmgpy.data.base
'makeLogicNode': 'make_logic_node',
'getAllCombinations': 'get_all_combinations',
# rmgpy.data.rmg
'getDB': 'get_db',
# rmgpy.data.solvation
'saveEntry': 'save_entry',
# rmgpy.data.thermo
'findCp0andCpInf': 'find_cp0_and_cpinf',
# rmgpy.data.kinetics.family
'informationGain': 'information_gain',
'getObjectiveFunction': 'get_objective_function',
# rmgpy.data.kinetics.rules
'removeIdenticalKinetics': 'remove_identical_kinetics',
'getTemplateLabel': 'get_template_label',
# rmgpy.kinetics.model
'getRateCoefficientUnitsFromReactionOrder': 'get_rate_coefficient_units_from_reaction_order',
'getReactionOrderFromRateCoefficientUnits': 'get_reaction_order_from_rate_coefficient_units',
# rmgpy.kinetics.arrhenius
'getw0': 'get_w0',
'getw0s': 'get_w0s',
# rmgpy.kinetics.diffusionLimited
'diffusionLimiter': 'diffusion_limiter',
# rmgpy.molecule.adjlist
'fromAdjacencyList': 'from_adjacency_list',
'toAdjacencyList': 'to_adjacency_list',
# rmgpy.molecule.atomtype
'getFeatures': 'get_features',
'getAtomType': 'get_atomtype',
# rmgpy.molecule.converter
'toRDKitMol': 'to_rdkit_mol',
'fromRDKitMol': 'from_rdkit_mol',
'toOBMol': 'to_ob_mol',
'fromOBMol': 'from_ob_mol',
# rmgpy.molecule.element
'getElement': 'get_element',
'BDE_elements': 'bde_elements',
'BDEDict': 'bde_dict',
'BDEs': 'bdes',
# rmgpy.molecule.graph
'getVertexConnectivityValue': 'get_vertex_connectivity_value',
'getVertexSortingLabel': 'get_vertex_sorting_label',
# rmgpy.molecule.symmetry
'calculateAtomSymmetryNumber': 'calculate_atom_symmetry_number',
'calculateBondSymmetryNumber': 'calculate_bond_symmetry_number',
'calculateAxisSymmetryNumber': 'calculate_axis_symmetry_number',
'calculateCyclicSymmetryNumber': 'calculate_cyclic_symmetry_number',
# rmgpy.molecule.util
'retrieveElementCount': 'get_element_count',
# rmgpy.pdep.cse
'applyChemicallySignificantEigenvaluesMethod': 'apply_chemically_significant_eigenvalues_method',
# rmgpy.pdep.me
'generateFullMEMatrix': 'generate_full_me_matrix',
# rmgpy.pdep.msc
'applyModifiedStrongCollisionMethod': 'apply_modified_strong_collision_method',
# rmgpy.pdep.re
'applyReservoirStateMethod': 'apply_reservoir_state_method',
# rmgpy.pdep.reaction
'calculateMicrocanonicalRateCoefficients': 'calculate_microcanonical_rate_coefficient',
'applyRRKMTheory': 'apply_rrkm_theory',
'applyInverseLaplaceTransformMethod': 'apply_inverse_laplace_transform_method',
'fitInterpolationModel': 'fit_interpolation_model',
# rmgpy.rmg.main
'initializeLog': 'initialize_log',
'get_condaPackage': 'get_conda_package',
'processProfileStats': 'process_profile_stats',
'makeProfileGraph': 'make_profile_graph',
# rmgpy.rmg.input
'setGlobalRMG': 'set_global_rmg',
'readInputFile': 'read_input_file',
'readThermoInputFile': 'read_thermo_input_file',
'saveInputFile': 'save_input_file',
'getInput': 'get_input',
# rmgpy.thermo.thermoengine
'processThermoData': 'process_thermo_data',
'generateThermoData': 'generate_thermo_data',
# rmgpy.thermo.wilhoit
'Wilhoit_to_NASA': 'wilhoit_to_nasa',
'Wilhoit_to_NASA_TintOpt': 'wilhoit_to_nasa_t_int_opt',
'Wilhoit_to_NASA_TintOpt_objFun': 'wilhoit_to_nasa_t_int_opt_obj_fun',
'Wilhoit_to_NASA_TintOpt_objFun_NW': 'wilhoit_to_nasa_t_int_opt_obj_fun_nw',
'Wilhoit_to_NASA_TintOpt_objFun_W': 'wilhoit_to_nasa_t_int_opt_obj_fun_w',
# rmgpy.quantity
'conversionFactorsFromSItoCmMolS': 'conversion_factors_from_si_to_cm_mol_s',
# rmgpy.util
'makeOutputSubdirectory': 'make_output_subdirectory',
# rmgpy.chemkin
'readThermoEntry': 'read_thermo_entry',
'readKineticsEntry': 'read_kinetics_entry',
'readReactionComments': 'read_reaction_comments',
'loadSpeciesDictionary': 'load_species_dictionary',
'loadTransportFile': 'load_transport_file',
'loadChemkinFile': 'load_chemkin_file',
'readSpeciesBlock': 'read_species_block',
'readThermoBlock': 'read_thermo_block',
'readReactionsBlock': 'read_reactions_block',
'getSpeciesIdentifier': 'get_species_identifier',
'writeThermoEntry': 'write_thermo_entry',
'writeReactionString': 'write_reaction_string',
'writeKineticsEntry': 'write_kinetics_entry',
'markDuplicateReaction': 'mark_duplicate_reaction',
'markDuplicateReactions': 'mark_duplicate_reactions',
'saveSpeciesDictionary': 'save_species_dictionary',
'saveTransportFile': 'save_transport_file',
'saveChemkinFile': 'save_chemkin_file',
'saveChemkinSurfaceFile': 'save_chemkin_surface_file',
'saveChemkin': 'save_chemkin',
'saveChemkinFiles': 'save_chemkin_files',
'writeElementsSection': 'write_elements_sections',
# rmgpy.constraints
'failsSpeciesConstraints': 'fails_species_constraints',
# rmgpy.tools.canteraModel
'generateCanteraConditions': 'generate_cantera_conditions',
'getRMGSpeciesFromUserSpecies': 'get_rmg_species_from_user_species',
'findIgnitionDelay': 'find_ignition_delay',
'checkNearlyEqual': 'check_nearly_equal',
'checkEquivalentCanteraSpecies': 'check_equivalent_cantera_species',
'checkEquivalentCanteraReaction': 'check_equivalent_cantera_reaction',
# rmgpy.tools.diff_models
'compareModelKinetics': 'compare_model_kinetics',
'compareModelSpecies': 'compare_model_species',
'compareModelReactions': 'compare_model_reactions',
'saveCompareHTML': 'save_compare_html',
'enthalpyDiff': 'enthalpy_diff',
'kineticsDiff': 'kinetics_diff',
'identicalThermo': 'identical_thermo',
'identicalKinetics': 'identical_kinetics',
'parseCommandLineArguments': 'parse_command_line_arguments',
# rmgpy.tools.fluxdiagram
'maximumNodeCount': 'max_node_count',
'maximumEdgeCount': 'max_edge_count',
'concentrationTolerance': 'concentration_tol',
'speciesRateTolerance': 'species_rate_tol',
'maximumNodePenWidth': 'max_node_pen_width',
'maximumEdgePenWidth': 'max_edge_pen_width',
'centralReactionCount': 'central_reaction_count',
'initialTime': 'initial_time',
'timeStep': 'time_step',
'absoluteTolerance': 'abs_tol',
'relativeTolerance': 'rel_tol',
'framesPerSecond': 'video_fps',
'initialPadding': 'initial_padding',
'finalPadding': 'final_padding',
'generateFluxDiagram': 'generate_flux_diagram',
'addAdjacentNodes': 'add_adjacent_nodes',
'loadChemkinOutput': 'load_chemkin_output',
'createFluxDiagram': 'create_flux_diagram',
# rmgpy.tools.isotopes
'generate_RMG_model': 'generate_rmg_model',
# rmgpy.tools.loader
'loadRMGJob': 'load_rmg_job',
'loadRMGPyJob': 'load_rmg_py_job',
'loadRMGJavaJob': 'load_rmg_java_job',
# rmgpy.tools.plot
'parseCSVData': 'parse_csv_data',
'findNearest': 'find_nearest',
'linearlyInterpolatePoint': 'linearly_interpolate_point',
}
GLOBALS2 = {
# rmgpy.data.base
'removeCommentFromLine': 'remove_comment_from_line',
'splitLineAndComment': 'split_line_and_comment',
# rmgpy.data.rmg
'getDB': 'get_db',
# rmgpy.data.solvation
'generateOldLibraryEntry': 'generate_old_library_entry',
'processOldLibraryEntry': 'process_old_library_entry',
# rmgpy.data.statmechfit
'hoFreqLowerBound': 'ho_freq_lower_bound',
'hoFreqUpperBound': 'ho_freq_upper_bound',
'hrFreqLowerBound': 'hr_freq_lower_bound',
'hrFreqUpperBound': 'hr_freq_upper_bound',
'hrBarrLowerBound': 'hr_barr_lower_bound',
'hrBarrUpperBound': 'hr_barr_upper_bound',
'maxIter': 'max_iter',
'fitStatmechToHeatCapacity': 'fit_statmech_to_heat_capacity',
'fitStatmechDirect': 'fit_statmech_direct',
'fitStatmechPseudoRotors': 'fit_statmech_pseudo_rotors',
'fitStatmechPseudo': 'fit_statmech_pseudo',
'harmonicOscillator_heatCapacity': 'harmonic_oscillator_heat_capacity',
'harmonicOscillator_d_heatCapacity_d_freq': 'harmonic_oscillator_d_heat_capacity_d_freq',
'hinderedRotor_heatCapacity': 'hindered_rotor_heat_capacity',
'hinderedRotor_d_heatCapacity_d_freq': 'hindered_rotor_d_heat_capacity_d_freq',
'hinderedRotor_d_heatCapacity_d_barr': 'hindered_rotor_d_heat_capacity_d_barr',
# rmgpy.data.thermo
'addThermoData': 'add_thermo_data',
'removeThermoData': 'remove_thermo_data',
'averageThermoData': 'average_thermo_data',
'commonAtoms': 'common_atoms',
'combineCycles': 'combine_cycles',
'isAromaticRing': 'is_aromatic_ring',
'isBicyclic': 'is_bicyclic',
'findAromaticBondsFromSubMolecule': 'find_aromatic_bonds_from_sub_molecule',
'convertRingToSubMolecule': 'convert_ring_to_sub_molecule',
'combineTwoRingsIntoSubMolecule': 'combine_two_rings_into_sub_molecule',
'getCopyForOneRing': 'get_copy_for_one_ring',
'getCopyFromTwoRingsWithCommonAtoms': 'get_copy_from_two_rings_with_common_atoms',
'isRingPartialMatched': 'is_ring_partial_matched',
'bicyclicDecompositionForPolyring': 'bicyclic_decomposition_for_polyring',
'splitBicyclicIntoSingleRings': 'split_bicyclic_into_single_rings',
'saturateRingBonds': 'saturate_ring_bonds',
# rmgpy.data.kinetics.family
'_makeRule': '_make_rule',
'_spawnTreeProcess': '_spawn_tree_process',
'_childMakeTreeNodes': '_child_make_tree_nodes',
# rmgpy.molecule.adjlist
'fromOldAdjacencyList': 'from_old_adjacency_list',
'getOldElectronState': 'get_old_electron_state',
'toOldAdjacencyList': 'to_old_adjacency_list',
# rmgpy.molecule.atomtype
'atomTypes': 'ATOMTYPES',
# rmgpy.molecule.converter
'debugRDKitMol': 'debug_rdkit_mol',
# rmgpy.molecule.graph
'_getEdgeVertex1': '_get_edge_vertex1',
'_getEdgeVertex2': '_get_edge_vertex2',
# rmgpy.molecule.inchi
'_parse_H_layer': '_parse_h_layer',
'_parse_E_layer': '_parse_e_layer',
'_parse_N_layer': '_parse_n_layer',
'_create_U_layer': '_create_u_layer',
'_create_P_layer': '_create_p_layer',
# rmgpy.qm.main
'_write_QMfiles_star': '_write_qm_files_star',
'_write_QMfiles': '_write_qm_files',
# rmgpy.qm.molecule
'loadThermoDataFile': 'load_thermo_data_file',
# rmgpy.qm.qmdata
'parseCCLibData': 'parse_cclib_data',
# rmgpy.qm.symmetry'
'makePointGroupDictionary': 'make_point_group_dictionary',
'pointGroupDictionary': 'point_group_dictionary',
# rmgpy.rmg.model
'generateReactionKey': 'generate_reaction_key',
'generateReactionId': 'generate_reaction_id',
'getFamilyLibraryObject': 'get_family_library_object',
'getKey': 'get_key',
'areIdenticalSpeciesReferences': 'are_identical_species_references',
# rmgpy.rmg.output
'saveOutputHTML': 'save_output_html',
'saveDiffHTML': 'save_diff_html',
'saveOutput': 'save_output',
# rmgpy.statmech.conformer
'getDensityOfStatesForst': 'get_density_of_states_forst',
# rmgpy.statmech.schrodinger
'unitDegeneracy': 'unit_degeneracy',
'convolveBS': 'convolve_bs',
'convolveBSSR': 'convolve_bssr',
# rmgpy.util
'makeOutputSubdirectory': 'make_output_subdirectory',
# rmgpy.yml
'convertChemkin2yml': 'convert_chemkin_to_yml',
'writeyml': 'write_yml',
'getMechDict': 'get_mech_dict',
'getRadicals': 'get_radicals',
'obj2dict': 'obj_to_dict',
# rmgpy.chemkin
'__chemkin_reaction_count': '_chemkin_reaction_count',
'Ffloat': 'fortran_float',
'_readKineticsReaction': '_read_kinetics_reaction',
'_readKineticsLine': '_read_kinetics_line',
'_removeLineBreaks': '_remove_line_breaks',
'saveJavaKineticsLibrary': 'save_java_kinetics_library',
# rmgpy.tools.extractInfoFromckcsv
'getROPFromCKCSV': 'get_rop_from_ckcsv',
'getConcentrationDictFromCKCSV': 'get_concentration_dict_from_ckcsv',
'getFluxGraphEdgesDict': 'get_flux_graph_edges_dict',
'getROPFlux': 'get_rop_flux',
# rmgpy.tools.observablesRegression
'curvesSimilar': 'curves_similar',
# rmgpy.tools.regression
'parseArguments': 'parse_command_line_arguments',
}
# Class attributes
ATTRIBUTES1 = {
# Arkane:
'angleUnits': 'angle_units',
'energyUnits': 'energy_units',
'cosineRotor': 'cosine_rotor',
'fourierRotor': 'fourier_rotor',
'rotorIndex': 'rotor_index',
# rmgpy.data.base
'shortDesc': 'short_desc',
'longDesc': 'long_desc',
'referenceType': 'reference_type',
'nodalDistance': 'nodal_distance',
# rmgpy.data.rmg
'forbiddenStructures': 'forbidden_structures',
# rmgpy.data.thermo.ThermoDatabase
'libraryOrder': 'library_order',
'deltaAtomicAdsorptionEnergy': 'delta_atomic_adsorption_energy',
'genericNodes': 'generic_nodes',
# rmgpy.data.kinetics.database.KineticsDatabase
'recommendedFamilies': 'recommended_families',
# rmgpy.data.kinetics.depository.DepositoryReaction
'specificCollider': 'specific_collider',
'transitionState': 'transition_state',
# rmgpy.data.kinetics.family.KineticsFamily
'forwardTemplate': 'forward_template',
'forwardRecipe': 'forward_recipe',
'reverseTemplate': 'reverse_template',
'reverseRecipe': 'reverse_recipe',
'ownReverse': 'own_reverse',
'boundaryAtoms': 'boundary_atoms',
'treeDistance': 'tree_distance',
'reverseMap': 'reverse_map',
'reactantNum': 'reactant_num',
'productNum': 'product_num',
'autoGenerated': 'auto_generated',
# rmgpy.kinetics.diffusionLimited
'solventData': 'solvent_data',
# rmgpy.molecule.atomtype
'incrementBond': 'increment_bond',
'decrementBond': 'decrement_bond',
'formBond': 'form_bond',
'breakBond': 'break_bond',
'incrementRadical': 'increment_radical',
'decrementRadical': 'decrement_radical',
'incrementLonePair': 'increment_lone_pair',
'decrementLonePair': 'decrement_lone_pair',
'allDouble': 'all_double',
'rDouble': 'r_double',
'oDouble': 'o_double',
'sDouble': 's_double',
'lonePairs': 'lone_pairs',
# rmgpy.molecule.element
'chemkinName': 'chemkin_name',
'covRadius': 'cov_radius',
'elementList': 'element_list',
# rmgpy.molecule.graph
'sortingLabel': 'sorting_label',
# rmgpy.molecule.molecule
'radicalElectrons': 'radical_electrons',
'atomType': 'atomtype',
'symmetryNumber': 'symmetry_number',
# rmgpy.pdep.configuration
'Elist': 'e_list',
'densStates': 'dens_states',
'sumStates': 'sum_states',
'activeJRotor': 'active_j_rotor',
'activeKRotor': 'active_k_rotor',
# rmgpy.pdep.network
'pathReactions': 'path_reactions',
'bathGas': 'bath_gas',
'netReactions': 'net_reactions',
'Jlist': 'j_list',
'Nisom': 'n_isom',
'Nreac': 'n_reac',
'Nprod': 'n_prod',
'Ngrains': 'n_grains',
'NJ': 'n_j',
'grainSize': 'grain_size',
'grainCount': 'grain_count',
# rmgpy.qm.molecule
'uniqueID': 'unique_id',
'uniqueIDlong': 'unique_id_long',
'outputFilePath': 'output_file_path',
'inputFilePath': 'input_file_path',
'scriptAttempts': 'script_attempts',
'maxAttempts': 'max_attempts',
'qmData': 'qm_data',
# rmgpy.qm.symmetry
'pointGroup': 'point_group',
'attemptNumber': 'attempt_number',
'pointGroupFound': 'point_group_found',
# rmgpy.rmg.main
'inputFile': 'input_file',
'outputDirectory': 'output_directory',
'modelSettingsList': 'model_settings_list',
'simulatorSettingsList': 'simulator_settings_list',
'databaseDirectory': 'database_directory',
'thermoLibraries': 'thermo_libraries',
'transportLibraries': 'transport_libraries',
'reactionLibraries': 'reaction_libraries',
'statmechLibraries': 'statmech_libraries',
'seedMechanisms': 'seed_mechanisms',
'kineticsFamilies': 'kinetics_families',
'kineticsDepositories': 'kinetics_depositories',
'kineticsEstimator': 'kinetics_estimator',
'diffusionLimiter': 'diffusion_limiter',
'bindingEnergies': 'binding_energies',
'reactionModel': 'reaction_model',
'reactionSystems': 'reaction_systems',
'balanceSpecies': 'balance_species',
'filterReactions': 'filter_reactions',
'unimolecularReact': 'unimolecular_react',
'bimolecularReact': 'bimolecular_react',
'trimolecularReact': 'trimolecular_react',
'generateOutputHTML': 'generate_output_html',
'generatePlots': 'generate_plots',
'saveSimulationProfiles': 'save_simulation_profiles',
'verboseComments': 'verbose_comments',
'saveEdgeSpecies': 'save_edge_species',
'keepIrreversible': 'keep_irreversible',
'trimolecularProductReversible': 'trimolecular_product_reversible',
'pressureDependence': 'pressure_dependence',
'quantumMechanics': 'quantum_mechanics',
'speciesConstraints': 'species_constraints',
'wallTime': 'walltime',
'initialSpecies': 'initial_species',
'initializationTime': 'initialization_time',
'kineticsdatastore': 'kinetics_datastore',
'coreSeedPath': 'core_seed_path',
'edgeSeedPath': 'edge_seed_path',
'filtersPath': 'filters_path',
'speciesMapPath': 'species_map_path',
'generateSeedEachIteration': 'generate_seed_each_iteration',
'saveSeedToDatabase': 'save_seed_to_database',
'thermoCentralDatabase': 'thermo_central_database',
'execTime': 'exec_time',
'reactionSystem': 'reaction_system',
'conditionList': 'condition_list',
'scaledConditionList': 'scaled_condition_list',
'randState': 'rand_state',
# rmgpy.rmg.model
'networkDict': 'network_dict',
'networkList': 'network_list',
'networkCount': 'network_count',
'speciesDict': 'species_dict',
'reactionDict': 'reaction_dict',
'speciesCache': 'species_cache',
'speciesCounter': 'species_counter',
'reactionCounter': 'reaction_counter',
'newSpeciesList': 'new_species_list',
'newReactionList': 'new_reaction_list',
'outputSpeciesList': 'output_species_list',
'outputReactionList': 'output_reaction_list',
'indexSpeciesDict': 'index_species_dict',
'iterationNum': 'iteration_num',
'toleranceThermoKeepSpeciesInEdge': 'thermo_tol_keep_spc_in_edge',
'minCoreSizeForPrune': 'min_core_size_for_prune',
'maximumEdgeSpecies': 'maximum_edge_species',
'newSurfaceSpcsAdd': 'new_surface_spcs_add',
'newSurfaceRxnsAdd': 'new_surface_rxns_add',
'newSurfaceSpcsLoss': 'new_surface_spcs_loss',
'newSurfaceRxnsLoss': 'new_surface_rxns_loss',
'solventName': 'solvent_name',
# rmgpy.rmg.settings
'fluxToleranceKeepInEdge': 'tol_keep_in_edge',
'fluxToleranceMoveToCore': 'tol_move_to_core',
'toleranceMoveEdgeReactionToCore': 'tol_move_edge_rxn_to_core',
'fluxToleranceInterrupt': 'tol_interrupt_simulation',
'minSpeciesExistIterationsForPrune': 'min_species_exist_iterations_for_prune',
'filterThreshold': 'filter_threshold',
'ignoreOverallFluxCriterion': 'ignore_overall_flux_criterion',
'toleranceMoveEdgeReactionToSurface': 'tol_move_edge_rxn_to_surface',
'toleranceMoveSurfaceSpeciesToCore': 'tol_move_surface_spc_to_core',
'toleranceMoveSurfaceReactionToCore': 'tol_move_surface_rxn_to_core',
'terminateAtMaxObjects': 'terminate_at_max_objects',
'dynamicsTimeScale': 'dynamics_time_scale',
'toleranceBranchReactionToCore': 'tol_branch_rxn_to_core',
'branchingIndex': 'branching_index',
'branchingRatioMax': 'branching_ratio_max',
'toleranceMoveEdgeReactionToSurfaceInterrupt': 'tol_move_edge_rxn_to_surface_interrupt',
'toleranceMoveEdgeReactionToCoreInterrupt': 'tol_move_edge_rxn_to_core_interrupt',
'maxNumSpecies': 'max_num_species',
'maxNumObjsPerIter': 'max_num_objects_per_iter',
# rmgpy.statmech.conformer
'spinMultiplicity': 'spin_multiplicity',
'opticalIsomers': 'optical_isomers',
# rmgpy.tools.canteraModel
'reactorType': 'reactor_type',
'reactionTime': 'reaction_time',
'molFrac': 'mol_frac',
'speciesList': 'species_list',
'reactionList': 'reaction_list',
'reactionMap': 'reaction_map',
# rmgpy.species
'transportData': 'transport_data',
'molecularWeight': 'molecular_weight',
'energyTransferModel': 'energy_transfer_model',
'isSolvent': 'is_solvent',
'creationIteration': 'creation_iteration',
'explicitlyAllowed': 'explicitly_allowed',
}
ATTRIBUTES2 = {
# rmgpy.molecule.vf2
'initialMapping': 'initial_mapping',
'findAll': 'find_all',
'isMatch': 'is_match',
'mappingList': 'mapping_list',
# rmgpy.molecule.molecule
'InChI': 'inchi',
'SMILES': 'smiles',
# rmgpy.rmg.pdep
'collFreq': 'coll_freq',
# rmgpy.solver.base
'numCoreSpecies': 'num_core_species',
'numCoreReactions': 'num_core_reactions',
'numEdgeSpecies': 'num_edge_species',
'numEdgeReactions': 'num_edge_reactions',
'numPdepNetworks': 'num_pdep_networks',
'speciesIndex': 'species_index',
'reactionIndex': 'reaction_index',
'reactantIndices': 'reactant_indices',
'productIndices': 'product_indices',
'networkIndices': 'network_indices',
'networkLeakCoefficients': 'network_leak_coefficients',
'jacobianMatrix': 'jacobian_matrix',
'coreSpeciesConcentrations': 'core_species_concentrations',
'coreSpeciesRates': 'core_species_rates',
'coreReactionRates': 'core_reaction_rates',
'coreSpeciesProductionRates': 'core_species_production_rates',
'coreSpeciesConsumptionRates': 'core_species_consumption_rates',
'edgeSpeciesRates': 'edge_species_rates',
'edgeReactionRates': 'edge_reaction_rates',
'networkLeakRates': 'network_leak_rates',
'surfaceSpeciesIndices': 'surface_species_indices',
'surfaceReactionIndices': 'surface_reaction_indices',
'validLayeringIndices': 'valid_layering_indices',
'maxEdgeSpeciesRateRatios': 'max_edge_species_rate_ratios',
'maxNetworkLeakRateRatios': 'max_network_leak_rate_ratios',
'prunableSpecies': 'prunable_species',
'prunableNetworks': 'prunable_networks',
'prunableSpeciesIndices': 'prunable_species_indices',
'prunableNetworkIndices': 'prunable_network_indices',
'sensitivityCoefficients': 'sensitivity_coefficients',
'sensitiveSpecies': 'sensitive_species',
'sensitivityThreshold': 'sensitivity_threshold',
'unimolecularThreshold': 'unimolecular_threshold',
'bimolecularThreshold': 'bimolecular_threshold',
'trimolecularThreshold': 'trimolecular_threshold',
# rmgpy.solver.simple
'constantVolume': 'constant_volume',
'initialMoleFractions': 'initial_mole_fractions',
'pdepColliderKinetics': 'pdep_collider_kinetics',
'colliderEfficiencies': 'collider_efficiencies',
'pdepColliderReactionIndices': 'pdep_collision_reaction_indices',
'pdepSpecificColliderKinetics': 'pdep_specific_collider_kinetics',
'specificColliderSpecies': 'specific_collider_species',
'pdepSpecificColliderReactionIndices': 'pdep_specific_collider_reaction_indices',
'sensConditions': 'sens_conditions',
'nSims': 'n_sims',
# rmgpy.solver.liquid
'constSPCNames': 'const_spc_names',
'constSPCIndices': 'const_spc_indices',
'initialConcentrations': 'initial_concentrations',
# rmgpy.solver.surface
'initialP': 'P_initial',
'initialGasMoleFractions': 'initial_gas_mole_fractions',
'initialSurfaceCoverages': 'initial_surface_coverages',
'surfaceVolumeRatio': 'surface_volume_ratio',
'surfaceSiteDensity': 'surface_site_density',
'reactionsOnSurface': 'reactions_on_surface',
'speciesOnSurface': 'species_on_surface',
# rmgpy.quantity
'uncertaintyType': 'uncertainty_type',
'commonUnits': 'common_units',
'extraDimensionality': 'extra_dimensionality',
# rmgpy.statmech.ndTorsions
'calcPath': 'calc_path',
'isLinear': 'is_linear',
'isTS': 'is_ts',
# rmgpy.tools.observablesRegression
'oldDir': 'old_dir',
'newDir': 'new_dir',
'exptData': 'expt_data',
'oldSim': 'old_sim',
'newSim': 'new_sim',
# rmgpy.tools.plot
'xVar': 'x_var',
'yVar': 'y_var',
'csvFile': 'csv_file',
'numSpecies': 'num_species',
'numReactions': 'num_reactions',
# rmgpy.tools.uncertainty
'speciesSourcesDict': 'species_sources_dict',
'reactionSourcesDict': 'reaction_sources_dict',
'allThermoSources': 'all_thermo_sources',
'allKineticSources': 'all_kinetic_sources',
'thermoInputUncertainties': 'thermo_input_uncertainties',
'kineticInputUncertainties': 'kinetic_input_uncertainties',
'extraSpecies': 'extra_species',
# rmgpy.species
'_molecularWeight': '_molecular_weight',
}
# Class methods
METHODS1 = {
# Arkane:
'loadInputFile': 'load_input_file',
'generateTemperatureList': 'generate_T_list',
'generatePressureList': 'generate_P_list',
'getNumberOfAtoms': 'get_number_of_atoms',
'loadForceConstantMatrix': 'load_force_constant_matrix',
'loadGeometry': 'load_geometry',
'loadConformer': 'load_conformer',
'loadEnergy': 'load_energy',
'loadZeroPointEnergy': 'load_zero_point_energy',
'loadScanEnergies': 'load_scan_energies',
'loadNegativeFrequency': 'load_negative_frequency',
'loadNecessaryDatabases': 'load_necessary_databases',
'getLibraries': 'get_libraries',
'visit_Call': 'visit_call',
'visit_List': 'visit_list',
'visit_Tuple': 'visit_tuple',
'visit_Dict': 'visit_dict',
'visit_Str': 'visit_str',
'visit_Num': 'visit_num',
'fitInterpolationModels': 'fit_interpolation_models',
'projectRotors': 'project_rotors',
# rmgpy.__init__
'getPath': 'get_path',
# rmgpy.data.base
'getAllDescendants': 'get_all_descendants',
'getEntriesToSave': 'get_entries_to_save',
'getSpecies': 'get_species',
'saveDictionary': 'save_dictionary',
'matchNodeToNode': 'match_node_to_node',
'matchNodeToChild': 'match_node_to_child',
'matchNodeToStructure': 'match_node_to_structure',
'descendTree': 'descend_tree',
'areSiblings': 'are_siblings',
'removeGroup': 'remove_group',
'matchToStructure': 'match_to_structure',
'matchLogicOr': 'match_logic_or',
'getPossibleStructures': 'get_possible_structures',
'isMoleculeForbidden': 'is_molecule_forbidden',
'loadEntry': 'load_entry',
'saveEntry': 'save_entry',
# rmgpy.data.rmg
'loadThermo': 'load_thermo',
'loadTransport': 'load_transport',
'loadForbiddenStructures': 'load_forbidden_structures',
'loadKinetics': 'load_kinetics',
'loadSolvation': 'load_solvation',
'loadStatmech': 'load_statmech',
# rmgpy.data.solvation
'getHAbsCorrection': 'get_h_abs_correction',
'getSolventViscosity': 'get_solvent_viscosity',
'getStokesDiffusivity': 'get_stokes_diffusivity',
'setMcGowanVolume': 'set_mcgowan_volume',
'getSolventData': 'get_solvent_data',
'getSolventStructure': 'get_solvent_structure',
'loadGroups': 'load_groups',
'saveLibraries': 'save_libraries',
'saveGroups': 'save_groups',
'getSoluteData': 'get_solute_data',
'getAllSoluteData': 'get_all_solute_data',
'getSoluteDataFromLibrary': 'get_solute_data_from_library',
'getSoluteDataFromGroups': 'get_solute_data_from_groups',
'transformLonePairs': 'transform_lone_pairs',
'removeHBonding': 'remove_h_bonding',
'estimateSoluteViaGroupAdditivity': 'estimate_solute_via_group_additivity',
'calcH': 'calc_h',
'calcG': 'calc_g',
'calcS': 'calc_s',
'getSolvationCorrection': 'get_solvation_correction',
'checkSolventinInitialSpecies': 'check_solvent_in_initial_species',
# rmgpy.data.statmech
'getFrequencyGroups': 'get_frequency_groups',
'getStatmechData': 'get_statmech_data',
'loadDepository': 'load_depository',
'loadLibraries': 'load_libraries',
'saveDepository': 'save_depository',
'getStatmechDataFromDepository': 'get_statmech_data_from_depository',
'getStatmechDataFromLibrary': 'get_statmech_data_from_library',
'getStatmechDataFromGroups': 'get_statmech_data_from_groups',
'generateFrequencies': 'generate_frequencies',
# rmgpy.data.thermo
'pruneHeteroatoms': 'prune_heteroatoms',
'recordPolycyclicGenericNodes': 'record_polycyclic_generic_nodes',
'recordRingGenericNodes': 'record_ring_generic_nodes',
'getThermoData': 'get_thermo_data',
'setDeltaAtomicAdsorptionEnergies': 'set_delta_atomic_adsorption_energies',
'correctBindingEnergy': 'correct_binding_energy',
'getThermoDataForSurfaceSpecies': 'get_thermo_data_for_surface_species',
'getThermoDataFromLibraries': 'get_thermo_data_from_libraries',
'getAllThermoData': 'get_all_thermo_data',
'getThermoDataFromDepository': 'get_thermo_data_from_depository',
'getThermoDataFromLibrary': 'get_thermo_data_from_library',
'getThermoDataFromGroups': 'get_thermo_data_from_groups',
'prioritizeThermo': 'prioritize_thermo',
'estimatRadicalThermoViaHBI': 'estimate_radical_thermo_via_hbi',
'estimateThermoViaGroupAdditivity': 'estimate_thermo_via_group_additivity',
'computeGroupAdditivityThermo': 'compute_group_additivity_thermo',
'getBicyclicCorrectionThermoDataFromHeuristic': 'get_bicyclic_correction_thermo_data_from_heuristic',
'getRingGroupsFromComments': 'get_ring_groups_from_comments',
'extractSourceFromComments': 'extract_source_from_comments',
# rmgpy.data.transport
'getTransportProperties': 'get_transport_properties',
'getAllTransportProperties': 'get_all_transport_properties',
'getTransportPropertiesFromLibrary': 'get_transport_properties_from_library',
'getTransportPropertiesViaGroupEstimates': 'get_transport_properties_via_group_estimates',
'estimateCriticalPropertiesViaGroupAdditivity': 'estimate_critical_properties_via_group_additivity',
'getTransportPropertiesViaLennardJonesParameters': 'get_transport_properties_via_lennard_jones_parameters',
# rmgpy.data.kinetics.database
'loadRecommendedFamiliesList': 'load_recommended_families',
'loadFamilies': 'load_families',
'saveRecommendedFamilies': 'save_recommended_families',
'saveFamilies': 'save_families',
'getForwardReactionForFamilyEntry': 'get_forward_reaction_for_family_entry',
'reconstructKineticsFromSource': 'reconstruct_kinetics_from_source',
# rmgpy.data.kinetics.family
'applyForward': 'apply_forward',
'applyReverse': 'apply_reverse',
'loadTemplate': 'load_template',
'loadRecipe': 'load_recipe',
'loadForbidden': 'load_forbidden',
'saveTrainingReactions': 'save_training_reactions',
'generateProductTemplate': 'generate_product_template',
'addKineticsRulesFromTrainingSet': 'add_rules_from_training',
'getRootTemplate': 'get_root_template',
'fillKineticsRulesByAveragingUp': 'fill_rules_by_averaging_up',
'applyRecipe': 'apply_recipe',
'generateReactions': 'generate_reactions',
'addReverseAttribute': 'add_reverse_attribute',
'calculateDegeneracy': 'calculate_degeneracy',
'getReactionPairs': 'get_reaction_pairs',
'getReactionTemplate': 'get_reaction_template',
'getKineticsForTemplate': 'get_kinetics_for_template',
'getKineticsFromDepository': 'get_kinetics_from_depository',
'getKinetics': 'get_kinetics',
'estimateKineticsUsingGroupAdditivity': 'estimate_kinetics_using_group_additivity',
'estimateKineticsUsingRateRules': 'estimate_kinetics_using_rate_rules',
'getReactionTemplateLabels': 'get_reaction_template_labels',
'retrieveTemplate': 'retrieve_template',
'getLabeledReactantsAndProducts': 'get_labeled_reactants_and_products',
'addAtomLabelsForReaction': 'add_atom_labels_for_reaction',
'getTrainingDepository': 'get_training_depository',
'addEntry': 'add_entry',
# rmgpy.data.kinetics.library
'getLibraryReactions': 'get_library_reactions',
'markValidDuplicates': 'mark_valid_duplicates',
'checkForDuplicates': 'check_for_duplicates',
'convertDuplicatesToMulti': 'convert_duplicates_to_multi',
# rmgpy.data.kinetics.rules
'getEntries': 'get_entries',
'hasRule': 'has_rule',
'getRule': 'get_rule',
'getAllRules': 'get_all_rules',
'fillRulesByAveragingUp': 'fill_rules_by_averaging_up',
'estimateKinetics': 'estimate_kinetics',
# rmgpy.kinetics.model
'isPressureDependent': 'is_pressure_dependent',
'isTemperatureValid': 'is_temperature_valid',
'getRateCoefficient': 'get_rate_coefficient',
'toHTML': 'to_html',
'isSimilarTo': 'is_similar_to',
'isIdenticalTo': 'is_identical_to',
'getCanteraEfficiencies': 'get_cantera_efficiencies',
'setCanteraKinetics': 'set_cantera_kinetics',
'isPressureValid': 'is_pressure_valid',
'getEffectivePressure': 'get_effective_pressure',
'getEffectiveColliderEfficiencies': 'get_effective_collider_efficiencies',
'calculateTunnelingFactor': 'calculate_tunneling_factor',
'calculateTunnelingFunction': 'calculate_tunneling_function',
# rmgpy.kinetics.arrhenius
'changeT0': 'change_t0',
'fitToData': 'fit_to_data',
'changeRate': 'change_rate',
'toCanteraKinetics': 'to_cantera_kinetics',
'toArrheniusEP': 'to_arrhenius_ep',
'getActivationEnergy': 'get_activation_energy',
'toArrhenius': 'to_arrhenius',
'fitToReactions': 'fit_to_reactions',
'getAdjacentExpressions': 'get_adjacent_expressions',
# rmgpy.kinetics.chebyshev
'getReducedTemperature': 'get_reduced_temperature',
'getReducedPressure': 'get_reduced_pressure',
# rmgpy.kinetics.diffusionLimited
'getEffectiveRate': 'get_effective_rate',
'getDiffusionFactor': 'get_diffusion_factor',
'getDiffusionLimit': 'get_diffusion_limit',
# rmgpy.kinetics.surface
'getStickingCoefficient': 'get_sticking_coefficient',
# rmgpy.kinetics.uncertainties
'getExpectedLogUncertainty': 'get_expected_log_uncertainty',
# rmgpy.molecule.atomtype
'setActions': 'set_actions',
'isSpecificCaseOf': 'is_specific_case_of',
# rmgpy.molecule.graph
'resetConnectivityValues': 'reset_connectivity_values',
'getOtherVertex': 'get_other_vertex',
'addVertex': 'add_vertex',
'addEdge': 'add_edge',
'getAllEdges': 'get_all_edges',
'getEdges': 'get_edges',
'getEdge': 'get_edge',
'hasVertex': 'has_vertex',
'hasEdge': 'has_edge',
'removeVertex': 'remove_vertex',
'removeEdge': 'remove_edge',
'copyAndMap': 'copy_and_map',
'updateConnectivityValues': 'update_connectivity_values',
'sortVertices': 'sort_vertices',
'isIsomorphic': 'is_isomorphic',
'findIsomorphism': 'find_isomorphism',
'isSubgraphIsomorphic': 'is_subgraph_isomorphic',
'findSubgraphIsomorphisms': 'find_subgraph_isomorphisms',
'isCyclic': 'is_cyclic',
'isVertexInCycle': 'is_vertex_in_cycle',
'isEdgeInCycle': 'is_edge_in_cycle',
'getAllCyclicVertices': 'get_all_cyclic_vertices',
'getAllPolycyclicVertices': 'get_all_polycyclic_vertices',
'getPolycyclicRings': 'get_polycycles',
'getMonocyclicRings': 'get_monocycles',
'getDisparateRings': 'get_disparate_cycles',
'getAllCycles': 'get_all_cycles',
'getAllCyclesOfSize': 'get_all_cycles_of_size',
'getAllSimpleCyclesOfSize': 'get_all_simple_cycles_of_size',
'getSmallestSetOfSmallestRings': 'get_smallest_set_of_smallest_rings',
'getRelevantCycles': 'get_relevant_cycles',
'getMaxCycleOverlap': 'get_max_cycle_overlap',
'getLargestRing': 'get_largest_ring',
'isMappingValid': 'is_mapping_valid',
# rmgpy.molecule.molecule
'isHydrogen': 'is_hydrogen',
'isNonHydrogen': 'is_non_hydrogen',
'isCarbon': 'is_carbon',
'isNitrogen': 'is_nitrogen',
'isOxygen': 'is_oxygen',
'isFluorine': 'is_fluorine',
'isSurfaceSite': 'is_surface_site',
'isSilicon': 'is_silicon',
'isSulfur': 'is_sulfur',
'isChlorine': 'is_chlorine',
'isIodine': 'is_iodine',
'isNOS': 'is_nos',
'setLonePairs': 'set_lone_pairs',
'incrementLonePairs': 'increment_lone_pairs',
'decrementLonePairs': 'decrement_lone_pairs',
'updateCharge': 'update_charge',
'applyAction': 'apply_action',
'getBondOrdersForAtom': 'get_total_bond_order',
'getBDE': 'get_bde',
'getOrderStr': 'get_order_str',
'setOrderStr': 'set_order_str',
'getOrderNum': 'get_order_num',
'setOrderNum': 'set_order_num',
'isVanDerWaals': 'is_van_der_waals',
'isOrder': 'is_order',
'incrementOrder': 'increment_order',
'decrementOrder': 'decrement_order',
'addAtom': 'add_atom',
'addBond': 'add_bond',
'getBonds': 'get_bonds',
'getBond': 'get_bond',
'hasAtom': 'has_atom',
'hasBond': 'has_bond',
'containsSurfaceSite': 'contains_surface_site',
'removeAtom': 'remove_atom',
'removeBond': 'remove_bond',
'removeVanDerWaalsBonds': 'remove_van_der_waals_bonds',
'sortAtoms': 'sort_atoms',
'getFormula': 'get_formula',
'getMolecularWeight': 'get_molecular_weight',
'getRadicalCount': 'get_radical_count',
'getSingletCarbeneCount': 'get_singlet_carbene_count',
'getNumAtoms': 'get_num_atoms',
'deleteHydrogens': 'delete_hydrogens',
'connectTheDots': 'connect_the_dots',
'updateAtomTypes': 'update_atomtypes',
'updateMultiplicity': 'update_multiplicity',
'clearLabeledAtoms': 'clear_labeled_atoms',
'containsLabeledAtom': 'contains_labeled_atom',
'getLabeledAtoms': 'get_all_labeled_atoms',
'getLabeledAtom': 'get_labeled_atoms',
'isAtomInCycle': 'is_atom_in_cycle',
'isBondInCycle': 'is_bond_in_cycle',
'fromInChI': 'from_inchi',
'fromAugmentedInChI': 'from_augmented_inchi',
'fromSMILES': 'from_smiles',
'fromSMARTS': 'from_smarts',
'fromXYZ': 'from_xyz',
'toSingleBonds': 'to_single_bonds',
'toInChI': 'to_inchi',
'toAugmentedInChI': 'to_augmented_inchi',
'toInChIKey': 'to_inchi_key',
'toAugmentedInChIKey': 'to_augmented_inchi_key',
'toSMARTS': 'to_smarts',
'toSMILES': 'to_smiles',
'find_H_bonds': 'find_h_bonds',
'generate_H_bonded_structures': 'generate_h_bonded_structures',
'remove_H_bonds': 'remove_h_bonds',
'isLinear': 'is_linear',
'isAromatic': 'is_aromatic',
'isHeterocyclic': 'is_heterocyclic',
'countInternalRotors': 'count_internal_rotors',
'calculateCp0': 'calculate_cp0',
'calculateCpInf': 'calculate_cpinf',
'getSymmetryNumber': 'get_symmetry_number',
'calculateSymmetryNumber': 'calculate_symmetry_number',
'isRadical': 'is_radical',
'isArylRadical': 'is_aryl_radical',
'getURL': 'get_url',
'getRadicalAtoms': 'get_radical_atoms',
'updateLonePairs': 'update_lone_pairs',
'getNetCharge': 'get_net_charge',
'getChargeSpan': 'get_charge_span',
'toGroup': 'to_group',
'getAromaticRings': 'get_aromatic_rings',
'assignAtomIDs': 'assign_atom_ids',
'atomIDValid': 'atom_ids_valid',
'isIdentical': 'is_identical',
'getNthNeighbor': 'get_nth_neighbor',
# rmgpy.molecule.group
'hasWildcards': 'has_wildcards',
'countBonds': 'count_bonds',
'makeSampleAtom': 'make_sample_atom',
'makeBond': 'make_bond',
'sortByConnectivity': 'sort_by_connectivity',
'clearRegDims': 'clear_reg_dims',
'getExtensions': 'get_extensions',
'specifyAtomExtensions': 'specify_atom_extensions',
'specifyRingExtensions': 'specify_ring_extensions',
'specifyUnpairedExtensions': 'specify_unpaired_extensions',
'specifyInternalNewBondExtensions': 'specify_internal_new_bond_extensions',
'specifyExternalNewBondExtensions': 'specify_external_new_bond_extensions',
'specifyBondExtensions': 'specify_bond_extensions',
'updateFingerprint': 'update_fingerprint',
'standardizeAtomType': 'standardize_atomtype',
'createAndConnectAtom': 'create_and_connect_atom',
'addExplicitLigands': 'add_explicit_ligands',
'standardizeGroup': 'standardize_group',
'addImplicitAtomsFromAtomType': 'add_implicit_atoms_from_atomtype',
'classifyBenzeneCarbons': 'classify_benzene_carbons',
'addImplicitBenzene': 'add_implicit_benzene',
'pickWildcards': 'pick_wildcards',
'makeSampleMolecule': 'make_sample_molecule',
'isBenzeneExplicit': 'is_benzene_explicit',
'mergeGroups': 'merge_groups',
'resetRingMembership': 'reset_ring_membership',
# rmgpy.pdep.configuration
'isUnimolecular': 'is_unimolecular',
'isBimolecular': 'is_bimolecular',
'isTermolecular': 'is_termolecular',
'isTransitionState': 'is_transition_state',
'calculateCollisionFrequency': 'calculate_collision_frequency',
'calculateDensityOfStates': 'calculate_density_of_states',
'mapDensityOfStates': 'map_density_of_states',
'mapSumOfStates': 'map_sum_of_states',
# rmgpy.pdep.network
'getAllSpecies': 'get_all_species',
'calculateRateCoefficients': 'calculate_rate_coefficients',
'setConditions': 'set_conditions',
'selectEnergyGrains': 'select_energy_grains',
'calculateDensitiesOfStates': 'calculate_densities_of_states',
'mapDensitiesOfStates': 'map_densities_of_states',
'calculateMicrocanonicalRates': 'calculate_microcanonical_rates',
'calculateEquilibriumRatios': 'calculate_equilibrium_ratios',
'calculateCollisionModel': 'calculate_collision_model',
'solveFullME': 'solve_full_me',
'solveReducedME': 'solve_reduced_me',
# rmgpy.rmg.main
'loadInput': 'load_input',
'loadThermoInput': 'load_thermo_input',
'checkInput': 'check_input',
'checkLibraries': 'check_libraries',
'saveInput': 'save_input',
'loadDatabase': 'load_database',
'makeSeedMech': 'make_seed_mech',
'makeSpeciesLabelsIndependent': 'make_species_labels_independent',
'processToSpeciesNetworks': 'process_to_species_networks',
'processPdepNetworks': 'process_pdep_networks',
'processReactionsToSpecies': 'process_reactions_to_species',
'generateCanteraFiles': 'generate_cantera_files',
'initializeReactionThresholdAndReactFlags': 'initialize_reaction_threshold_and_react_flags',
'updateReactionThresholdAndReactFlags': 'update_reaction_threshold_and_react_flags',
'saveEverything': 'save_everything',
'getGitCommit': 'get_git_commit',
'logHeader': 'log_header',
'loadRMGJavaInput': 'load_rmg_java_input',
'readMeaningfulLineJava': 'read_meaningful_line_java',
'determine_procnum_from_RAM': 'determine_procnum_from_ram',
# rmgpy.rmg.model
'checkForExistingSpecies': 'check_for_existing_species',
'makeNewSpecies': 'make_new_species',
'checkForExistingReaction': 'check_for_existing_reaction',
'makeNewReaction': 'make_new_reaction',
'makeNewPDepReaction': 'make_new_pdep_reaction',
'addNewSurfaceObjects': 'add_new_surface_objects',
'adjustSurface': 'adjust_surface',
'clearSurfaceAdjustments': 'clear_surface_adjustments',
'processNewReactions': 'process_new_reactions',
'applyThermoToSpecies': 'apply_thermo_to_species',
'generateThermo': 'generate_thermo',
'applyKineticsToReaction': 'apply_kinetics_to_reaction',
'generateKinetics': 'generate_kinetics',
'printEnlargeSummary': 'log_enlarge_summary',
'addSpeciesToCore': 'add_species_to_core',
'addSpeciesToEdge': 'add_species_to_edge',
'setThermodynamicFilteringParameters': 'set_thermodynamic_filtering_parameters',
'thermoFilterSpecies': 'thermo_filter_species',
'thermoFilterDown': 'thermo_filter_down',
'removeEmptyPdepNetworks': 'remove_empty_pdep_networks',
'removeSpeciesFromEdge': 'remove_species_from_edge',
'addReactionToCore': 'add_reaction_to_core',
'addReactionToEdge': 'add_reaction_to_edge',
'getModelSize': 'get_model_size',
'getLists': 'get_species_reaction_lists',
'getStoichiometryMatrix': 'get_stoichiometry_matrix',
'addSeedMechanismToCore': 'add_seed_mechanism_to_core',
'addReactionLibraryToEdge': 'add_reaction_library_to_edge',
'addReactionLibraryToOutput': 'add_reaction_library_to_output',
'addReactionToUnimolecularNetworks': 'add_reaction_to_unimolecular_networks',
'updateUnimolecularReactionNetworks': 'update_unimolecular_reaction_networks',
'markChemkinDuplicates': 'mark_chemkin_duplicates',
'registerReaction': 'register_reaction',
'searchRetrieveReactions': 'search_retrieve_reactions',
'initializeIndexSpeciesDict': 'initialize_index_species_dict',
# rmgpy.rmg.pdep
'getLeakCoefficient': 'get_leak_coefficient',
'getMaximumLeakSpecies': 'get_maximum_leak_species',
'getLeakBranchingRatios': 'get_leak_branching_ratios',
'exploreIsomer': 'explore_isomer',
'addPathReaction': 'add_path_reaction',
'solve_SS_network': 'solve_ss_network',
'updateConfigurations': 'update_configurations',
# rmgpy.solver.base
'initializeModel': 'initialize_model',
'getLayeringIndices': 'get_layering_indices',
'addReactionsToSurface': 'add_reactions_to_surface',
'logRates': 'log_rates',
'logConversions': 'log_conversions',
'computeRateDerivative': 'compute_rate_derivative',
# rmgpy.solver.simple
'convertInitialKeysToSpeciesObjects': 'convert_initial_keys_to_species_objects',
# rmgpy.solver.liquid
'get_constSPCIndices': 'get_const_spc_indices',
# rmgpy.statmech.conformer
'getPartitionFunction': 'get_partition_function',
'getHeatCapacity': 'get_heat_capacity',
'getEnthalpy': 'get_enthalpy',
'getEntropy': 'get_entropy',
'getFreeEnergy': 'get_free_energy',
'getSumOfStates': 'get_sum_of_states',
'getDensityOfStates': 'get_density_of_states',
'getTotalMass': 'get_total_mass',
'getCenterOfMass': 'get_center_of_mass',
'getNumberDegreesOfFreedom': 'get_number_degrees_of_freedom',
'getMomentOfInertiaTensor': 'get_moment_of_inertia_tensor',
'getPrincipalMomentsOfInertia': 'get_principal_moments_of_inertia',
'getInternalReducedMomentOfInertia': 'get_internal_reduced_moment_of_inertia',
'getSymmetricTopRotors': 'get_symmetric_top_rotors',
'getActiveModes': 'get_active_modes',
# rmgpy.thermo.nasa
'changeBaseEnthalpy': 'change_base_enthalpy',
'changeBaseEntropy': 'change_base_entropy',
'selectPolynomial': 'select_polynomial',
'toThermoData': 'to_thermo_data',
'toWilhoit': 'to_wilhoit',
# rmgpy.thermo.thermodata
'toNASA': 'to_nasa',
# rmgpy.reaction
'toLabeledStr': 'to_labeled_str',
'isIsomerization': 'is_isomerization',
'isAssociation': 'is_association',
'isDissociation': 'is_dissociation',
'isSurfaceReaction': 'is_surface_reaction',
'hasTemplate': 'has_template',
'matchesSpecies': 'matches_species',
'getEnthalpyOfReaction': 'get_enthalpy_of_reaction',
'getEntropyOfReaction': 'get_entropy_of_reaction',
'getFreeEnergyOfReaction': 'get_free_energy_of_reaction',
'getEquilibriumConstant': 'get_equilibrium_constant',
'getEnthalpiesOfReaction': 'get_enthalpies_of_reaction',
'getEntropiesOfReaction': 'get_entropies_of_reaction',
'getFreeEnergiesOfReaction': 'get_free_energies_of_reaction',
'getEquilibriumConstants': 'get_equilibrium_constants',
'getStoichiometricCoefficient': 'get_stoichiometric_coefficient',
'getSurfaceRateCoefficient': 'get_surface_rate_coefficient',
'fixDiffusionLimitedA': 'fix_diffusion_limited_a_factor',
'fixBarrierHeight': 'fix_barrier_height',
'reverseThisArrheniusRate': 'reverse_arrhenius_rate',
'generateReverseRateCoefficient': 'generate_reverse_rate_coefficient',
'calculateTSTRateCoefficient': 'calculate_tst_rate_coefficient',
'calculateTSTRateCoefficients': 'calculate_tst_rate_coefficients',
'canTST': 'can_tst',
'calculateMicrocanonicalRateCoefficient': 'calculate_microcanonical_rate_coefficient',
'isBalanced': 'is_balanced',
'generatePairs': 'generate_pairs',
'generate3dTS': 'generate_3d_ts',
# rmgpy.species
'toChemkin': 'to_chemkin',
'toCantera': 'to_cantera',
'hasStatMech': 'has_statmech',
'hasThermo': 'has_thermo',
'getResonanceHybrid': 'get_resonance_hybrid',
'getAugmentedInChI': 'get_augmented_inchi',
'generateTransportData': 'generate_transport_data',
'getTransportData': 'get_transport_data',
'generateStatMech': 'generate_statmech',
'setE0WithThermo': 'set_e0_with_thermo',
'generateEnergyTransferModel': 'generate_energy_transfer_model',
# rmgpy.transport
'getCollisionFrequency': 'get_collision_frequency',
# rmgpy.quantity
'getConversionFactorToSI': 'get_conversion_factor_to_si',
'getConversionFactorFromSI': 'get_conversion_factor_from_si',
'getConversionFactorFromSItoCmMolS': 'get_conversion_factor_from_si_to_cm_mol_s',
'isUncertaintyAdditive': 'is_uncertainty_additive',
'isUncertaintyMultiplicative': 'is_uncertainty_multiplicative',
# rmgpy.tools.canteraModel
'generateConditions': 'generate_conditions',
'loadModel': 'load_model',
'refreshModel': 'refresh_model',
'loadChemkinModel': 'load_chemkin_model',
'modifyReactionKinetics': 'modify_reaction_kinetics',
'modifySpeciesThermo': 'modify_species_thermo',
# rmgpy.tools.plot
'comparePlot': 'compare_plot',
'uncertaintyPlot': 'uncertainty_plot',
}
METHODS2 = {
# rmgpy.data.base
'loadOld': 'load_old',
'loadOldDictionary': 'load_old_dictionary',
'__loadTree': '_load_tree',
'loadOldTree': 'load_old_tree',
'loadOldLibrary': 'load_old_library',
'parseOldLibrary': 'parse_old_library',
'saveOld': 'save_old',
'saveOldDictionary': 'save_old_dictionary',
'generateOldTree': 'generate_old_tree',
'saveOldTree': 'save_old_tree',
'saveOldLibrary': 'save_old_library',
'__hashLabels': '_hash_labels',
# rmgpy.data.reference
'toPrettyRepr': 'to_pretty_repr',
'getAuthorString': 'get_author_string',
# rmgpy.data.solvation
'__addGroupSoluteData': '_add_group_solute_data',
# rmgpy.data.statmech
'__countMatchesToNode': '_count_matches_to_node',
'__getNode': '_get_node',
# rmgpy.data.thermo
'copyData': 'copy_data',
'__addPolycyclicCorrectionThermoData': '_add_polycyclic_correction_thermo_data',
'__addPolyRingCorrectionThermoDataFromHeuristic': '_add_poly_ring_correction_thermo_data_from_heuristic',
'__addRingCorrectionThermoDataFromTree': '_add_ring_correction_thermo_data_from_tree',
'__averageChildrenThermo': '_average_children_thermo',
'__addGroupThermoData': '_add_group_thermo_data',
'__removeGroupThermoData': '_remove_group_thermo_data',
'satisfyRegistrationRequirements': 'satisfy_registration_requirements',
'registerInCentralThermoDB': 'register_in_central_thermo_db',
# rmgpy.data.transport
'__addCriticalPointContribution': '_add_critical_point_contribution',
# rmgpy.data.kinetics.depository
'getSource': 'get_source',
# rmgpy.data.kinetics.family
'addAction': 'add_action',
'getReverse': 'get_reverse',
'__apply': '_apply',
'loadOldTemplate': 'load_old_template',
'saveOldTemplate': 'save_old_template',
'distributeTreeDistances': 'distribute_tree_distances',
'__generateProductStructures': '_generate_product_structures',
'__createReaction': '_create_reaction',
'__matchReactantToTemplate': '_match_reactant_to_template',
'__generateReactions': '_generate_reactions',
'__selectBestKinetics': '_select_best_kinetics',
'_splitReactions': '_split_reactions',
'evalExt': 'eval_ext',
'getExtensionEdge': 'get_extension_edge',
'extendNode': 'extend_node',
'generateTree': 'generate_tree',
'getRxnBatches': 'get_rxn_batches',
'pruneTree': 'prune_tree',
'makeTreeNodes': 'make_tree_nodes',
'_absorbProcess': '_absorb_process',
'makeBMRulesFromTemplateRxnMap': 'make_bm_rules_from_template_rxn_map',
'crossValidate': 'cross_validate',
'crossValidateOld': 'cross_validate_old',
'simpleRegularization': 'simple_regularization',
'checkTree': 'check_tree',
'makeTree': 'make_tree',
'cleanTreeRules': 'clean_tree_rules',
'cleanTreeGroups': 'clean_tree_groups',
'cleanTree': 'clean_tree',
'saveGeneratedTree': 'save_generated_tree',
'getTrainingSet': 'get_training_set',
'getReactionMatches': 'get_reaction_matches',
'isEntryMatch': 'is_entry_match',
'rxnsMatchNode': 'rxns_match_node',
'retrieveOriginalEntry': 'retrieve_original_entry',
'getSourcesForTemplate': 'get_sources_for_template',
'getBackboneRoots': 'get_backbone_roots',
'getEndRoots': 'get_end_roots',
'getTopLevelGroups': 'get_top_level_groups',
# rmgpy.data.kinetics.groups
'__multipleKineticsData': '_multiple_kinetics_data',
'generateGroupAdditivityValues': 'generate_group_additivity_values',
# rmgpy.data.kinetics.library
'__loadOldReactions': '_load_old_reactions',
# rmgpy.data.kinetics.rules
'__loadOldComments': '_load_old_comments',
'__getAverageKinetics': '_get_average_kinetics',
# rmgpy.molecule.draw
'createNewSurface': 'create_new_surface',
'__findRingGroups': '_find_ring_groups',
'__generateCoordinates': '_generate_coordinates',
'__findCyclicBackbone': '_find_cyclic_backbone',
'__findStrightChainBackbone': '_find_straight_chain_backbone',
'__findStraightChainPaths': '_find_straight_chain_paths',
'__generateRingSystemCoordinates': '_generate_ring_system_coordinates',
'__generateStraightChainCoordinates': '_generate_straight_chain_coordinates',
'__generateNeighborCoordinates': '_generate_neighbor_coordinates',
'__generateFunctionalGroupCoordinates': '_generate_functional_group_coordinates',
'__generateAtomLabels': '_generate_atom_labels',
'__drawLine': '_draw_line',
'__renderBond': '_render_bond',
'__renderAtom': '_render_atom',
'__make_single_bonds': '_make_single_bonds',
'__replace_bonds': '_replace_bonds',
# rmgpy.molecule.graph
'__isChainInCycle': '_is_chain_in_cycle',
'__exploreCyclesRecursively': '_explore_cycles_recursively',
'_sortCyclicVertices': 'sort_cyclic_vertices',
# rmgpy.molecule.vf2
'addToMapping': 'add_to_mapping',
'removeFromMapping': 'remove_from_mapping',
# rmgpy.molecule.molecule
'__changeBond': '_change_bond',
'identifyRingMembership': 'identify_ring_membership',
'getDeterministicSmallestSetOfSmallestRings': 'get_deterministic_sssr',
# rmgpy.molecule.group
'__formBond': '_form_bond',
'__breakBond': '_break_bond',
'__gainRadical': '_gain_radical',
'__loseRadical': '_lose_radical',
'__gainPair': '_gain_pair',
'__losePair': '_lose_pair',
# rmgpy.pdep.collision
'getAlpha': 'get_alpha',
'generateCollisionMatrix': 'generate_collision_matrix',
'calculate_collision_efficiency': 'calculate_collision_efficiency',
# rmgpy.pdep.draw
'__getEnergyRange': '_get_energy_range',
'__useStructureForLabel': '_use_structure_for_label',
'__getTextSize': '_get_text_size',
'__drawText': '_draw_text',
'__getLabelSize': '_get_label_size',
'__drawLabel': '_draw_label',
# rmgpy.pdep.network
'__getEnergyGrains': '_get_energy_grains',
'printSummary': 'log_summary',
# rmgpy.qm.gaussian
'testReady': 'test_ready',
'verifyOutputFile': 'verify_output_file',
'inputFileKeywords': 'input_file_keywords',
'writeInputFile': 'write_input_file',
'generateQMData': 'generate_qm_data',
'getParser': 'get_parser',
# rmgpy.qm.main
'checkAllSet': 'check_all_set',
'setDefaultOutputDirectory': 'set_default_output_directory',
'checkReady': 'check_ready',
'checkPaths': 'check_paths',
'runJobs': 'run_jobs',
# rmgpy.qm.molecule
'getFilePath': 'get_file_path',
'getCrudeMolFilePath': 'get_crude_mol_file_path',
'getRefinedMolFilePath': 'get_refined_mol_file_path',
'generateRDKitGeometries': 'generate_rdkit_geometries',
'saveCoordinatesFromRDMol': 'save_coordinates_from_rdmol',
'saveCoordinatesFromQMData': 'save_coordinates_from_qm_data',
'getThermoFilePath': 'get_thermo_file_path',
'createGeometry': 'create_geometry',
'saveThermoData': 'save_thermo_data',
'loadThermoData': 'load_thermo_data',
'getInChiKeyAug': 'get_augmented_inchi_key',
'getMolFilePathForCalculation': 'get_mol_file_path_for_calculation',
'determinePointGroup': 'determine_point_group',
'calculateChiralityCorrection': 'calculate_chirality_correction',
'calculateThermoData': 'calculate_thermo_data',
# rmgpy.qm.qmdata
'testValid': 'test_valid',
# rmgpy.qm.qmverifier
'checkForInChIKeyCollision': 'check_for_inchi_key_collision',
'successfulJobExists': 'successful_job_exists',
# rmgpy.statmech.ndTorsions
'getTorsions': 'get_torsions',
'readScan': 'read_scan',
'readGjf': 'read_gjf',
'writeXYZ': 'write_xyz',
'writePes': 'write_pes',
'writeInp': 'write_inp',
'getIcsFile': 'get_ics_file',
'fitFourier': 'fit_fourier',
'getSplistfile': 'get_splist_file',
'getEigvals': 'get_eigvals',
'readEigvals': 'read_eigvals',
'calcPartitionFunction': 'calc_partition_function',
'getFrequencies': 'get_frequencies',
# rmgpy.statmech.torsion
'getRotationalConstantEnergy': 'get_rotational_constant_energy',
'getFrequency': 'get_frequency',
'getLevelEnergy': 'get_level_energy',
'getLevelDegeneracy': 'get_level_degeneracy',
'solveSchrodingerEquation': 'solve_schrodinger_equation',
'getHamiltonian': 'get_hamiltonian',
'getPotential': 'get_potential',
'fitFourierPotentialToData': 'fit_fourier_potential_to_data',
'fitCosinePotentialToData': 'fit_cosine_potential_to_data',
# rmgpy.thermo.wilhoit
'__residual': '_residual',
'fitToDataForConstantB': 'fit_to_data_for_constant_b',
# rmgpy.stats
'saveExecutionStatistics': 'save_execution_statistics',
'generateExecutionPlots': 'generate_execution_plots',
# rmgpy.tools.observablesRegression
'runSimulations': 'run_simulations',
# rmgpy.tools.uncertainty
'getUncertaintyValue': 'get_uncertainty_value',
'getPartialUncertaintyValue': 'get_partial_uncertainty_value',
'getUncertaintyFactor': 'get_uncertainty_factor',
'retrieveSaturatedSpeciesFromList': 'retrieve_saturated_species_from_list',
'extractSourcesFromModel': 'extract_sources_from_model',
'compileAllSources': 'compile_all_sources',
'assignParameterUncertainties': 'assign_parameter_uncertainties',
'sensitivityAnalysis': 'sensitivity_analysis',
'localAnalysis': 'local_analysis',
}
# Function and method arguments
ARGUMENTS1 = {
    # Arkane:
'Vlist': 'v_list',
'maximumRadicalElectrons': 'maximum_radical_electrons',
'format': 'file_format',
'F': 'hessian',
'getProjectedOutFreqs': 'get_projected_out_freqs',
# rmgpy.data.base
'shortDesc': 'short_desc',
'longDesc': 'long_desc',
'referenceType': 'reference_type',
'nodalDistance': 'nodal_distance',
'numLabels': 'num_labels',
# rmgpy.data.rmg
'thermoLibraries': 'thermo_libraries',
'transportLibraries': 'transport_libraries',
'reactionLibraries': 'reaction_libraries',
'seedMechanisms': 'seed_mechanisms',
'kineticsFamilies': 'kinetics_families',
'kineticsDepositories': 'kinetics_depositories',
'statmechLibraries': 'statmech_libraries',
# rmgpy.data.statmech
'thermoModel': 'thermo_model',
# rmgpy.data.thermo
'groupAdditivity': 'group_additivity',
'thermoDataList': 'thermo_data_list',
'trainingSet': 'training_set',
'bindingEnergies': 'binding_energies',
# rmgpy.data.kinetics.database
'thermoDatabase': 'thermo_database',
'fixBarrierHeight': 'fix_barrier_height',
'forcePositiveBarrier': 'force_positive_barrier',
# rmgpy.data.kinetics.family
'forwardTemplate': 'forward_template',
'forwardRecipe': 'forward_recipe',
'reverseTemplate': 'reverse_template',
'reverseRecipe': 'reverse_recipe',
'boundaryAtoms': 'boundary_atoms',
'treeDistance': 'tree_distance',
'depositoryLabels': 'depository_labels',
'returnAllKinetics': 'return_all_kinetics',
# rmgpy.data.kinetics.library
'markDuplicates': 'mark_duplicates',
# rmgpy.data.kinetics.rules
'rootTemplate': 'root_template',
'alreadyDone': 'already_done',
'kList': 'k_list',
# rmgpy.kinetics.model
'otherKinetics': 'other_kinetics',
'ctReaction': 'ct_reaction',
'speciesList': 'species_list',
# rmgpy.kinetics.arrhenius
'threeParams': 'three_params',
# rmgpy.kinetics.diffusionLimited
'solvationDatabase': 'solvation_database',
# rmgpy.molecule.adjlist
'saturateH': 'saturate_h',
'removeH': 'remove_h',
'removeLonePairs': 'remove_lone_pairs',
'oldStyle': 'old_style',
# rmgpy.molecule.atomtype
'incrementBond': 'increment_bond',
'decrementBond': 'decrement_bond',
'formBond': 'form_bond',
'breakBond': 'break_bond',
'incrementRadical': 'increment_radical',
'decrementRadical': 'decrement_radical',
'incrementLonePair': 'increment_lone_pair',
'decrementLonePair': 'decrement_lone_pair',
# rmgpy.molecule.converter
'removeHs': 'remove_h',
'returnMapping': 'return_mapping',
# rmgpy.molecule.draw
'heavyFirst': 'heavy_first',
'drawLonePairs': 'draw_lone_pairs',
# rmgpy.molecule.element
'chemkinName': 'chemkin_name',
# rmgpy.molecule.graph
'saveOrder': 'save_order',
'initialMap': 'initial_map',
'startingVertex': 'starting_vertex',
# rmgpy.molecule.molecule
'lonePairs': 'lone_pairs',
'newOrder': 'new_order',
'otherOrder': 'other_order',
'isSingle': 'is_single',
'isDouble': 'is_double',
'isTriple': 'is_triple',
'isQuadruple': 'is_quadruple',
'isBenzene': 'is_benzene',
'isHydrogenBond': 'is_hydrogen_bond',
'radicalElectrons': 'radical_electrons',
'logSpecies': 'log_species',
'raiseException': 'raise_exception',
'generateInitialMap': 'generate_initial_map',
'atomicNums': 'atomic_nums',
'aromaticRings': 'aromatic_rings',
'startingAtoms': 'starting_atoms',
'distanceList': 'distance_list',
'ignoreList': 'ignore_list',
# rmgpy.molecule.group
'connectingAtom': 'connecting_atom',
'bondOrders': 'bond_orders',
'keepIdenticalLabels': 'keep_identical_labels',
# rmgpy.pdep.collision
'densStates': 'dens_states',
'Elist': 'e_list',
'Jlist': 'j_list',
'Ereac': 'e_reac',
# rmgpy.pdep.configuration
'bathGas': 'bath_gas',
# rmgpy.pdep.cse
'lumpingOrder': 'lumping_order',
# rmgpy.pdep.msc
'efficiencyModel': 'efficiency_model',
# rmgpy.pdep.network
'pathReactions': 'path_reactions',
'netReactions': 'net_reactions',
'Ngrains': 'n_grains',
'NJ': 'n_j',
'grainSize': 'grain_size',
'grainCount': 'grain_count',
'maximumGrainSize': 'maximum_grain_size',
'minimumGrainCount': 'minimum_grain_count',
'errorCheck': 'error_check',
# rmgpy.pdep.reaction
'reacDensStates': 'reac_dens_states',
'prodDensStates': 'prod_dens_states',
# rmgpy.qm.symmetry
'pointGroup': 'point_group',
'symmetryNumber': 'symmetry_number',
'uniqueID': 'unique_id',
'qmData': 'qm_data',
# rmgpy.rmg.main
'inputFile': 'input_file',
'outputDirectory': 'output_directory',
'firstTime': 'first_time',
'rxnSysUnimolecularThreshold': 'rxn_sys_unimol_threshold',
'rxnSysBimolecularThreshold': 'rxn_sys_bimol_threshold',
'rxnSysTrimolecularThreshold': 'rxn_sys_trimol_threshold',
'skipUpdate': 'skip_update',
'modulePath': 'module_path',
'reactionSystem': 'reaction_system',
# rmgpy.rmg.model
'checkForExisting': 'check_existing',
'checkExisting': 'check_existing',
'generateThermo': 'generate_thermo',
'newObject': 'new_object',
'reactEdge': 'react_edge',
'unimolecularReact': 'unimolecular_react',
'bimolecularReact': 'bimolecular_react',
'trimolecularReact': 'trimolecular_react',
'newSurfaceSpecies': 'new_surface_species',
'newReactions': 'new_reactions',
'newSpecies': 'new_species',
'pdepNetwork': 'pdep_network',
'newCoreSpecies': 'new_core_species',
'newCoreReactions': 'new_core_reactions',
'newEdgeSpecies': 'new_edge_species',
'newEdgeReactions': 'new_edge_reactions',
'reactionsMovedFromEdge': 'reactions_moved_from_edge',
'toleranceThermoKeepSpeciesInEdge': 'thermo_tol_keep_spc_in_edge',
'minCoreSizeForPrune': 'min_core_size_for_prune',
'maximumEdgeSpecies': 'maximum_edge_species',
'reactionSystems': 'reaction_systems',
'minSpeciesExistIterationsForPrune': 'min_species_exist_iterations_for_prune',
'seedMechanism': 'seed_mechanism',
'reactionLibrary': 'reaction_library',
# rmgpy.rmg.output
'partCoreEdge': 'part_core_edge',
# rmgpy.rmg.pdep
'pdepSettings': 'pdep_settings',
# rmgpy.rmg.settings
'toleranceKeepInEdge': 'tol_keep_in_edge',
'toleranceMoveToCore': 'tol_move_to_core',
'toleranceInterruptSimulation': 'tol_interrupt_simulation',
'toleranceMoveEdgeReactionToCore': 'tol_move_edge_rxn_to_core',
'filterThreshold': 'filter_threshold',
'ignoreOverallFluxCriterion': 'ignore_overall_flux_criterion',
'toleranceMoveEdgeReactionToSurface': 'tol_move_edge_rxn_to_surface',
'toleranceMoveSurfaceSpeciesToCore': 'tol_move_surface_spc_to_core',
'toleranceMoveSurfaceReactionToCore': 'tol_move_surface_rxn_to_core',
'terminateAtMaxObjects': 'terminate_at_max_objects',
'dynamicsTimeScale': 'dynamics_time_scale',
'toleranceBranchReactionToCore': 'tol_branch_rxn_to_core',
'branchingIndex': 'branching_index',
'branchingRatioMax': 'branching_ratio_max',
'toleranceMoveEdgeReactionToSurfaceInterrupt': 'tol_move_edge_rxn_to_surface_interrupt',
'toleranceMoveEdgeReactionToCoreInterrupt': 'tol_move_edge_rxn_to_core_interrupt',
'maxNumSpecies': 'max_num_species',
'maxNumObjsPerIter': 'max_num_objects_per_iter',
# rmgpy.solver.base
'sensitiveSpecies': 'sensitive_species',
'sensitivityThreshold': 'sensitivity_threshold',
'coreSpecies': 'core_species',
'coreReactions': 'core_reactions',
'edgeSpecies': 'edge_species',
'edgeReactions': 'edge_reactions',
'surfaceSpecies': 'surface_species',
'surfaceReactions': 'surface_reactions',
'pdepNetworks': 'pdep_networks',
'filterReactions': 'filter_reactions',
'newSurfaceReactions': 'new_surface_reactions',
'newSurfaceReactionInds': 'new_surface_reaction_inds',
'sensWorksheet': 'sens_worksheet',
'modelSettings': 'model_settings',
'simulatorSettings': 'simulator_settings',
'charRate': 'char_rate',
'speciesRate': 'species_rate',
'maxDifLnAccumNum': 'max_dif_ln_accum_num',
'networkRate': 'network_rate',
'speciesIndex': 'species_index',
# rmgpy.solver.simple
'initialMoleFractions': 'initial_mole_fractions',
'nSims': 'n_sims',
'sensConditions': 'sens_conditions',
# rmgpy.solver.liquid
'initialConcentrations': 'initial_concentrations',
'constSPCNames': 'const_spc_names',
# rmgpy.solver.surface
'initialP': 'P_initial',
'initialGasMoleFractions': 'initial_gas_mole_fractions',
'initialSurfaceCoverages': 'initial_surface_coverages',
'surfaceVolumeRatio': 'surface_volume_ratio',
# rmgpy.statmech.conformer
'spinMultiplicity': 'spin_multiplicity',
'opticalIsomers': 'optical_isomers',
# rmgpy.statmech.torsion
'Nbasis': 'n_basis',
'sumStates0': 'sum_states_0',
'densStates0': 'dens_states_0',
# rmgpy.species
'transportData': 'transport_data',
'molecularWeight': 'molecular_weight',
'energyTransferModel': 'energy_transfer_model',
'creationIteration': 'creation_iteration',
'explicitlyAllowed': 'explicitly_allowed',
'useChemkinIdentifier': 'use_chemkin_identifier',
'solventName': 'solvent_name',
# rmgpy.reaction
'eitherDirection': 'either_direction',
'checkIdentical': 'check_identical',
'checkOnlyLabel': 'check_only_label',
'checkTemplateRxnProducts': 'check_template_rxn_products',
'surfaceSiteDensity': 'surface_site_density',
'forcePositive': 'force_positive',
'kForward': 'k_forward',
'reverseUnits': 'reverse_units',
# rmgpy.chemkin
'speciesDict': 'species_dict',
'reactionList': 'reaction_list',
'javaLibrary': 'java_library',
'reactionModel': 'reaction_model',
'dictionaryPath': 'dictionary_path',
'transportPath': 'transport_path',
'thermoPath': 'thermo_path',
'saveEdgeSpecies': 'save_edge_species',
'checkForDuplicates': 'check_for_duplicates',
'elementCounts': 'element_counts',
'readComments': 'read_comments',
'useChemkinNames': 'use_chemkin_names',
'checkDuplicates': 'check_duplicates',
}
ARGUMENTS2 = {
# rmgpy.data.base
'numParameters': 'num_parameters',
'nodeOther': 'node_other',
'parentNode': 'parent_node',
'childNode': 'child_node',
'groupToRemove': 'group_to_remove',
# rmgpy.data.solvation
'solventViscosity': 'solvent_viscosity',
'saturatedStruct': 'saturated_struct',
'addedToRadicals': 'added_to_radicals',
'addedToPairs': 'added_to_pairs',
'soluteData': 'solute_data',
'solventData': 'solvent_data',
'solventStructure': 'solvent_structure',
# rmgpy.data.thermo
'thermoData1': 'thermo_data1',
'thermoData2': 'thermo_data2',
'stableThermoEstimator': 'stable_thermo_estimator',
'thermoData': 'thermo_data',
# rmgpy.data.transport
'groupData': 'group_data',
# rmgpy.data.kinetics.family
'doForward': 'forward',
'reactantStructures': 'reactant_structures',
'templateReactant': 'template_reactant',
'kineticsList': 'kinetics_list',
'templateLabels': 'template_labels',
'templateRxnMap': 'template_rxn_map',
'minSplitableEntryNum': 'min_splitable_entry_num',
'minRxnsToSpawn': 'min_rxns_to_spawn',
'maxBatchSize': 'max_batch_size',
'outlierFraction': 'outlier_fraction',
'stratumNum': 'stratum_num',
'maxRxnsToReoptNode': 'max_rxns_to_reopt_node',
'fixLabels': 'fix_labels',
'exactMatchesOnly': 'exact_matches_only',
'getReverse': 'get_reverse',
'testRxnInds': 'test_rxn_inds',
'keepRoot': 'keep_root',
'removeDegeneracy': 'remove_degeneracy',
'estimateThermo': 'estimate_thermo',
'templateLabel': 'template_label',
'childConn': 'child_conn',
# rmgpy.data.kinetics.groups
'referenceKinetics': 'reference_kinetics',
# rmgpy.molecule.vf2
'initialMapping': 'initial_mapping',
'findAll': 'find_all',
'callDepth': 'call_depth',
# rmgpy.molecule.molecule
'InChI': 'inchi',
'SMILES': 'smiles',
# rmgpy.molecule.group
'atomList': 'atom_list',
'R': 'r',
'atmInd': 'atm_ind',
'atmInd2': 'atm_ind2',
'Nsplits': 'n_splits',
'Run': 'r_un',
'Rbonds': 'r_bonds',
# rmgpy.statmech.ndTorsions
'calcPath': 'calc_path',
'isLinear': 'is_linear',
'isTS': 'is_ts',
# rmgpy.thermo.thermoengine
'thermoClass': 'thermo_class',
# rmgpy.thermo.wilhoit
'contCons': 'cont_cons',
# rmgpy.quantity
'uncertaintyType': 'uncertainty_type',
'commonUnits': 'common_units',
'extraDimensionality': 'extra_dimensionality',
# rmgpy.tools.canteraModel
'reactorType': 'reactor_type',
'reactionTime': 'reaction_time',
'molFrac': 'mol_frac',
'reactorTypeList': 'reactor_type_list',
'reactionTimeList': 'reaction_time_list',
'molFracList': 'mol_frac_list',
'reactionMap': 'reaction_map',
'chemkinFile': 'chemkin_file',
'transportFile': 'transport_file',
'rmgReactionIndex': 'rmg_reaction_index',
'rmgReaction': 'rmg_reaction',
'rmgSpeciesIndex': 'rmg_species_index',
'rmgSpecies': 'rmg_species',
'topSpecies': 'top_species',
'topSensitiveReactions': 'top_sensitive_reactions',
'userList': 'user_list',
'RMGList': 'rmg_list',
'ctSpec1': 'ct_spec1',
'ctSpec2': 'ct_spec2',
'ctRxn1': 'ct_rxn1',
'ctRxn2': 'ct_rxn2',
'checkID': 'check_id',
# rmgpy.tools.fluxdiagram
'reactionRates': 'reaction_rates',
'centralSpeciesList': 'central_species_list',
'speciesDirectory': 'species_directory',
'outputFile': 'output_file',
'savePath': 'save_path',
'speciesPath': 'species_path',
'chemkinOutput': 'chemkin_output',
'saveStates': 'save_states',
'readStates': 'read_states',
# rmgpy.tools.isotopes
'useOriginalReactions': 'use_original_reactions',
'kineticIsotopeEffect': 'kinetic_isotope_effect',
'maximumIsotopicAtoms': 'maximum_isotopic_atoms',
# rmgpy.tools.loader
'generateImages': 'generate_images',
'useJava': 'use_java',
# rmgpy.tools.observablesRegression
'oldDir': 'old_dir',
'newDir': 'new_dir',
'exptData': 'expt_data',
# rmgpy.tools.plot
'reactionSystemIndex': 'reaction_system_index',
'sensitiveSpeciesList': 'sensitive_species_list',
'xArray': 'x_array',
'yArray': 'y_array',
'xValue': 'x_value',
'xVar': 'x_var',
'yVar': 'y_var',
'csvFile': 'csv_file',
'numSpecies': 'num_species',
'numReactions': 'num_reactions',
'totalVariance': 'total_variance',
# rmgpy.tools.simulate
'diffusionLimited': 'diffusion_limited',
'dictFile': 'dict_file',
# rmgpy.tools.uncertainty
'corrSourceType': 'corr_source_type',
'corrParam': 'corr_param',
'corrGroupType': 'corr_group_type',
'corrFamily': 'corr_family',
'gParamEngine': 'g_param_engine',
'kParamEngine': 'k_param_engine',
'chemkinPath': 'chemkin_path',
'terminationTime': 'termination_time',
}
# Names which are risky to replace using regex
DANGEROUS = ['SMILES', 'InChI', 'R', 'Run', 'atomTypes', 'format', 'F']
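# Note: these entries are likely excluded by default because they are short or very
# common identifiers (e.g. 'R', 'F', 'format'), so even a word-boundary regex could
# rewrite unrelated code; the -x/--ignore-danger flag defined below re-enables them.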
def main(path, write=False, nobackups=False):
"""
Function to analyze and update files with potential name replacements.
Args:
path: path to the file to be analyzed
write: save the fixed file to disk
nobackups: disable saving a backup of the original file
"""
if write and not nobackups:
dirname, filename = os.path.split(path)
backup_path = os.path.join(dirname, filename + '.backup')
shutil.copyfile(path, backup_path)
with open(path, 'r') as f:
original = f.read()
fixed = original
for pattern, newname in tqdm(replacements):
result = find_name_in_line(pattern, fixed, replacement=newname)
if result:
fixed = result
    # Materialize the generator so an empty diff is falsy and the else branch can run
    diff = list(difflib.unified_diff(
        original.splitlines(keepends=True),
        fixed.splitlines(keepends=True),
        fromfile=path,
        tofile=path,
    ))
    if diff:
sys.stdout.writelines(diff)
if write:
with open(path, 'w') as f:
                f.write(fixed)
else:
print('No changes detected')
def compile_regex(replacements, args=False, attr=False, words=True, avoid_danger=True):
"""
Compile regex expressions for the given replacements. By default, checks for
the name to be replaced as a word, i.e. surrounded by non-word characters.
Args:
replacements: dictionary of oldname, newname pairs
args: require equal sign following name and no period before name
attr: require period preceding name
words: require name to be a standalone word
avoid_danger: do not replace names in the DANGEROUS list
"""
patterns = []
for oldname, newname in replacements.items():
if avoid_danger and oldname in DANGEROUS:
continue
pattern = r''
if words:
if args:
# Require no period or word characters preceding the name
pattern += r'(?<![\w\.])'
elif attr:
# Require period preceding the name
pattern += r'(?<=\.)'
else:
# Require no word characters preceding the name
pattern += r'(?<!\w)'
pattern += oldname
if words:
if args:
# Require equal sign following the name
pattern += r'(?=\s*=\s*)'
else:
# Require no word characters following the name
pattern += r'(?!\w)'
patterns.append((re.compile(pattern), newname))
return patterns
def find_name_in_line(pattern, line, replacement=None):
"""
Use regex to replace the name in the line.
Args:
pattern: regex pattern to search for
line: string in which to search for pattern
replacement: if provided, text to replace the pattern with
"""
if re.search(pattern, line):
if replacement is not None:
return re.sub(pattern, replacement, line)
else:
return True
else:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('filename', type=str, nargs='+')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-0', '--both-stages', action='store_true', help='check all possible replacements')
group.add_argument('-1', '--stage1', action='store_true', help='check common replacements')
group.add_argument('-2', '--stage2', action='store_true', help='check uncommon replacements')
parser.add_argument('-w', '--write', action='store_true', help='perform replacements and write to file')
parser.add_argument('-n', '--nobackups', action='store_true', help='do not save a backup file')
parser.add_argument('-x', '--ignore-danger', action='store_true', help='do not avoid dangerous replacements')
parser.add_argument('-y', '--all-args', action='store_true', help='disable `=` requirement for arg replacements')
parser.add_argument('-z', '--all-attr', action='store_true', help='disable `.` requirement for attr replacements')
parser.add_argument('-a', '--all', action='store_true', help='look for all appearances of names, implies `-xyz`')
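    # Illustrative invocations (the script filename below is a placeholder, not
    # taken from the original source):
    #     python rename_script.py -1 my_module.py        # preview stage-1 (common) replacements
    #     python rename_script.py -0 -w -n my_module.py  # apply all replacements, skip backups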
arguments = parser.parse_args()
# Compile regex expressions
args = not (arguments.all or arguments.all_args)
attr = not (arguments.all or arguments.all_attr)
avoid_danger = not (arguments.all or arguments.ignore_danger)
words = not arguments.all
replacements = []
if arguments.both_stages or arguments.stage1:
replacements.extend(compile_regex(MODULES, args=False, attr=False, words=words, avoid_danger=avoid_danger))
replacements.extend(compile_regex(GLOBALS1, args=False, attr=False, words=words, avoid_danger=avoid_danger))
replacements.extend(compile_regex(METHODS1, args=False, attr=attr, words=words, avoid_danger=avoid_danger))
replacements.extend(compile_regex(ATTRIBUTES1, args=False, attr=attr, words=words, avoid_danger=avoid_danger))
replacements.extend(compile_regex(ARGUMENTS1, args=args, attr=False, words=words, avoid_danger=avoid_danger))
if arguments.both_stages or arguments.stage2:
replacements.extend(compile_regex(GLOBALS2, args=False, attr=False, words=words, avoid_danger=avoid_danger))
replacements.extend(compile_regex(METHODS2, args=False, attr=attr, words=words, avoid_danger=avoid_danger))
replacements.extend(compile_regex(ATTRIBUTES2, args=False, attr=attr, words=words, avoid_danger=avoid_danger))
replacements.extend(compile_regex(ARGUMENTS2, args=args, attr=False, words=words, avoid_danger=avoid_danger))
for fname in arguments.filename:
filepath = os.path.abspath(fname)
print('Processing {0}'.format(filepath))
main(
filepath,
write=arguments.write,
nobackups=arguments.nobackups,
)
|
libs/SettingDialog_EN.py | lzx1413/labelImgPlus | 218 | 12681401 | from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5 import QtWidgets
import socket
import re
class SettingDialog(QtWidgets.QDialog):
enable_color_map = False
label_font_size = 10
task_mode = 0 #0=det, 1=seg, 2=cls
signal_conn = pyqtSignal(list)
def __init__(self, parent,config):
QtWidgets.QDialog.__init__(self, parent)
self.resize(320, 240)
self.__class__.task_mode = config['task_mode']
self.__class__.label_font_size = config['label_font_size']
self.init_UI()
def createModeGroup(self):
'''
        set the task mode setting group
:return: mode group
'''
self.modegroupBox = QtWidgets.QGroupBox("& Task Mode")
self.modegroupBox.setCheckable(True)
self.modegroupBox.setChecked(True)
self.CLS_mode_rb = QtWidgets.QRadioButton("CLS Mode")
self.CLS_mode_rb.clicked.connect(self.CLS_model_selected)
self.DET_mode_rb = QtWidgets.QRadioButton("DET Mode")
self.DET_mode_rb.clicked.connect(self.DET_model_selected)
self.SEG_mode_rb = QtWidgets.QRadioButton("SEG Mode")
self.SEG_mode_rb.clicked.connect(self.SEG_model_selected)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.CLS_mode_rb)
vbox.addWidget(self.DET_mode_rb)
vbox.addWidget(self.SEG_mode_rb)
vbox.addStretch(True)
self.modegroupBox.setLayout(vbox)
return self.modegroupBox
def createDEToptGroup(self):
self.detgroupBox = QtWidgets.QGroupBox("& DET options")
self.enable_show_label_cb = QtWidgets.QCheckBox('enable show label name')
self.label_font_size_sl = QtWidgets.QSlider(Qt.Horizontal)
self.label_font_size_sl.setRange(5,50)
self.label_font_size_sp = QtWidgets.QSpinBox()
self.label_font_size_sp.setRange(5,50)
self.signal_conn.connect(self.font_conn)
self.label_font_size_sl.valueChanged.connect(self.change_label_font_size)
self.label_font_size_sl.setValue(self.__class__.label_font_size)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.enable_show_label_cb)
vbox.addWidget(QtWidgets.QLabel('label font size'))
vbox.addWidget(self.label_font_size_sl)
vbox.addWidget(self.label_font_size_sp)
vbox.addStretch()
self.detgroupBox.setLayout(vbox)
return self.detgroupBox
def font_conn(self):
self.label_font_size_sl = QtWidgets.QSlider(Qt.Horizontal)
self.label_font_size_sl.setRange(5,50)
self.label_font_size_sp = QtWidgets.QSpinBox()
self.label_font_size_sp.setRange(5,50)
def createCLSoptGroup(self):
self.clsgroupBox = QtWidgets.QGroupBox("& CLS options")
#self.single_label_rb = QtGui.QRadioButton("single label")
#self.multi_label_rb = QtGui.QRadioButton("multi label")
vbox = QtWidgets.QVBoxLayout()
#vbox.addWidget(self.single_label_rb)
#vbox.addWidget(self.multi_label_rb)
vbox.addStretch(True)
self.clsgroupBox.setLayout(vbox)
return self.clsgroupBox
def createSEGoptGroup(self):
self.seggroupBox = QtWidgets.QGroupBox("& SEG options")
self.enable_color_map_cb = QtWidgets.QCheckBox('enable color map')
if self.__class__.enable_color_map:
self.enable_color_map_cb.toggle()
self.enable_color_map_cb.stateChanged.connect(
self.change_color_enable_state)
if self.__class__.enable_color_map:
self.enable_color_map_cb.setChecked(True)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.enable_color_map_cb)
vbox.addStretch(True)
self.seggroupBox.setLayout(vbox)
return self.seggroupBox
def init_UI(self):
main_v_layout = QtWidgets.QVBoxLayout()
grid = QtWidgets.QGridLayout()
grid.addWidget(self.createModeGroup(),0,0)
grid.addWidget(self.createDEToptGroup(),1,0)
grid.addWidget(self.createCLSoptGroup(),2,0)
grid.addWidget(self.createSEGoptGroup(),3,0)
if self.__class__.task_mode == 0:
self.DET_mode_rb.setChecked(True)
self.DET_model_selected()
elif self.__class__.task_mode == 1:
self.SEG_mode_rb.setChecked(True)
self.SEG_model_selected()
elif self.__class__.task_mode == 2:
self.CLS_mode_rb.setChecked(True)
self.CLS_model_selected()
buttonBox = QtWidgets.QDialogButtonBox(parent=self)
buttonBox.setOrientation(Qt.Horizontal)
buttonBox.setStandardButtons(
QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
main_v_layout.addLayout(grid)
spacerItem = QtWidgets.QSpacerItem(
20, 48, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
main_v_layout.addItem(spacerItem)
main_v_layout.addWidget(buttonBox)
self.setLayout(main_v_layout)
def CLS_model_selected(self):
self.__class__.task_mode = 2
self.clsgroupBox.setDisabled(False)
self.detgroupBox.setDisabled(True)
self.seggroupBox.setDisabled(True)
def DET_model_selected(self):
self.__class__.task_mode = 0
self.detgroupBox.setDisabled(False)
self.clsgroupBox.setDisabled(True)
self.seggroupBox.setDisabled(True)
def SEG_model_selected(self):
self.__class__.task_mode = 1
self.seggroupBox.setDisabled(False)
self.detgroupBox.setDisabled(True)
self.clsgroupBox.setDisabled(True)
def change_color_enable_state(self, state):
        if state == Qt.Checked:
self.__class__.enable_color_map = True
else:
self.__class__.enable_color_map = False
def change_label_font_size(self,value):
self.__class__.label_font_size = value
def get_color_map_state(self):
return self.__class__.enable_color_map
def get_setting_state(self):
if self.__class__.task_mode == 0:
return {'mode': 0,'enable_color_map':self.__class__.enable_color_map,'label_font_size': self.__class__.label_font_size}
elif self.__class__.task_mode == 1:
return {'mode': 1,'enable_color_map':self.__class__.enable_color_map}
elif self.__class__.task_mode == 2:
return {'mode': 2}
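# Minimal standalone usage sketch (assumption: the config dict below is
# hand-written for illustration; the real tool presumably supplies its own
# parent window and configuration).
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = SettingDialog(None, {'task_mode': 0, 'label_font_size': 10})
    if dialog.exec_() == QtWidgets.QDialog.Accepted:
        # Returns e.g. {'mode': 0, 'enable_color_map': False, 'label_font_size': 10}
        print(dialog.get_setting_state())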
|
dreamer/tools/chunk_sequence.py | jsikyoon/dreamer-1 | 546 | 12681407 | # Copyright 2019 The Dreamer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from dreamer.tools import nested
def chunk_sequence(sequence, chunk_length, randomize=True, num_chunks=None):
if 'length' in sequence:
length = sequence.pop('length')
else:
length = tf.shape(nested.flatten(sequence)[0])[0]
if randomize:
if not num_chunks:
num_chunks = tf.maximum(1, length // chunk_length - 1)
else:
num_chunks = num_chunks + 0 * length
used_length = num_chunks * chunk_length
max_offset = length - used_length
offset = tf.random_uniform((), 0, max_offset + 1, dtype=tf.int32)
else:
if num_chunks is None:
num_chunks = length // chunk_length
else:
num_chunks = num_chunks + 0 * length
used_length = num_chunks * chunk_length
offset = 0
clipped = nested.map(
lambda tensor: tensor[offset: offset + used_length],
sequence)
chunks = nested.map(
lambda tensor: tf.reshape(
tensor, [num_chunks, chunk_length] + tensor.shape[1:].as_list()),
clipped)
return chunks
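# Shape sketch (assumption: TF1-style tensors, e.g. produced inside a tf.data
# pipeline where this function is mapped over episode records):
#   sequence = {'obs': <float32 [100, 64, 64, 3]>, 'action': <float32 [100, 4]>}
#   chunks = chunk_sequence(sequence, chunk_length=10, randomize=False)
#   -> chunks['obs'] has shape [10, 10, 64, 64, 3], chunks['action'] [10, 10, 4]
# With randomize=True, num_chunks defaults to max(1, length // chunk_length - 1)
# and a random offset is drawn, so chunk boundaries vary between epochs.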
|
py-bindings/ompl/morse/addons/ompl_addon.py | ericpairet/ompl | 837 | 12681411 | <filename>py-bindings/ompl/morse/addons/ompl_addon.py<gh_stars>100-1000
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: <NAME>
## \brief Information about this addon
bl_info = {
"name":"OMPL Interface",
"category":"Game Engine",
"description":"Planning with OMPL (requires MORSE)",
"location":"Game > OMPL",
"author":"<NAME>"
}
import configparser
import os
import socket
import subprocess
import sys
import time
import bpy
import ompl.morse.environment
OMPL_DIR = ompl.morse.__path__[0]
inf = float('inf')
# #
# Addon operators (actions in the menu the user can execute)
# #
##
# \brief Invoke OMPL Planning
class Plan(bpy.types.Operator):
bl_idname = "ompl.plan"
bl_label = "Plan..."
## \brief File where planner should save solution path
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
##
# \brief Called when the dialogs finish; starts up the simulation
def execute(self, _):
print('Starting planner...')
print("Planning on %s, saving to %s" % (bpy.data.filepath, self.filepath))
subprocess.Popen(['morse', '-c', 'run', 'ompl', OMPL_DIR+'/builder.py', '--', \
bpy.data.filepath, self.filepath, 'PLAN'])
return {'FINISHED'}
##
# \brief Called when the button is pressed; double-check configuration and
# ask for a file to save the path to
def invoke(self, context, _):
# Double-check goal object properties to make sure they're out of the way and
# connected properly
for obj in bpy.data.objects:
            if any(obj.name.endswith(goalStr)
                   for goalStr in ['.goalPose', '.goalRegion', '.goalRot']):
obj.hide_render = True
if obj.name.endswith('.goalRegion'):
obj.game.physics_type = 'SENSOR'
body = bpy.data.objects.get(obj.name[:-11])
if not body:
continue
collider = obj.game.sensors.get("__collision")
if not collider:
# Link up a collision sensor
bpy.ops.logic.sensor_add(type='COLLISION', name="__collision", object=obj.name)
collider = obj.game.sensors.get("__collision")
collider.property = body.name.replace('.', '_')
# Just to make the sensor active
dummy = obj.game.controllers.get("__dummy")
if not dummy:
bpy.ops.logic.controller_add(type='EXPRESSION', name="__dummy", object=obj.name)
dummy = obj.game.controllers["__dummy"]
dummy.expression = 'TRUE'
collider.link(dummy)
else:
obj.game.physics_type = 'NO_COLLISION'
if not context.scene.objects.get('ompl_settings'):
# Bounds Configuration hasn't been setup for this file yet
bpy.ops.ompl.boundsconfig('INVOKE_DEFAULT')
else:
settings = context.scene.objects['ompl_settings']
if settings.get('autopb') is None:
# Bounds Configuration hasn't been setup for this file yet
bpy.ops.ompl.boundsconfig('INVOKE_DEFAULT')
# Save any changes so MORSE sees them when it loads the file
bpy.ops.wm.save_mainfile()
# File selector for the path output file
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
@bpy.app.handlers.persistent
def import_and_resave(animpath):
bpy.app.handlers.load_post.clear()
animtmppath = animpath + ".tmp"
print("OMPL: appending animation data")
with bpy.data.libraries.load(filepath=animtmppath) as (_, t):
t.scenes = ['S.MORSE_LOGIC']
print("OMPL: deleting tmp file")
os.remove(animtmppath)
bpy.data.scenes.remove(bpy.data.scenes['Scene'])
bpy.data.scenes['S.MORSE_LOGIC'].name = 'Scene'
bpy.context.screen.scene = bpy.data.scenes['Scene']
bpy.ops.wm.save_mainfile(filepath=animpath)
##
# \brief Invoke Path Playback
class Play(bpy.types.Operator):
bl_idname = "ompl.play"
bl_label = "Playback and save"
## \brief File where the planner wrote the solution path
filepath = bpy.props.StringProperty(name="Solution file", \
description="File where where the OMPL planner saved a solution path", subtype="FILE_PATH")
##
# \brief Called when the dialogs finish; starts up the simulation
def execute(self, context):
animpath = context.scene.objects['ompl_settings']['Animpath']
if animpath == '':
self.report({'ERROR'}, "Choose animation save file first!")
return {'FINISHED'}
self.report({'WARNING'}, "Switching to .blend file: '" + animpath + "'")
print('Starting player...')
print("Playing %s with %s" % (bpy.data.filepath, self.filepath))
subprocess.run(['morse', '-c', 'run', 'ompl', OMPL_DIR+'/builder.py', '--', bpy.data.filepath, self.filepath, 'PLAY'])
# Load blank file. Append animated objects. Re-save.
print("OMPL: Will save animation data to '" + animpath + "'")
cont = bpy.app.handlers.persistent(lambda _: import_and_resave(animpath))
bpy.app.handlers.load_post.append(cont)
blankpath = OMPL_DIR + '/resources/blank.blend'
print("OMPL: Loading blank file")
bpy.ops.wm.open_mainfile(filepath=blankpath)
return {'FINISHED'}
##
# \brief Called when the button is pressed; prompts for the path file
def invoke(self, context, _):
if not context.scene.objects.get('ompl_settings'):
# Select an animation save file
bpy.ops.ompl.animfile('INVOKE_DEFAULT')
elif not context.scene.objects['ompl_settings'].get('Animpath'):
# Select an animation save file
bpy.ops.ompl.animfile('INVOKE_DEFAULT')
# Save any changes so MORSE sees them when it loads the file
bpy.ops.wm.save_mainfile()
# File selector for the path file
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
##
# \brief Compile a list of usable MORSE robots
def getRobots():
import morse.builder
# This is a list of incompatible robots (e.g., some use controllers that require you to explicitly
# name the internal variable you want to change instead of merely accepting a list of control values).
# If you write your own controller that is compatible, feel free to take the robot out of this blacklist
excluded_robots = ['B21', 'BarePR2', 'BasePR2', 'Human', 'Hummer', 'Jido', \
'LocalizedPR2', 'NavPR2', 'Victim']
robotEnum = []
i = 0
for cname in dir(morse.builder.robots):
c = getattr(morse.builder.robots, cname)
# Is c a class?
if isinstance(c, type):
# Does it inherit from Robot and is it neither Robot nor WheeledRobot?
if issubclass(c, morse.builder.Robot) and c != morse.builder.Robot and c != morse.builder.WheeledRobot:
                # Is it not in our exclusions list?
if cname not in excluded_robots:
# Add an entry for it
robotEnum.append((cname, cname, 'morse.builder.robots.' + cname, i))
i += 1
    # Put them in reverse alphabetical order (dir() returns names alphabetically)
robotEnum.reverse()
return robotEnum
##
# \brief Compile list of controllers
def getControllers():
import morse.builder
# Exclude controllers that require non-numeric parameters, don't have a socket interface, or are irrelevant;
# you may be able to rewrite some of these (e.g., SteerForce) with little modification so that they do
# accept purely numeric inputs
excluded_controllers = ['Armature', 'Destination', 'ForceTorque', 'Gripper', 'Joystick', \
'Keyboard', 'KukaLWR', 'Light', 'Mocap', 'MocapControl', 'MotionXYW', 'Orientation', \
'PTU', 'RotorcraftAttitude', 'Sound', 'SteerForce']
controllerEnum = []
i = 0
for cname in dir(morse.builder.actuators):
c = getattr(morse.builder.actuators, cname)
# Is c a class?
if isinstance(c, type):
# Does it inherit from ActuatorCreator and is it not ActuatorCreator?
if issubclass(c, morse.builder.creator.ActuatorCreator) and \
c != morse.builder.creator.ActuatorCreator:
# Is it not in our exclusions list?
if cname not in excluded_controllers:
# Add an entry for it
controllerEnum.append((cname, cname, 'morse.builder.actuators.' + cname, i))
i += 1
controllerEnum.reverse()
return controllerEnum
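# Both helpers build 4-tuples in the format bpy.props.EnumProperty expects:
# (identifier, label, description, index). Illustrative entry only -- the exact
# names depend on the installed MORSE version:
#   ('MotionVW', 'MotionVW', 'morse.builder.actuators.MotionVW', 3)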
##
# \brief Add a MORSE Robot to the scene
class AddRobot(bpy.types.Operator):
bl_idname = "ompl.addrobot"
bl_label = "Add Robot..."
# Set up the robot and controller selection menus
robotEnum = [('', '', '')]
controllerEnum = [('', '', '')]
robot_type = bpy.props.EnumProperty(items=robotEnum, name="MORSE robot", \
description="A robot from the MORSE components library", default=robotEnum[-1][0])
controller_type = bpy.props.EnumProperty(items=controllerEnum, name="MORSE actuator", \
description="The actuator to control the robot", default=controllerEnum[-1][0])
##
# \brief Operator refuses to run if this returns false; requires
# Blender to be in Object Mode
@classmethod
def poll(cls, context):
return context.mode == 'OBJECT'
##
# \brief Add the model to the scene and set up some properties
def execute(self, context):
import morse.builder
# Add model for robot_type
robot = getattr(morse.builder, self.robot_type)()
robotObj = context.object
# Make visible in a render
robotObj.hide_render = False
# Remove unnecessary game properties
while robotObj.game.properties:
bpy.ops.object.game_property_remove()
# Add properties for robot_type and controller_type
robotObj['RobotType'] = self.robot_type
robotObj['ControllerType'] = self.controller_type
return {'FINISHED'}
##
# \brief Prompt for robot and controller selection
def invoke(self, context, _):
return context.window_manager.invoke_props_dialog(self)
##
# \brief Recursively add children to the selection
def _recurseSelectChildren(obj):
for child in obj.children:
_recurseSelectChildren(child)
bpy.ops.object.select_pattern(pattern=obj.name, case_sensitive=True)
##
# \brief Add a goal to the Scene
class AddGoal(bpy.types.Operator):
bl_idname = "ompl.addgoal"
bl_label = "Add Goal..."
# Parameters are the type of goal and the name of the object we define the goal for
body = bpy.props.StringProperty(name="Rigid Body", \
description="The body to define a goal for", default="")
goal_type = bpy.props.EnumProperty(items=[('goalRot', 'Rotation only', 'Rotation'), \
('goalPose', 'Pose', 'Position and Rotation')], name="Goal Type", \
description="The kind of goal specification", default='goalPose')
##
# \brief Operator refuses to run if this returns false; requires
# Blender to be in Object mode
@classmethod
def poll(cls, context):
return context.mode == 'OBJECT'
##
# \brief Create the goal object and set up its properties
def execute(self, context):
# Check that the object exists
if not bpy.data.objects.get(self.body):
self.report({'ERROR'}, "No such object: '%s'" % self.body)
return {'FINISHED'}
goalname = self.body + '.' + self.goal_type
bpy.ops.object.select_all(action='DESELECT')
# Duplicate object
bpy.ops.object.select_pattern(pattern=self.body, case_sensitive=True)
_recurseSelectChildren(bpy.data.objects.get(self.body))
bpy.ops.object.duplicate()
goalobj = context.selected_objects[0]
# Remove old custom properties
for prop in goalobj.keys():
del goalobj[prop]
if self.goal_type == 'goalPose':
# Add default locTol
goalobj['locTol'] = 0.5
# Add default rotTol
goalobj['rotTol'] = 0.2
# Rename goal object
goalobj.name = goalname
# Move object to cursor
goalobj.location = context.scene.cursor_location
return {'FINISHED'}
##
# \brief Prompt for the object name and goal type
def invoke(self, context, _):
return context.window_manager.invoke_props_dialog(self)
##
# \brief Choose animation save file
class AnimFile(bpy.types.Operator):
bl_idname = "ompl.animfile"
bl_label = "Choose animation save file..."
## \brief Second *.blend to save the animation data
filepath = bpy.props.StringProperty(name="Animation Save file", \
description="*.blend file where the animation curves should be saved to", subtype="FILE_PATH")
##
# \brief Save the name of the file for later
def execute(self, context):
context.scene.objects['ompl_settings']['Animpath'] = self.filepath
return {'FINISHED'}
##
# \brief Prompt for the animation save file
def invoke(self, context, _):
# Add the settings object if it doesn't exist
if not context.scene.objects.get('ompl_settings'):
bpy.ops.object.add()
context.object.name = 'ompl_settings'
settings = context.scene.objects['ompl_settings']
# Get the settings object out of the way
settings.hide = True
settings.hide_render = True
settings.hide_select = True
if not settings.get('Animpath'):
settings['Animpath'] = self.filepath
# Prompt for the name of the file to save to
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
##
# \brief Configure the state and control bounds
class BoundsConfiguration(bpy.types.Operator):
bl_idname = "ompl.boundsconfig"
bl_label = "Bounds Configuration..."
# Properties displayed in the dialog; p=position,l=linear,a=angular,c=control;
# x,y,z,m=min, X,Y,Z,M=max; handles up to 16 control inputs
autopb = bpy.props.BoolProperty(name="Automatic position bounds", \
description="Overrides user-provided numbers by analyzing the scene", \
default=True)
pbx = bpy.props.FloatProperty(name="Min", default=-1000, min=-1000, max=1000)
pbX = bpy.props.FloatProperty(name="Max", default=1000, min=-1000, max=1000)
pby = bpy.props.FloatProperty(name="Min", default=-1000, min=-1000, max=1000)
pbY = bpy.props.FloatProperty(name="Max", default=1000, min=-1000, max=1000)
pbz = bpy.props.FloatProperty(name="Min", default=-1000, min=-1000, max=1000)
pbZ = bpy.props.FloatProperty(name="Max", default=1000, min=-1000, max=1000)
lbm = bpy.props.FloatProperty(name="Min", default=-1000, min=-1000, max=1000)
lbM = bpy.props.FloatProperty(name="Max", default=1000, min=-1000, max=1000)
abm = bpy.props.FloatProperty(name="Min", default=-1000, min=-1000, max=1000)
abM = bpy.props.FloatProperty(name="Max", default=1000, min=-1000, max=1000)
cbm0 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM0 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm1 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM1 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm2 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM2 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm3 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM3 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm4 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM4 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm5 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM5 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm6 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM6 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm7 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM7 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm8 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM8 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm9 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM9 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm10 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM10 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm11 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM11 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm12 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM12 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm13 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM13 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm14 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM14 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
cbm15 = bpy.props.FloatProperty(name="Min", default=-10, min=-1000, max=1000)
cbM15 = bpy.props.FloatProperty(name="Max", default=10, min=-1000, max=1000)
##
# \brief Save all the settings and reset dialogs to new defaults
def execute(self, context):
# Save settings to the scene
settings = context.scene.objects['ompl_settings']
settings['autopb'] = self.autopb
settings['pbx'] = self.pbx
settings['pbX'] = self.pbX
settings['pby'] = self.pby
settings['pbY'] = self.pbY
settings['pbz'] = self.pbz
settings['pbZ'] = self.pbZ
settings['lbm'] = self.lbm
settings['lbM'] = self.lbM
settings['abm'] = self.abm
settings['abM'] = self.abM
for i in range(16):
settings['cbm%i'%i] = getattr(self, 'cbm%i'%i)
settings['cbM%i'%i] = getattr(self, 'cbM%i'%i)
# Allow dialog defaults to be changed by resetting the properties
del BoundsConfiguration.autopb, BoundsConfiguration.pbx, BoundsConfiguration.pbX,\
BoundsConfiguration.pby, BoundsConfiguration.pbY, BoundsConfiguration.pbz,\
BoundsConfiguration.pbZ, BoundsConfiguration.lbm, BoundsConfiguration.lbM,\
BoundsConfiguration.abm, BoundsConfiguration.abM
for i in range(16):
delattr(BoundsConfiguration, 'cbm%i'%i)
delattr(BoundsConfiguration, 'cbM%i'%i)
BoundsConfiguration.autopb = bpy.props.BoolProperty(name="Automatic position bounds", \
description="Overrides user-provided numbers by analyzing the scene", \
default=settings['autopb'])
BoundsConfiguration.pbx = bpy.props.FloatProperty(name="Min", default=settings['pbx'], min=-1000, max=1000)
BoundsConfiguration.pbX = bpy.props.FloatProperty(name="Max", default=settings['pbX'], min=-1000, max=1000)
BoundsConfiguration.pby = bpy.props.FloatProperty(name="Min", default=settings['pby'], min=-1000, max=1000)
BoundsConfiguration.pbY = bpy.props.FloatProperty(name="Max", default=settings['pbY'], min=-1000, max=1000)
BoundsConfiguration.pbz = bpy.props.FloatProperty(name="Min", default=settings['pbz'], min=-1000, max=1000)
BoundsConfiguration.pbZ = bpy.props.FloatProperty(name="Max", default=settings['pbZ'], min=-1000, max=1000)
BoundsConfiguration.lbm = bpy.props.FloatProperty(name="Min", default=settings['lbm'], min=-1000, max=1000)
BoundsConfiguration.lbM = bpy.props.FloatProperty(name="Max", default=settings['lbM'], min=-1000, max=1000)
BoundsConfiguration.abm = bpy.props.FloatProperty(name="Min", default=settings['abm'], min=-1000, max=1000)
BoundsConfiguration.abM = bpy.props.FloatProperty(name="Max", default=settings['abM'], min=-1000, max=1000)
for i in range(16):
setattr(BoundsConfiguration, 'cbm%i'%i, bpy.props.FloatProperty(name="Min", default=settings['cbm%i'%i], min=-1000, max=1000))
setattr(BoundsConfiguration, 'cbM%i'%i, bpy.props.FloatProperty(name="Max", default=settings['cbM%i'%i], min=-1000, max=1000))
# Refresh
bpy.utils.unregister_class(BoundsConfiguration)
bpy.utils.register_class(BoundsConfiguration)
return {'FINISHED'}
##
# \brief Query MORSE for control description, then open the dialog
def invoke(self, context, _):
# If the settings have not been set before, initialize them
if not context.scene.objects.get('ompl_settings'):
bpy.ops.object.add()
settings = context.object
settings.name = 'ompl_settings'
settings['autopb'] = True
settings['pbx'] = -1000
settings['pbX'] = 1000
settings['pby'] = -1000
settings['pbY'] = 1000
settings['pbz'] = -1000
settings['pbZ'] = 1000
settings['lbm'] = -1000
settings['lbM'] = 1000
settings['abm'] = -1000
settings['abM'] = 1000
for i in range(16):
settings['cbm%i'%i] = -10
settings['cbM%i'%i] = 10
# Save any changes so MORSE sees them when it loads the file
bpy.ops.wm.save_mainfile()
# Query MORSE for cdesc by starting it up temporarily (clunky, but it needs to be done)
subprocess.Popen(['morse', '-c', 'run', 'ompl', OMPL_DIR+'/builder.py', '--', bpy.data.filepath, ".", 'QUERY'])
# Wait for a connection
sockS = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockC = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
try:
print("Waiting for port 50007 to connect.")
sockS.connect(('localhost', 50007))
except:
time.sleep(0.5)
continue
break
while True:
try:
print("Waiting for port 4000 to connect.")
sockC.connect(('localhost', 4000))
except:
time.sleep(0.5)
continue
break
# Retrieve the control description
self.cdesc = ompl.morse.environment.MyEnvironment(sockS, sockC, True).cdesc
if self.cdesc[0] > 16:
self.report({'ERROR'}, "OMPL Error: Control dimension exceeds 16! This dialog won't be able to accomdate that many.")
return {'FINISHED'}
# Invoke bounds dialog
return context.window_manager.invoke_props_dialog(self, width=1100)
##
# \brief
def draw(self, _):
mainlayout = self.layout.row()
# 3 sections in first column:
sections = mainlayout.column()
sections.label(text="Position Bounds:")
sections.prop(self, 'autopb')
pb = sections.row()
sections.separator()
sections.label(text="Linear Velocity Bounds:")
lb = sections.row()
sections.separator()
sections.label(text="Angular Velocity Bounds:")
ab = sections.row()
# 1 section in second column
cb = mainlayout.column()
cb.label(text="Control Input Bounds:")
cbrow1 = cb.row()
cbrow2 = cb.row()
cbrow3 = cb.row()
cbrow4 = cb.row()
# In positional bounds sections, make 3 columns for X,Y,Z, with Min,Max in each
X = pb.column()
X.label(text="X")
X.prop(self, 'pbx', text="Min")
X.prop(self, 'pbX', text="Max")
Y = pb.column()
Y.label(text="Y")
Y.prop(self, 'pby', text="Min")
Y.prop(self, 'pbY', text="Max")
Z = pb.column()
Z.label(text="Z")
Z.prop(self, 'pbz', text="Min")
Z.prop(self, 'pbZ', text="Max")
# Linear velocity bounds Min,Max
lb.prop(self, 'lbm', text="Min")
lb.prop(self, 'lbM', text="Max")
# Angular
ab.prop(self, 'abm', text="Min")
ab.prop(self, 'abM', text="Max")
# Control Input
last_component = None
i = 0
k = 0
cbrow = [cbrow1, cbrow2, cbrow3, cbrow4]
for control in self.cdesc[2:]:
if control[0] != last_component:
# Only allow 4 robots per row
robot = cbrow[int(k/4)].column()
k += 1
# Print the robot name
robot.label(text=control[0][:-6]+":")
services = robot.box()
# Print the controller function name
services.label(text=control[1]+":")
args = services.row()
for j in range(control[2]):
# Print the argument number
con = args.column()
con.label(text="Arg %i"%j)
con.prop(self, 'cbm%i'%i, text="Min")
con.prop(self, 'cbM%i'%i, text="Max")
i += 1
# #
# Addon house-keeping
# #
##
# \brief Class describing the layout of the OMPL menu
class OMPLMenu(bpy.types.Menu):
bl_idname = "INFO_MT_game_ompl"
bl_label = "OMPL"
##
# \brief Add operators to the menu
def draw(self, _):
self.layout.operator_context = 'INVOKE_DEFAULT'
self.layout.operator(AddRobot.bl_idname)
self.layout.operator(AddGoal.bl_idname)
self.layout.operator(BoundsConfiguration.bl_idname)
self.layout.operator(Plan.bl_idname)
self.layout.operator(AnimFile.bl_idname)
self.layout.operator(Play.bl_idname)
##
# \brief Function called to initialize the menu
def menu_func(self, _):
self.layout.menu(OMPLMenu.bl_idname)
##
# \brief Deferred import of morse.builder (whenever a new file is loaded)
@bpy.app.handlers.persistent
def handler_scene_update_post(_):
# A little hackish, but now is a good time to import morse.builder
if 'morse.builder' not in sys.modules:
del AddRobot.robot_type
del AddRobot.controller_type
robotEnum = getRobots()
controllerEnum = getControllers()
AddRobot.robot_type = bpy.props.EnumProperty(items=robotEnum, name="MORSE robot", \
description="A robot from the MORSE components library", default=robotEnum[-1][0])
AddRobot.controller_type = bpy.props.EnumProperty(items=controllerEnum, name="MORSE actuator", \
description="The actuator to control the robot", default=controllerEnum[-1][0])
bpy.utils.unregister_class(AddRobot)
bpy.utils.register_class(AddRobot)
##
# \brief Called when the addon is enabled or Blender starts
def register():
# Ensure that MORSE environment 'ompl' is registered in ~/.morse/config
config_path = os.path.expanduser("~/.morse")
if not os.path.exists(config_path):
os.mkdir(config_path)
config_file = os.path.join(config_path, "config")
    conf = configparser.ConfigParser()
conf.read(config_file)
if not conf.has_section("sites"):
conf.add_section("sites")
conf.set('sites', 'ompl', OMPL_DIR)
with open(config_file, 'w') as configfile:
conf.write(configfile)
# Register all the operators, menu, and handler
bpy.utils.register_class(Plan)
bpy.utils.register_class(AnimFile)
bpy.utils.register_class(Play)
bpy.utils.register_class(AddRobot)
bpy.utils.register_class(AddGoal)
bpy.utils.register_class(BoundsConfiguration)
bpy.utils.register_class(OMPLMenu)
bpy.types.INFO_MT_game.prepend(menu_func)
bpy.app.handlers.scene_update_post.append(handler_scene_update_post)
##
# \brief Called when operator is uninstalled
def unregister():
# Undo all the registering
bpy.utils.unregister_class(Plan)
bpy.utils.unregister_class(AnimFile)
bpy.utils.unregister_class(Play)
bpy.utils.unregister_class(AddRobot)
bpy.utils.unregister_class(AddGoal)
bpy.utils.unregister_class(BoundsConfiguration)
bpy.utils.unregister_class(OMPLMenu)
bpy.types.INFO_MT_game.remove(menu_func)
bpy.app.handlers.scene_update_post.remove(handler_scene_update_post)
|
example_configs/text2speech/centaur_float.py | VoiceZen/OpenSeq2Seq | 1,459 | 12681427 | <filename>example_configs/text2speech/centaur_float.py
# pylint: skip-file
import os
import tensorflow as tf
from open_seq2seq.data import Text2SpeechDataLayer
from open_seq2seq.decoders import CentaurDecoder
from open_seq2seq.encoders import CentaurEncoder
from open_seq2seq.losses import Text2SpeechLoss
from open_seq2seq.models import Text2SpeechCentaur
from open_seq2seq.optimizers.lr_policies import poly_decay
from open_seq2seq.optimizers.novograd import NovoGrad
base_model = Text2SpeechCentaur
dataset = "LJ"
dataset_location = "/data/LJSpeech"
output_type = "both"
trim = False
exp_mag = True
mag_num_feats = 513
train = "train.csv"
valid = "test.csv"
batch_size = 32
num_audio_features = {
"mel": 80,
"magnitude": mag_num_feats
}
data_min = {
"mel": 1e-2,
"magnitude": 1e-5,
}
debug = False
num_gpus = 8 if not debug else 1
reduction_factor = 2
attention_layers = 4
encoder_hidden_size = 256
decoder_hidden_size = 512
base_params = {
"random_seed": 0,
"use_horovod": True if not debug else False,
"max_steps": 1000000,
"bench_start": 0,
"num_gpus": num_gpus,
"batch_size_per_gpu": batch_size,
"save_summaries_steps": 1000 if not debug else 10,
"print_loss_steps": 1000 if not debug else 10,
"print_samples_steps": 1000 if not debug else 10,
"eval_steps": 5000 if not debug else 50,
"save_checkpoint_steps": 5000,
"save_to_tensorboard": True,
"logdir": "result/centaur-float",
"max_grad_norm": 1.,
"optimizer": NovoGrad,
"optimizer_params": {
"beta1": 0.95,
"beta2": 0.98,
"epsilon": 1e-08,
"weight_decay": 0.001,
},
"lr_policy": poly_decay,
"lr_policy_params": {
"learning_rate": 0.02,
"power": 2.0,
},
"dtype": tf.float32,
"initializer": tf.contrib.layers.xavier_initializer,
"summaries": ["learning_rate", "variables", "gradients", "larc_summaries",
"variable_norm", "gradient_norm", "global_gradient_norm"],
"encoder": CentaurEncoder,
"encoder_params": {
"src_vocab_size": 94,
"embedding_size": encoder_hidden_size,
"output_size": encoder_hidden_size,
"pad_embeddings_2_eight": True,
"cnn_dropout_prob": 0.1,
"conv_layers": [
{
"kernel_size": [3], "stride": [1],
"num_channels": encoder_hidden_size, "padding": "SAME",
"activation_fn": tf.nn.relu
},
{
"kernel_size": [3], "stride": [1],
"num_channels": encoder_hidden_size, "padding": "SAME",
"activation_fn": tf.nn.relu
},
{
"kernel_size": [3], "stride": [1],
"num_channels": encoder_hidden_size, "padding": "SAME",
"activation_fn": tf.nn.relu
},
{
"kernel_size": [3], "stride": [1],
"num_channels": encoder_hidden_size, "padding": "SAME",
"activation_fn": tf.nn.relu
}
]
},
"decoder": CentaurDecoder,
"decoder_params": {
"attention_layers": attention_layers,
"self_attention_conv_params": {
"kernel_size": [5],
"stride": [1],
"num_channels": decoder_hidden_size,
"padding": "VALID",
"is_causal": True,
"activation_fn": tf.nn.relu
},
"window_size": 4,
"back_step_size": 0,
"force_layers": [1, 3],
"hidden_size": decoder_hidden_size,
"reduction_factor": reduction_factor,
"prenet_layers": 2,
"prenet_hidden_size": decoder_hidden_size,
"prenet_use_inference_dropout": False,
"cnn_dropout_prob": 0.1,
"prenet_dropout": 0.5,
"conv_layers":
[
{
"kernel_size": [5],
"stride": [1],
"num_channels": decoder_hidden_size,
"padding": "VALID",
"is_causal": True,
"activation_fn": tf.nn.relu
}
] * 4,
"mag_conv_layers":
[
{
"kernel_size": [5],
"stride": [1],
"num_channels": decoder_hidden_size,
"padding": "VALID",
"is_causal": True,
"activation_fn": tf.nn.relu
}
] * 4,
"attention_dropout": 0.1,
"layer_postprocess_dropout": 0.1
},
"loss": Text2SpeechLoss,
"loss_params": {
"use_mask": True,
"l1_norm": True
},
"data_layer": Text2SpeechDataLayer,
"data_layer_params": {
"dataset": dataset,
"use_cache": True,
"num_audio_features": num_audio_features,
"output_type": output_type,
"vocab_file": "open_seq2seq/test_utils/vocab_tts.txt",
"dataset_location": dataset_location,
"mag_power": 1,
"pad_EOS": True,
"feature_normalize": False,
"feature_normalize_mean": 0.,
"feature_normalize_std": 1.,
"data_min": data_min,
"mel_type": "htk",
"trim": trim,
"duration_max": 1024,
"duration_min": 24,
"exp_mag": exp_mag
},
}
train_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, train),
],
"shuffle": True,
},
}
eval_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, valid),
],
"duration_max": 1000,
"duration_min": 0,
"shuffle": False,
},
}
infer_params = {
"data_layer_params": {
"dataset_files": [
os.path.join(dataset_location, "infer.csv"),
],
"duration_max": 1000,
"duration_min": 0,
"shuffle": False,
},
}
interactive_infer_params = {
"data_layer_params": {
"dataset_files": [],
"duration_max": 1000,
"duration_min": 0,
"shuffle": False,
},
}
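# Usage sketch (assumption: run through OpenSeq2Seq's standard entry point; the
# exact flags may differ between releases):
#   python run.py --config_file=example_configs/text2speech/centaur_float.py --mode=train_eval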
|
api/features/workflows/core/migrations/0001_initial.py | SolidStateGroup/Bullet-Train-API | 126 | 12681442 | <filename>api/features/workflows/core/migrations/0001_initial.py
# Generated by Django 3.2.12 on 2022-03-25 14:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_lifecycle.mixins
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('environments', '0018_add_minimum_change_request_approvals_to_environment'),
]
operations = [
migrations.CreateModel(
name='ChangeRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(max_length=500)),
('description', models.TextField(blank=True, null=True)),
('deleted_at', models.DateTimeField(null=True)),
('committed_at', models.DateTimeField(null=True)),
('committed_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='committed_change_requests', to=settings.AUTH_USER_MODEL)),
('environment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='change_requests', to='environments.environment')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='change_requests', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
bases=(django_lifecycle.mixins.LifecycleModelMixin, models.Model),
),
migrations.CreateModel(
name='ChangeRequestApproval',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('approved_at', models.DateTimeField(null=True)),
('change_request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='approvals', to='workflows_core.changerequest')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user', 'change_request')},
},
),
]
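# Application sketch (assumption: 'workflows_core' is the app label, as implied
# by the 'workflows_core.changerequest' reference above):
#   python manage.py migrate workflows_core 0001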
|
parl/remote/tests/test_import_module/main_abs_test.py | lp2333/PARL | 3,172 | 12681456 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import parl
import time
import threading
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.remote.client import disconnect
from parl.utils import get_free_tcp_port
class TestImport(unittest.TestCase):
def tearDown(self):
disconnect()
def test_import_local_module(self):
from Module2 import B
port = get_free_tcp_port()
master = Master(port=port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker = Worker('localhost:{}'.format(port), 1)
time.sleep(10)
parl.connect("localhost:{}".format(port))
obj = B()
res = obj.add_sum(10, 5)
self.assertEqual(res, 15)
worker.exit()
master.exit()
if __name__ == '__main__':
unittest.main()
|
tests/test_argoverse_tracking_loader.py | gargrohin/argoverse-api | 560 | 12681461 | <gh_stars>100-1000
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
"""Tracking Loader unit tests"""
import pathlib
import numpy as np
import pytest
from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader
from argoverse.utils.camera_stats import CAMERA_LIST
TEST_DATA_LOC = str(pathlib.Path(__file__).parent.parent / "tests" / "test_data" / "tracking")
@pytest.fixture # type: ignore
def data_loader() -> ArgoverseTrackingLoader:
return ArgoverseTrackingLoader(TEST_DATA_LOC)
def test_get_city_name(data_loader: ArgoverseTrackingLoader) -> None:
assert data_loader.city_name == "PIT"
def test_calib(data_loader: ArgoverseTrackingLoader) -> None:
assert data_loader.calib
camera = "ring_front_center"
calib = data_loader.get_calibration(camera)
pc = data_loader.get_lidar(0)
uv = calib.project_ego_to_image(pc)
uv_cam = calib.project_ego_to_cam(pc)
    assert np.allclose(uv, calib.project_cam_to_image(uv_cam))
    assert np.allclose(uv_cam, calib.project_image_to_cam(uv))
    assert np.allclose(pc, calib.project_image_to_ego(uv))
def test_log_list(data_loader: ArgoverseTrackingLoader) -> None:
assert data_loader.log_list[0] == "1"
def test_image_list(data_loader: ArgoverseTrackingLoader) -> None:
assert set(data_loader.image_list.keys()) == set(CAMERA_LIST)
def test_image_list_sync(data_loader: ArgoverseTrackingLoader) -> None:
assert set(data_loader.image_list_sync.keys()) == set(CAMERA_LIST)
def test_image_timestamp_sync(data_loader: ArgoverseTrackingLoader) -> None:
assert set(data_loader.image_timestamp_list_sync.keys()) == set(CAMERA_LIST)
for camera in CAMERA_LIST:
assert 3 not in data_loader.image_timestamp_list_sync[camera]
def test_lidar_list(data_loader: ArgoverseTrackingLoader) -> None:
assert len(data_loader.lidar_list) == 3
def test_label_list(data_loader: ArgoverseTrackingLoader) -> None:
assert len(data_loader.label_list) == 3
def test_image_timestamp_list(data_loader: ArgoverseTrackingLoader) -> None:
assert set(data_loader.image_timestamp_list.keys()) == set(CAMERA_LIST)
for camera in CAMERA_LIST:
assert 3 in data_loader.image_timestamp_list[camera]
def test_timestamp_image_dict(data_loader: ArgoverseTrackingLoader) -> None:
assert set(data_loader.timestamp_image_dict.keys()) == set(CAMERA_LIST)
for camera in CAMERA_LIST:
assert len(data_loader.timestamp_image_dict[camera]) == 4
def test_timestamp_lidar_map(data_loader: ArgoverseTrackingLoader) -> None:
assert len(data_loader.timestamp_lidar_dict) == 3
assert len(data_loader.lidar_timestamp_list) == 3
def test_data_loader_get(data_loader: ArgoverseTrackingLoader) -> None:
data_0 = data_loader[0].current_log
data_1 = data_loader.get("1").current_log
assert data_0 == data_1
def test_loading_image_lidar(data_loader: ArgoverseTrackingLoader) -> None:
camera = CAMERA_LIST[0]
log = "1"
image_1 = data_loader.get_image_at_timestamp(0, camera, log)
image_2 = data_loader.get_image_list_sync(camera, log, load=True)[0]
image_3 = data_loader.get_image(0, camera, log)
image_4 = data_loader.get_image_sync(0, camera, log)
assert np.array_equal(image_1, image_2) and np.array_equal(image_1, image_3) and np.array_equal(image_1, image_4)
lidar_0 = data_loader.get_lidar(0, log)
lidar_gt = np.array(
[
[0.0, 0.0, 5.0],
[1.0, 0.0, 5.0],
[2.0, 0.0, 5.0],
[3.0, 0.0, 5.0],
[4.0, 0.0, 5.0],
[5.0, 0.0, 5.0],
[6.0, 0.0, 5.0],
[7.0, 0.0, 5.0],
[8.0, 0.0, 5.0],
[9.0, 0.0, 5.0],
]
)
assert np.array_equal(lidar_0, lidar_gt)
def test_label_object(data_loader: ArgoverseTrackingLoader) -> None:
label_at_frame_0 = data_loader.get_label_object(0)
for label in label_at_frame_0:
assert label.label_class == "VEHICLE"
assert label.width == 2
assert label.height == 2
assert label.length == 2
def test_calibration(data_loader: ArgoverseTrackingLoader) -> None:
for camera in CAMERA_LIST:
calib = data_loader.get_calibration(camera, "1")
assert calib.camera == camera
def test_pose(data_loader: ArgoverseTrackingLoader) -> None:
for idx in range(len(data_loader.lidar_list)):
pose = data_loader.get_pose(idx)
if pose is not None:
assert np.array_equal(
pose.inverse().transform_point_cloud(np.array([[0, 0, 0]])),
pose.inverse_transform_point_cloud(np.array([[0, 0, 0]])),
)
else:
assert False
def test_idx_from_timestamp(data_loader: ArgoverseTrackingLoader) -> None:
for i in range(len(data_loader.lidar_list)):
assert data_loader.get_idx_from_timestamp(i) == i
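# Run sketch (assumption: executed from the repository root so the bundled
# tests/test_data/tracking fixtures resolve):
#   pytest tests/test_argoverse_tracking_loader.py -q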
|
tests/basics/int_big_unary.py | learnforpractice/micropython-cpp | 13,648 | 12681466 | <filename>tests/basics/int_big_unary.py
# test bignum unary operations
i = 1 << 65
print(bool(i))
print(+i)
print(-i)
print(~i)
|
lpips.py | JiangtaoFeng/MaskGIT-pytorch | 163 | 12681484 | <reponame>JiangtaoFeng/MaskGIT-pytorch<filename>lpips.py
import torch
import torch.nn as nn
from torchvision.models import vgg16
from collections import namedtuple
import os
import hashlib
import requests
from tqdm import tqdm
URL_MAP = {
"vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1"
}
CKPT_MAP = {
"vgg_lpips": "vgg.pth"
}
MD5_MAP = {
"vgg_lpips": "d507d7349b931f0638a25a48a722f98a"
}
def download(url, local_path, chunk_size=1024):
os.makedirs(os.path.split(local_path)[0], exist_ok=True)
with requests.get(url, stream=True) as r:
total_size = int(r.headers.get("content-length", 0))
with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
with open(local_path, "wb") as f:
for data in r.iter_content(chunk_size=chunk_size):
if data:
f.write(data)
pbar.update(chunk_size)
def md5_hash(path):
with open(path, "rb") as f:
content = f.read()
return hashlib.md5(content).hexdigest()
def get_ckpt_path(name, root, check=False):
assert name in URL_MAP
path = os.path.join(root, CKPT_MAP[name])
if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]):
print(f"Downloading {name} model from {URL_MAP[name]} to {path}")
download(URL_MAP[name], path)
md5 = md5_hash(path)
assert md5 == MD5_MAP[name], md5
return path
class LPIPS(nn.Module):
def __init__(self):
super(LPIPS, self).__init__()
self.scaling_layer = ScalingLayer()
self.channels = [64, 128, 256, 512, 512]
self.feature_net = VGG16()
self.lins = nn.ModuleList([
NetLinLayer(self.channels[0], use_dropout=True),
NetLinLayer(self.channels[1], use_dropout=True),
NetLinLayer(self.channels[2], use_dropout=True),
NetLinLayer(self.channels[3], use_dropout=True),
NetLinLayer(self.channels[4], use_dropout=True)
])
self.load_from_pretrained()
for param in self.parameters():
param.requires_grad = False
def load_from_pretrained(self, name="vgg_lpips"):
ckpt = get_ckpt_path(name, "vgg_lpips")
self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
def forward(self, real_x, fake_x):
features_real = self.feature_net(self.scaling_layer(real_x))
features_fake = self.feature_net(self.scaling_layer(fake_x))
diffs = {}
# calc MSE differences between features
for i in range(len(self.channels)):
diffs[i] = (norm_tensor(features_real[i]) - norm_tensor(features_fake[i])) ** 2
return sum([spatial_average(self.lins[i].model(diffs[i])) for i in range(len(self.channels))])
class ScalingLayer(nn.Module):
def __init__(self):
super(ScalingLayer, self).__init__()
self.register_buffer("shift", torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
self.register_buffer("scale", torch.Tensor([.458, .448, .450])[None, :, None, None])
def forward(self, x):
return (x - self.shift) / self.scale
class NetLinLayer(nn.Module):
def __init__(self, in_channels, out_channels=1, use_dropout=False):
super(NetLinLayer, self).__init__()
        # Use an Identity placeholder when dropout is disabled so the Sequential
        # stays callable in either configuration (a None entry cannot be called).
        self.model = nn.Sequential(
            nn.Dropout() if use_dropout else nn.Identity(),
            nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False)
        )
class VGG16(nn.Module):
def __init__(self):
super(VGG16, self).__init__()
vgg_pretrained_features = vgg16(pretrained=True).features
slices = [vgg_pretrained_features[i] for i in range(30)]
self.slice1 = nn.Sequential(*slices[0:4])
self.slice2 = nn.Sequential(*slices[4:9])
self.slice3 = nn.Sequential(*slices[9:16])
self.slice4 = nn.Sequential(*slices[16:23])
self.slice5 = nn.Sequential(*slices[23:30])
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
h = self.slice1(x)
h_relu1 = h
h = self.slice2(h)
h_relu2 = h
h = self.slice3(h)
h_relu3 = h
h = self.slice4(h)
h_relu4 = h
h = self.slice5(h)
h_relu5 = h
vgg_outputs = namedtuple("VGGOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
return vgg_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
def norm_tensor(x):
"""
    Normalize feature maps channel-wise so each spatial position becomes a unit vector.
    :param x: batch of feature maps, shape (batch, channels, height, width)
    :return: channel-normalized batch of feature maps
"""
norm_factor = torch.sqrt(torch.sum(x**2, dim=1, keepdim=True))
return x / (norm_factor + 1e-10)
def spatial_average(x):
"""
    Feature maps have shape batch_size x channels x height x width --> average over the spatial dimensions.
    :param x: batch of feature maps
    :return: feature maps averaged over height and width (dimensions kept)
"""
return x.mean([2,3], keepdim=True)
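# Shape sketch for the demo below: each NetLinLayer maps a (B, C_i, H_i, W_i)
# squared-difference map to (B, 1, H_i, W_i); spatial_average reduces that to
# (B, 1, 1, 1), so the summed distance printed below has shape [10, 1, 1, 1].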
if __name__ == '__main__':
real = torch.randn(10, 3, 256, 256)
fake = torch.randn(10, 3, 256, 256)
loss = LPIPS().eval()
print(loss(real, fake).shape) |
mayan/apps/common/views.py | atitaya1412/Mayan-EDMS | 343 | 12681506 | from django.contrib import messages
from django.templatetags.static import static
from django.utils.translation import ugettext_lazy as _
from django.views.generic import RedirectView
from stronghold.views import StrongholdPublicMixin
from mayan.apps.views.generics import ConfirmView, SimpleView
from mayan.apps.views.mixins import (
ExternalContentTypeObjectViewMixin, ObjectNameViewMixin
)
from .classes import ModelCopy
from .forms import LicenseForm
from .icons import icon_setup
from .menus import menu_tools, menu_setup
from .permissions import permission_object_copy
from .settings import setting_home_view
class AboutView(SimpleView):
extra_context = {'title': _('About')}
template_name = 'appearance/about.html'
class FaviconRedirectView(RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
"""
Hide the static tag import to avoid errors with static file
processors.
"""
return static(path='appearance/images/favicon.ico')
class HomeView(SimpleView):
extra_context = {
'title': _('Home'),
}
template_name = 'appearance/home.html'
class LicenseView(SimpleView):
extra_context = {
'form': LicenseForm(),
'read_only': True,
'title': _('License'),
}
template_name = 'appearance/generic_form.html'
class ObjectCopyView(
ExternalContentTypeObjectViewMixin, ObjectNameViewMixin, ConfirmView
):
external_object_permission = permission_object_copy
def get_extra_context(self):
model_copy = ModelCopy.get(model=self.external_object._meta.model)
context = {
'object': self.external_object,
'subtitle': _('Fields to be copied: %s') % ', '.join(
sorted(
map(
str, model_copy.get_fields_verbose_names()
)
)
)
}
context['title'] = _(
'Make a copy of %(object_name)s "%(object)s"?'
) % {
'object_name': self.get_object_name(context=context),
'object': self.external_object
}
return context
def view_action(self):
self.external_object.copy_instance()
messages.success(
message=_('Object copied successfully.'),
request=self.request
)
class RootView(StrongholdPublicMixin, SimpleView):
extra_context = {'home_view': setting_home_view.value}
template_name = 'appearance/root.html'
class SetupListView(SimpleView):
template_name = 'appearance/generic_list_horizontal.html'
def get_extra_context(self, **kwargs):
return {
'no_results_icon': icon_setup,
'no_results_text': _(
                'No results here means that you don\'t have the required '
                'permissions to perform administrative tasks.'
),
'no_results_title': _('No setup options available.'),
'resolved_links': menu_setup.resolve(
request=self.request, sort_results=True
),
'title': _('Setup items'),
'subtitle': _(
'Here you can configure all aspects of the system.'
)
}
class ToolsListView(SimpleView):
template_name = 'appearance/generic_list_horizontal.html'
def get_extra_context(self):
return {
'resolved_links': menu_tools.resolve(
request=self.request, sort_results=True
),
'title': _('Tools'),
'subtitle': _(
'These modules are used to do system maintenance.'
)
}
|
pyjswidgets/pyjamas/XMLDoc.browser.py | takipsizad/pyjs | 739 | 12681511 | <filename>pyjswidgets/pyjamas/XMLDoc.browser.py
def create_xml_doc(text):
JS("""
try //Internet Explorer
{
var xmlDoc=new ActiveXObject("Microsoft['XMLDOM']");
xmlDoc['async']="false";
xmlDoc['loadXML'](@{{text}});
}
catch(e)
{
try //Firefox, Mozilla, Opera, etc.
{
var parser=new DOMParser();
xmlDoc=parser['parseFromString'](@{{text}},"text/xml");
}
catch(e)
{
return null;
}
}
return xmlDoc;
""")
|
mava/systems/tf/vdn/system.py | sash-a/Mava | 337 | 12681525 | # python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VDN system implementation."""
import functools
from typing import Any, Callable, Dict, Optional, Type, Union
import dm_env
import reverb
import sonnet as snt
from acme import specs as acme_specs
from acme.utils import counting
import mava
from mava import core
from mava import specs as mava_specs
from mava.components.tf.architectures import DecentralisedValueActor
from mava.components.tf.modules import mixing
from mava.environment_loop import ParallelEnvironmentLoop
from mava.systems.tf import executors
from mava.systems.tf.madqn.system import MADQN
from mava.systems.tf.vdn import builder, execution, training
from mava.types import EpsilonScheduler
from mava.utils.loggers import MavaLogger, logger_utils
# TODO Implement recurrent VDN
class VDN(MADQN):
"""VDN system."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[acme_specs.BoundedArray], Dict[str, snt.Module]],
exploration_scheduler_fn: Union[
EpsilonScheduler,
Dict[str, EpsilonScheduler],
Dict[str, Dict[str, EpsilonScheduler]],
],
logger_factory: Callable[[str], MavaLogger] = None,
architecture: Type[DecentralisedValueActor] = DecentralisedValueActor,
trainer_fn: Type[training.VDNTrainer] = training.VDNTrainer,
executor_fn: Type[core.Executor] = execution.VDNFeedForwardExecutor,
mixer: Type[mixing.BaseMixingModule] = mixing.AdditiveMixing,
num_executors: int = 1,
num_caches: int = 0,
environment_spec: mava_specs.MAEnvironmentSpec = None,
shared_weights: bool = True,
agent_net_keys: Dict[str, str] = {},
batch_size: int = 256,
prefetch_size: int = 4,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: Optional[float] = 32.0,
n_step: int = 5,
sequence_length: int = 20,
importance_sampling_exponent: Optional[float] = None,
max_priority_weight: float = 0.9,
period: int = 20,
max_gradient_norm: float = None,
discount: float = 0.99,
optimizer: Union[snt.Optimizer, Dict[str, snt.Optimizer]] = snt.optimizers.Adam(
learning_rate=1e-4
),
target_update_period: int = 100,
executor_variable_update_period: int = 1000,
max_executor_steps: int = None,
checkpoint: bool = True,
checkpoint_subpath: str = "~/mava/",
checkpoint_minute_interval: int = 5,
logger_config: Dict = {},
train_loop_fn: Callable = ParallelEnvironmentLoop,
eval_loop_fn: Callable = ParallelEnvironmentLoop,
train_loop_fn_kwargs: Dict = {},
eval_loop_fn_kwargs: Dict = {},
evaluator_interval: Optional[dict] = None,
learning_rate_scheduler_fn: Optional[Callable[[int], None]] = None,
seed: Optional[int] = None,
):
"""Initialise the system
Args:
environment_factory (Callable[[bool], dm_env.Environment]): function to
instantiate an environment.
network_factory (Callable[[acme_specs.BoundedArray],
Dict[str, snt.Module]]): function to instantiate system networks.
logger_factory (Callable[[str], MavaLogger], optional): function to
instantiate a system logger. Defaults to None.
architecture (Type[DecentralisedValueActor], optional): system architecture,
e.g. decentralised or centralised. Defaults to DecentralisedValueActor.
trainer_fn (Type[training.VDNTrainer], optional): training type associated
with executor and architecture, e.g. centralised training. Defaults
to training.VDNTrainer.
executor_fn (Type[core.Executor], optional): executor type, e.g.
feedforward or recurrent. Defaults to execution.VDNFeedForwardExecutor.
mixer (Type[mixing.BaseMixingModule], optional): mixer module type, e.g.
additive or monotonic mixing. Defaults to mixing.AdditiveMixing.
exploration_scheduler_fn (Type[ LinearExplorationScheduler ], optional):
function specifying a decaying scheduler for epsilon exploration.
See mava/systems/tf/madqn/system.py for details.
num_executors (int, optional): number of executor processes to run in
parallel. Defaults to 1.
num_caches (int, optional): number of trainer node caches. Defaults to 0.
environment_spec (mava_specs.MAEnvironmentSpec, optional): description of
the action, observation spaces etc. for each agent in the system.
Defaults to None.
shared_weights (bool, optional): whether agents should share weights or not.
When agent_net_keys are provided the value of shared_weights is ignored.
Defaults to True.
agent_net_keys: (dict, optional): specifies what network each agent uses.
Defaults to {}.
batch_size (int, optional): sample batch size for updates. Defaults to 256.
prefetch_size (int, optional): size to prefetch from replay. Defaults to 4.
min_replay_size (int, optional): minimum replay size before updating.
Defaults to 1000.
max_replay_size (int, optional): maximum replay size. Defaults to 1000000.
samples_per_insert (Optional[float], optional): number of samples to take
from replay for every insert that is made. Defaults to 32.0.
n_step (int, optional): number of steps to include prior to boostrapping.
Defaults to 5.
sequence_length (int, optional): recurrent sequence rollout length. Defaults
to 20.
importance_sampling_exponent: (float): Not implemented yet.
max_priority_weight(float): Not implemented yet.
period (int, optional): The period with which we add sequences. See `period`
in `acme.SequenceAdder.period` for more info. Defaults to 20.
max_gradient_norm (float, optional): maximum allowed norm for gradients
before clipping is applied. Defaults to None.
discount (float, optional): discount factor to use for TD updates. Defaults
to 0.99.
optimizer (Union[snt.Optimizer, Dict[str, snt.Optimizer]], optional):
type of optimizer to use to update network parameters. Defaults to
snt.optimizers.Adam( learning_rate=1e-4 ).
target_update_period (int, optional): number of steps before target
networks are updated. Defaults to 100.
executor_variable_update_period (int, optional): number of steps before
updating executor variables from the variable source. Defaults to 1000.
            max_executor_steps (int, optional): maximum number of steps an executor
                can take in an episode. Defaults to None.
checkpoint (bool, optional): whether to checkpoint models. Defaults to
False.
checkpoint_subpath (str, optional): subdirectory specifying where to store
checkpoints. Defaults to "~/mava/".
checkpoint_minute_interval (int): The number of minutes to wait between
checkpoints.
logger_config (Dict, optional): additional configuration settings for the
logger factory. Defaults to {}.
train_loop_fn (Callable, optional): function to instantiate a train loop.
Defaults to ParallelEnvironmentLoop.
eval_loop_fn (Callable, optional): function to instantiate an evaluation
loop. Defaults to ParallelEnvironmentLoop.
train_loop_fn_kwargs (Dict, optional): possible keyword arguments to send
to the training loop. Defaults to {}.
eval_loop_fn_kwargs (Dict, optional): possible keyword arguments to send to
the evaluation loop. Defaults to {}.
learning_rate_scheduler_fn: function/class that takes in a trainer step t
and returns the current learning rate.
seed: seed for reproducible sampling (for epsilon greedy action selection).
evaluator_interval: An optional condition that is used to
evaluate/test system performance after [evaluator_interval]
condition has been met. If None, evaluation will
happen at every timestep.
E.g. to evaluate a system after every 100 executor episodes,
evaluator_interval = {"executor_episodes": 100}.
"""
self._mixer = mixer
# set default logger if no logger provided
if not logger_factory:
logger_factory = functools.partial(
logger_utils.make_logger,
directory="~/mava",
to_terminal=True,
time_delta=10,
)
super(VDN, self).__init__(
architecture=architecture,
environment_factory=environment_factory,
network_factory=network_factory,
logger_factory=logger_factory,
environment_spec=environment_spec,
shared_weights=shared_weights,
agent_net_keys=agent_net_keys,
num_executors=num_executors,
num_caches=num_caches,
max_executor_steps=max_executor_steps,
checkpoint_subpath=checkpoint_subpath,
checkpoint=checkpoint,
checkpoint_minute_interval=checkpoint_minute_interval,
train_loop_fn=train_loop_fn,
train_loop_fn_kwargs=train_loop_fn_kwargs,
eval_loop_fn=eval_loop_fn,
eval_loop_fn_kwargs=eval_loop_fn_kwargs,
logger_config=logger_config,
exploration_scheduler_fn=exploration_scheduler_fn,
learning_rate_scheduler_fn=learning_rate_scheduler_fn,
seed=seed,
evaluator_interval=evaluator_interval,
)
if issubclass(executor_fn, executors.RecurrentExecutor):
extra_specs = self._get_extra_specs()
else:
extra_specs = {}
self._builder = builder.VDNBuilder(
builder.VDNConfig(
environment_spec=self._environment_spec,
agent_net_keys=self._agent_net_keys,
discount=discount,
batch_size=batch_size,
prefetch_size=prefetch_size,
target_update_period=target_update_period,
executor_variable_update_period=executor_variable_update_period,
min_replay_size=min_replay_size,
max_replay_size=max_replay_size,
samples_per_insert=samples_per_insert,
n_step=n_step,
sequence_length=sequence_length,
importance_sampling_exponent=importance_sampling_exponent,
max_priority_weight=max_priority_weight,
period=period,
max_gradient_norm=max_gradient_norm,
checkpoint=checkpoint,
optimizer=optimizer,
checkpoint_subpath=checkpoint_subpath,
checkpoint_minute_interval=checkpoint_minute_interval,
evaluator_interval=evaluator_interval,
learning_rate_scheduler_fn=learning_rate_scheduler_fn,
),
trainer_fn=trainer_fn,
executor_fn=executor_fn,
extra_specs=extra_specs,
)
def trainer(
self,
replay: reverb.Client,
counter: counting.Counter,
) -> mava.core.Trainer:
"""System trainer
Args:
replay (reverb.Client): replay data table to pull data from.
counter (counting.Counter): step counter object.
Returns:
mava.core.Trainer: system trainer.
"""
# Create the networks to optimize (online)
networks = self._network_factory( # type: ignore
environment_spec=self._environment_spec,
agent_net_keys=self._agent_net_keys,
)
# Create system architecture
architecture = self._architecture(
environment_spec=self._environment_spec,
value_networks=networks["q_networks"],
agent_net_keys=self._agent_net_keys,
)
# Augment network architecture by adding mixing layer network.
system_networks = self._mixer(
architecture=architecture,
).create_system()
# create logger
trainer_logger_config = {}
if self._logger_config and "trainer" in self._logger_config:
trainer_logger_config = self._logger_config["trainer"]
trainer_logger = self._logger_factory( # type: ignore
"trainer", **trainer_logger_config
)
dataset = self._builder.make_dataset_iterator(replay)
counter = counting.Counter(counter, "trainer")
return self._builder.make_trainer(
networks=system_networks,
dataset=dataset,
counter=counter,
logger=trainer_logger,
)
def build(self, name: str = "vdn") -> Any:
"""Build the distributed system as a graph program.
Args:
name (str, optional): system name. Defaults to "vdn".
Returns:
Any: graph program for distributed system training.
"""
return super().build(name=name)
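# --- Editor's usage sketch (not part of the original file) ---
# A minimal way one might build this system; the two factories are user-supplied
# callables and every keyword value below is an illustrative assumption only.
#
#   system = VDN(
#       environment_factory=make_environment,   # hypothetical helper returning a dm_env.Environment
#       network_factory=make_q_networks,        # hypothetical helper returning {"q_networks": ...}
#       num_executors=1,
#       checkpoint=False,
#   )
#   program = system.build(name="vdn")          # graph program for distributed training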
|
tests/basics/except_match_tuple.py | learnforpractice/micropython-cpp | 13,648 | 12681531 | # test exception matching against a tuple
try:
fail
except (Exception,):
print('except 1')
try:
fail
except (Exception, Exception):
print('except 2')
try:
fail
except (TypeError, NameError):
print('except 3')
try:
fail
except (TypeError, ValueError, Exception):
print('except 4')
|
tools/mo/openvino/tools/mo/front/tf/extractors/pack.py | pazamelin/openvino | 2,406 | 12681561 | <reponame>pazamelin/openvino<gh_stars>1000+
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
def tf_pack_ext(pb):
assert (pb.attr["N"].i == len(pb.input))
return {
'axis': pb.attr["axis"].i,
'N': pb.attr["N"].i,
'infer': None
}
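# Editor's note (illustrative sketch, not from the original file): the extractor reads the
# "axis" and "N" attributes of a TensorFlow NodeDef-like protobuf; a stand-in object with
# the same shape shows the expected result.
#
#   class _Attr:
#       def __init__(self, i): self.i = i
#   class _FakePb:
#       attr = {"axis": _Attr(0), "N": _Attr(2)}
#       input = ["a", "b"]
#   # tf_pack_ext(_FakePb()) == {'axis': 0, 'N': 2, 'infer': None}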
|
sdk/python/kubeflow/pytorchjob/constants/constants.py | happy2048/pytorch-operator | 312 | 12681573 | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# PyTorchJob K8S constants
PYTORCHJOB_GROUP = 'kubeflow.org'
PYTORCHJOB_KIND = 'PyTorchJob'
PYTORCHJOB_PLURAL = 'pytorchjobs'
PYTORCHJOB_VERSION = os.environ.get('PYTORCHJOB_VERSION', 'v1')
PYTORCH_LOGLEVEL = os.environ.get('PYTORCHJOB_LOGLEVEL', 'INFO').upper()
# How long to wait in seconds for requests to the ApiServer
APISERVER_TIMEOUT = 120
# PyTorchJob label names
PYTORCHJOB_CONTROLLER_LABEL = 'controller-name'
PYTORCHJOB_GROUP_LABEL = 'group-name'
PYTORCHJOB_NAME_LABEL = 'pytorch-job-name'
PYTORCHJOB_TYPE_LABEL = 'pytorch-replica-type'
PYTORCHJOB_INDEX_LABEL = 'pytorch-replica-index'
PYTORCHJOB_ROLE_LABEL = 'job-role'
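# Editor's sketch (illustrative only): these label keys are typically joined into a
# Kubernetes label selector when listing the pods of a job; the job name is hypothetical.
#
#   selector = ",".join([
#       "{}={}".format(PYTORCHJOB_GROUP_LABEL, PYTORCHJOB_GROUP),
#       "{}={}".format(PYTORCHJOB_NAME_LABEL, "mnist-ddp"),
#   ])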
|
examples/modal_component/app.py | adamlwgriffiths/vue.py | 274 | 12681585 | from vue import VueComponent
class Modal(VueComponent):
template = "#modal-template"
Modal.register()
class App(VueComponent):
template = "#main"
show_modal = False
App("#app")
|
testcases/elichika_tests/node/Functions/MinMax.py | vermashresth/chainer-compiler | 116 | 12681587 | <reponame>vermashresth/chainer-compiler
# coding: utf-8
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer_compiler.elichika import testtools
class Simple(chainer.Chain):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, v):
return self.func(v)
class Axis(chainer.Chain):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, v):
return self.func(v, axis=1)
class KeepDims(chainer.Chain):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, v):
return self.func(v, keepdims=True)
class AxisKeepDims(chainer.Chain):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, v):
return self.func(v, axis=1, keepdims=True)
# ======================================
def main():
np.random.seed(314)
a1 = np.random.rand(6, 2, 3).astype(np.float32)
def test(func, name):
testtools.generate_testcase(Simple(func), [a1], subname= name + '_simple')
testtools.generate_testcase(Axis(func), [a1], subname= name + '_axis')
testtools.generate_testcase(KeepDims(func), [a1], subname= name + '_keepdims')
testtools.generate_testcase(AxisKeepDims(func), [a1], subname= name + '_axiskeepdims')
test(F.min, 'min')
test(F.max, 'max')
if __name__ == '__main__':
main() |
web400-8/bug_flask/flaskweb/app/__init__.py | mehrdad-shokri/CTF_web | 664 | 12681591 | import os
from flask import Flask, redirect, url_for, session, render_template, flash
from flask_script import Manager
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from config import config
from werkzeug.routing import BaseConverter
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
manager = Manager()
bootstrap = Bootstrap()
moment = Moment()
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = "auth.login"
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
app.url_map.converters['regex'] = RegexConverter
bootstrap.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
from . main import main as main_blueprint
app.register_blueprint(main_blueprint)
from . auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app |
tests/test_tradeexecutor.py | nhatminhbeo/mango-explorer | 131 | 12681593 | import typing
from .context import mango
from .fakes import fake_context, fake_wallet
from decimal import Decimal
def test_trade_executor_constructor() -> None:
succeeded = False
try:
mango.TradeExecutor() # type: ignore[abstract]
except TypeError:
# Can't instantiate the abstract base class.
succeeded = True
assert succeeded
def test_null_trade_executor_constructor() -> None:
def reporter(x: typing.Any) -> None:
return None
actual = mango.NullTradeExecutor(reporter)
assert actual is not None
assert actual.reporter == reporter
def test_serum_trade_executor_constructor() -> None:
context: mango.Context = fake_context()
wallet: mango.Wallet = fake_wallet()
price_adjustment_factor: Decimal = Decimal(0.05)
def reporter(x: typing.Any) -> None:
return None
actual = mango.ImmediateTradeExecutor(context, wallet, None, price_adjustment_factor, reporter)
assert actual is not None
assert actual.context == context
assert actual.wallet == wallet
assert actual.price_adjustment_factor == price_adjustment_factor
assert actual.reporter is not None
|
SpriteEncoder/decode.py | rmnvgr/CorsixTH | 2,323 | 12681600 | #!/usr/bin/env python3
"""
Program to decode the first sprite of a CTHG 2 file.
Mainly intended as a test for checking the encoder, but also a
demonstration of how to decode.
"""
_license = """
Copyright (c) 2013 Alberth "Alberth" Hofkamp
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PIL import Image
class Infile:
def __init__(self, fname):
self.fname = fname
self.handle = open(self.fname, "rb")
# Read header
for h in [ord('C'), ord('T'), ord('H'), ord('G'), 2, 0]:
v = self.getByte()
assert v == h
def getByte(self):
v = self.handle.read(1)[0]
return v
def getWord(self):
b = self.getByte()
return b | (self.getByte() << 8)
def getLong(self):
w = self.getWord()
return w | (self.getWord() << 16)
def getData(self, size):
data = []
for i in range(size):
data.append(self.getByte())
return data
def decode_xy(pix_idx, w, h):
y = pix_idx // w
x = pix_idx - w * y
assert x >= 0 and x < w
assert y >= 0 and y < h
return x, y
def get_colour(table, idx):
if table == 0:
return (0, 0, 0, 255)
if table == 1:
return (idx, 0, 0, 255)
if table == 2:
return (0, idx, 0, 255)
if table == 3:
return (0, 0, idx, 255)
if table == 4:
return (0, idx, idx, 255)
if table == 5:
return (idx, 0, idx, 255)
assert False
class Sprite:
def __init__(self, infile):
size = infile.getLong() - 2 - 2 - 2
self.number = infile.getWord()
self.width = infile.getWord()
self.height = infile.getWord()
self.data = infile.getData(size)
print("Sprite number {}".format(self.number))
print("Width {}".format(self.width))
print("Height {}".format(self.height))
print("Size {}".format(size))
print("Data size {}".format(len(self.data)))
def get_data(self, idx):
return self.data[idx], idx + 1
def save(self):
im = Image.new("RGBA", (self.width, self.height), (0, 0, 0, 0))
pix = im.load()
idx = 0
pix_idx = 0
while idx < len(self.data):
length, idx = self.get_data(idx)
if length <= 63: # Fixed non-transparent 32bpp pixels (RGB)
length = length & 63
x, y = decode_xy(pix_idx, self.width, self.height)
for i in range(length):
d = (self.data[idx], self.data[idx+1], self.data[idx+2], 255)
pix[x, y] = d
idx = idx + 3
pix_idx = pix_idx + 1
x = x + 1
if x == self.width:
x = 0
y = y + 1
continue
elif length <= 64+63: # Partially transparent 32bpp pixels (RGB)
length = length & 63
opacity, idx = self.get_data(idx)
x, y = decode_xy(pix_idx, self.width, self.height)
for i in range(length):
d = (self.data[idx], self.data[idx+1], self.data[idx+2], opacity)
pix[x, y] = d
idx = idx + 3
pix_idx = pix_idx + 1
x = x + 1
if x == self.width:
x = 0
y = y + 1
continue
elif length <= 128+63: # Fixed fully transparent pixels
length = length & 63
pix_idx = pix_idx + length
continue
else: # Recolour layer.
length = length & 63
table, idx = self.get_data(idx)
opacity, idx = self.get_data(idx)
x, y = decode_xy(pix_idx, self.width, self.height)
for i in range(length):
col, idx = self.get_data(idx)
pix[x, y] = get_colour(table, col)
pix_idx = pix_idx + 1
x = x + 1
if x == self.width:
x = 0
y = y + 1
continue
im.save("sprite_{}.png".format(self.number))
inf = Infile("x.out")
spr = Sprite(inf)
spr.save()
|
voicefixer/base.py | ishine/voicefixer | 159 | 12681621 | import librosa.display
from voicefixer.tools.pytorch_util import *
from voicefixer.tools.wav import *
from voicefixer.restorer.model import VoiceFixer as voicefixer_fe
import os
EPS = 1e-8
class VoiceFixer(nn.Module):
def __init__(self):
super(VoiceFixer, self).__init__()
self._model = voicefixer_fe(channels=2, sample_rate=44100)
# print(os.path.join(os.path.expanduser('~'), ".cache/voicefixer/analysis_module/checkpoints/epoch=15_trimed_bn.ckpt"))
self._model.load_state_dict(
torch.load(
os.path.join(
os.path.expanduser("~"),
".cache/voicefixer/analysis_module/checkpoints/vf.ckpt",
)
)
)
self._model.eval()
def _load_wav_energy(self, path, sample_rate, threshold=0.95):
wav_10k, _ = librosa.load(path, sr=sample_rate)
stft = np.log10(np.abs(librosa.stft(wav_10k)) + 1.0)
fbins = stft.shape[0]
e_stft = np.sum(stft, axis=1)
for i in range(e_stft.shape[0]):
e_stft[-i - 1] = np.sum(e_stft[: -i - 1])
total = e_stft[-1]
for i in range(e_stft.shape[0]):
if e_stft[i] < total * threshold:
continue
else:
break
return wav_10k, int((sample_rate // 2) * (i / fbins))
def _load_wav(self, path, sample_rate, threshold=0.95):
wav_10k, _ = librosa.load(path, sr=sample_rate)
return wav_10k
def _amp_to_original_f(self, mel_sp_est, mel_sp_target, cutoff=0.2):
freq_dim = mel_sp_target.size()[-1]
mel_sp_est_low, mel_sp_target_low = (
mel_sp_est[..., 5 : int(freq_dim * cutoff)],
mel_sp_target[..., 5 : int(freq_dim * cutoff)],
)
energy_est, energy_target = torch.mean(mel_sp_est_low, dim=(2, 3)), torch.mean(
mel_sp_target_low, dim=(2, 3)
)
amp_ratio = energy_target / energy_est
return mel_sp_est * amp_ratio[..., None, None], mel_sp_target
def _trim_center(self, est, ref):
diff = np.abs(est.shape[-1] - ref.shape[-1])
if est.shape[-1] == ref.shape[-1]:
return est, ref
elif est.shape[-1] > ref.shape[-1]:
min_len = min(est.shape[-1], ref.shape[-1])
est, ref = est[..., int(diff // 2) : -int(diff // 2)], ref
est, ref = est[..., :min_len], ref[..., :min_len]
return est, ref
else:
min_len = min(est.shape[-1], ref.shape[-1])
est, ref = est, ref[..., int(diff // 2) : -int(diff // 2)]
est, ref = est[..., :min_len], ref[..., :min_len]
return est, ref
def _pre(self, model, input, cuda):
input = input[None, None, ...]
input = torch.tensor(input)
input = try_tensor_cuda(input, cuda=cuda)
sp, _, _ = model.f_helper.wav_to_spectrogram_phase(input)
mel_orig = model.mel(sp.permute(0, 1, 3, 2)).permute(0, 1, 3, 2)
# return models.to_log(sp), models.to_log(mel_orig)
return sp, mel_orig
def remove_higher_frequency(self, wav, ratio=0.95):
stft = librosa.stft(wav)
real, img = np.real(stft), np.imag(stft)
mag = (real**2 + img**2) ** 0.5
cos, sin = real / (mag + EPS), img / (mag + EPS)
spec = np.abs(stft) # [1025,T]
feature = spec.copy()
feature = np.log10(feature + EPS)
feature[feature < 0] = 0
energy_level = np.sum(feature, axis=1)
threshold = np.sum(energy_level) * ratio
curent_level, i = energy_level[0], 0
while i < energy_level.shape[0] and curent_level < threshold:
curent_level += energy_level[i + 1, ...]
i += 1
spec[i:, ...] = np.zeros_like(spec[i:, ...])
stft = spec * cos + 1j * spec * sin
return librosa.istft(stft)
@torch.no_grad()
def restore_inmem(self, wav_10k, cuda=False, mode=0, your_vocoder_func=None):
check_cuda_availability(cuda=cuda)
self._model = try_tensor_cuda(self._model, cuda=cuda)
if mode == 0:
self._model.eval()
elif mode == 1:
self._model.eval()
elif mode == 2:
            self._model.train()  # More effective on seriously damaged speech
res = []
seg_length = 44100 * 30
break_point = seg_length
while break_point < wav_10k.shape[0] + seg_length:
segment = wav_10k[break_point - seg_length : break_point]
if mode == 1:
segment = self.remove_higher_frequency(segment)
sp, mel_noisy = self._pre(self._model, segment, cuda)
out_model = self._model(sp, mel_noisy)
denoised_mel = from_log(out_model["mel"])
if your_vocoder_func is None:
out = self._model.vocoder(denoised_mel, cuda=cuda)
else:
out = your_vocoder_func(denoised_mel)
# unify energy
if torch.max(torch.abs(out)) > 1.0:
out = out / torch.max(torch.abs(out))
print("Warning: Exceed energy limit,", input)
# frame alignment
out, _ = self._trim_center(out, segment)
res.append(out)
break_point += seg_length
out = torch.cat(res, -1)
return tensor2numpy(out.squeeze(0))
def restore(self, input, output, cuda=False, mode=0, your_vocoder_func=None):
wav_10k = self._load_wav(input, sample_rate=44100)
out_np_wav = self.restore_inmem(
wav_10k, cuda=cuda, mode=mode, your_vocoder_func=your_vocoder_func
)
save_wave(out_np_wav, fname=output, sample_rate=44100)
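# Editor's usage sketch (not part of the original file); assumes the pretrained
# checkpoint has already been placed under ~/.cache/voicefixer as loaded above,
# and the wav paths are placeholders.
#
#   vf = VoiceFixer()
#   vf.restore(input="noisy.wav", output="restored.wav", cuda=False, mode=0)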
|
CTFd/schemas/submissions.py | nox237/CTFd | 3,592 | 12681623 | from marshmallow import fields
from CTFd.models import Submissions, ma
from CTFd.schemas.challenges import ChallengeSchema
from CTFd.utils import string_types
class SubmissionSchema(ma.ModelSchema):
challenge = fields.Nested(ChallengeSchema, only=["name", "category", "value"])
class Meta:
model = Submissions
include_fk = True
dump_only = ("id",)
views = {
"admin": [
"provided",
"ip",
"challenge_id",
"challenge",
"user",
"team",
"date",
"type",
"id",
],
"user": ["challenge_id", "challenge", "user", "team", "date", "type", "id"],
}
def __init__(self, view=None, *args, **kwargs):
if view:
if isinstance(view, string_types):
kwargs["only"] = self.views[view]
elif isinstance(view, list):
kwargs["only"] = view
super(SubmissionSchema, self).__init__(*args, **kwargs)
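# Editor's sketch (illustrative): the `view` argument selects which fields get dumped,
# e.g. a user-facing serialization that omits admin-only fields such as `provided`
# and `ip` (the query below is a hypothetical example).
#
#   schema = SubmissionSchema(view="user", many=True)
#   result = schema.dump(Submissions.query.all())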
|
apps/iterm.py | zachbarrow/talon_community | 125 | 12681627 | from talon.voice import Key, Context
ctx = Context("iterm", bundle="com.googlecode.iterm2")
keymap = {
"broadcaster": Key("cmd-alt-i"),
"password": Key("cmd-alt-f"),
# Pane creation and navigation
"split horizontal": Key("cmd-shift-d"),
"split vertical": Key("cmd-d"),
"pane next": Key("cmd-]"),
"pane last": Key("cmd-["),
}
ctx.keymap(keymap)
|
Blending/BP_Regression.py | Jojoxiao/Machine-Learning-for-Beginner-by-Python3 | 397 | 12681644 | #-*- coding:utf-8 -*-
# &Author AnFany
# Supports multi-dimensional outputs
import numpy as np
import tensorflow as tf
'''Training functions built on TensorFlow'''
# Create the activation function
def activate(input_layer, weights, biases, actfunc):
layer = tf.add(tf.matmul(input_layer, weights), biases)
if actfunc == 'relu':
return tf.nn.relu(layer)
elif actfunc == 'tanh':
return tf.nn.tanh(layer)
elif actfunc == 'sigmoid':
return tf.nn.sigmoid(layer)
# The weight initialization scheme depends strongly on the activation function:
# sigmoid: xavier, tanh: xavier, relu: he
# Build the training function
def Ten_train(xdata, ydata, prexdata, preydata, hiddenlayers=3, hiddennodes=100, \
learn_rate=0.05, itertimes=100000, batch_size=200, activate_func='sigmoid', break_error=0.0043):
    # Start assembling the neural network
Input_Dimen = len(xdata[0])
    Unit_Layers = [Input_Dimen] + [hiddennodes] * hiddenlayers + [len(ydata[0])]  # input dim, hidden-layer sizes, output dim
    # Create placeholders
x_data = tf.placeholder(shape=[None, Input_Dimen], dtype=tf.float32, name='x_data')
y_target = tf.placeholder(shape=[None, len(ydata[0])], dtype=tf.float32)
    # Dynamically named variables
VAR_NAME = locals()
for jj in range(hiddenlayers + 1):
VAR_NAME['weight%s' % jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]), dtype=tf.float32,\
name='weight%s' % jj) / np.sqrt(Unit_Layers[jj]) # sigmoid tanh
        # VAR_NAME['weight%s'%jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]), dtype=tf.float32, name='weight%s' % jj) / np.sqrt(Unit_Layers[jj] / 2)  # relu
VAR_NAME['bias%s' % jj] = tf.Variable(tf.random_normal([Unit_Layers[jj + 1]], stddev=10, name='bias%s' % jj),
dtype=tf.float32)
if jj == 0:
VAR_NAME['ooutda%s' % jj] = activate(x_data, eval('weight%s' % jj), eval('bias%s' % jj), actfunc=activate_func)
else:
VAR_NAME['ooutda%s' % jj] = activate(eval('ooutda%s' % (jj - 1)), eval('weight%s' % jj), \
eval('bias%s' % jj), actfunc=activate_func)
    # Mean squared error
loss = tf.reduce_mean(tf.reduce_sum(tf.square(y_target - eval('ooutda%s' % (hiddenlayers))), reduction_indices=[1]))
    # Optimization method
my_opt = tf.train.AdamOptimizer(learn_rate)
train_step = my_opt.minimize(loss)
    # Initialization
init = tf.global_variables_initializer()
    # Containers for storing the errors
accudict = {}
    loss_vec = []  # training errors
    loss_pre = []  # validation errors
accunum = np.inf
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(init)
for i in range(itertimes):
rand_index = np.random.choice(len(xdata), size=batch_size, replace=False)
rand_x = xdata[rand_index]
rand_y = ydata[rand_index]
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss = sess.run(loss, feed_dict={x_data: xdata, y_target: ydata})
temmp_losspre = sess.run(loss, feed_dict={x_data: prexdata, y_target: preydata})
loss_vec.append(temp_loss)
loss_pre.append(temmp_losspre)
accudict[i] = [temp_loss, temmp_losspre]
            # Judge how training is going from the reported errors
if (i + 1) % 20 == 0:
                print('Generation: ' + str(i + 1) + '. Normalized training error: Loss = ' + str(temp_loss) +
                      '. Normalized validation error: Loss = ' + str(temmp_losspre))
            # Early-stopping check
            if temp_loss < break_error:  # threshold chosen empirically; with stochastic descent the error can fluctuate early on
break
            # Across all iterations, keep the parameters of the one with the lowest combined error
zongheaccu = 0.01 * temp_loss + 0.99 * temmp_losspre
if zongheaccu < accunum:
accunum = zongheaccu
                # Save the model
                saver.save(sess, './pm25', global_step=i)  # note the path
sign = min(accudict.items(), key=lambda d: 0.01 * d[1][0] + 0.99 * d[1][1])[0]
    # Return the training and validation errors
xunlian_error, adderror = loss_vec[sign], loss_pre[sign]
return sign, hiddenlayers, xunlian_error, adderror
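# Editor's usage sketch (not in the original file): inputs are float32 arrays shaped
# (n_samples, n_features) and targets (n_samples, n_outputs); all values are synthetic.
#
#   x_train = np.random.rand(1000, 8).astype(np.float32)
#   y_train = np.random.rand(1000, 1).astype(np.float32)
#   x_valid = np.random.rand(200, 8).astype(np.float32)
#   y_valid = np.random.rand(200, 1).astype(np.float32)
#   sign, n_layers, train_err, valid_err = Ten_train(
#       x_train, y_train, x_valid, y_valid,
#       hiddenlayers=2, hiddennodes=32, itertimes=2000, batch_size=100)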
|
var/spack/repos/builtin/packages/nvptx-tools/package.py | LiamBindle/spack | 2,360 | 12681652 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NvptxTools(AutotoolsPackage):
"""nvptx-tools: A collection of tools for use with nvptx-none GCC
toolchains. These tools are necessary when building a version
of GCC that enables offloading of OpenMP/OpenACC code to NVIDIA
GPUs."""
homepage = "https://github.com/MentorEmbedded/nvptx-tools"
git = "https://github.com/MentorEmbedded/nvptx-tools"
version('2021-05-21', commit='<PASSWORD>')
version('2018-03-01', commit='<PASSWORD>')
depends_on('binutils')
depends_on('cuda')
def configure_args(self):
cuda_dir = self.spec['cuda'].prefix
config_args = [
"--with-cuda-driver-include={0}".format(cuda_dir.include),
"--with-cuda-driver-lib={0}".format(cuda_dir.lib64)
]
return config_args
|
fusesoc/provider/github.py | idex-biometrics/fusesoc | 829 | 12681660 | # Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import logging
import os.path
import sys
import tarfile
from fusesoc.provider.provider import Provider
logger = logging.getLogger(__name__)
if sys.version_info[0] >= 3:
import urllib.request as urllib
from urllib.error import URLError
else:
import urllib
from urllib2 import URLError
URL = "https://github.com/{user}/{repo}/archive/{version}.tar.gz"
class Github(Provider):
def _checkout(self, local_dir):
user = self.config.get("user")
repo = self.config.get("repo")
version = self.config.get("version", "master")
# TODO : Sanitize URL
url = URL.format(user=user, repo=repo, version=version)
logger.info(f"Downloading {user}/{repo} from github")
try:
(filename, headers) = urllib.urlretrieve(url)
except URLError as e:
raise RuntimeError(f"Failed to download '{url}'. '{e.reason}'")
t = tarfile.open(filename)
(cache_root, core) = os.path.split(local_dir)
# Ugly hack to get the first part of the directory name of the extracted files
tmp = t.getnames()[0]
t.extractall(cache_root)
os.rename(os.path.join(cache_root, tmp), os.path.join(cache_root, core))
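# Editor's note (illustrative): a core description would typically point at this provider
# with a section along these lines; user/repo/version values are hypothetical, and
# `version` falls back to "master" when omitted, as read via self.config above.
#
#   provider:
#     name    : github
#     user    : SomeUser
#     repo    : some-cores
#     version : v1.0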
|
clients/python/lipstick/lipstick.py | OpenEarthDemo/Lipstick | 238 | 12681671 | <filename>clients/python/lipstick/lipstick.py
import requests
from .graph import *
from .template import Template
class BaseClient(object):
def __init__(self, base_url):
if base_url.startswith("http"):
self.base_url = base_url
else:
self.base_url = "http://"+base_url
def request(self, method, path, **kwargs):
url = self.base_url+path
return requests.request(method=method, url=url, **kwargs)
def get(self, path, **kwargs):
kwargs.setdefault('allow_redirects', True)
return self.request('get', path, **kwargs)
def post(self, path, data=None, **kwargs):
return self.request('post', path, data=data, **kwargs)
def put(self, path, data=None, **kwargs):
return self.request('put', path, data=data, **kwargs)
class Client(BaseClient):
job_path = '/v1/job'
template_path = '/template'
def get(self, graph_id):
path = '%s/%s' % (Client.job_path, graph_id)
response = super(Client, self).get(path)
if (response.ok):
return graph(response.json())
else:
response.raise_for_status()
def list(self):
response = super(Client, self).get(Client.job_path)
if (response.ok):
return response.json()
else:
response.raise_for_status()
def save(self, graph):
return self.post(Client.job_path, data=graph.json(), headers={'content-type': 'application/json'})
def update(self, graph):
path = '%s/%s' % (Client.job_path, graph.id())
return self.put(path, data=graph.json())
def create_template(self, template):
path = '%s/%s' % (Client.template_path, template.name)
return self.post(path, data=template.json())
def get_template(self, name):
path = '%s/%s' % (Client.template_path, name)
response = super(Client, self).get(path)
if (response.ok):
return template(response.json())
else:
response.raise_for_status()
def list_templates(self):
response = super(Client, self).get(Client.template_path)
if (response.ok):
return response.json()
else:
response.raise_for_status()
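# Editor's usage sketch (illustrative; the server address and graph id are placeholders):
#
#   client = Client("localhost:9292")
#   jobs = client.list()                # JSON listing of known jobs
#   g = client.get("some-graph-id")     # wrapped into a graph object via graph(...)
#   client.save(g)                      # POST the graph back to the server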
|
NVLL/data/preprocess_yelp13_to_ptb_format.py | jennhu/vmf_vae_nlp | 159 | 12681681 | <reponame>jennhu/vmf_vae_nlp
"""
Format: [sent_bit] [w0] [w1] ...
"""
def remove_ids(fname, trunc=50):
with open(fname, 'r', errors='ignore') as fd:
lines = fd.read().splitlines()
bag = []
for l in lines:
l = l.replace(" <sssss>", "")
tokens = l.split("\t")
assert len(tokens) == 7
sent_bit = str(int(tokens[4]) - 1)
words = tokens[6]
txt = words.split(" ")
if len(txt) > trunc:
txt = txt[:trunc]
words = " ".join(txt)
seq = sent_bit + " " + words
bag.append(seq)
with open(fname[5:], 'w') as fd:
fd.write("\n".join(bag))
import os
os.chdir("../../data/yelp")
remove_ids("yelp-test.txt")
remove_ids("yelp-train.txt")
remove_ids("yelp-valid.txt")
def check_num_words(fname):
with open(fname, 'r') as fd:
lines = fd.read().splitlines()
bag = []
for l in lines:
words = l.split(" ")[1:]
# words = words.split(" ")
bag.append(len(words))
print("{} {}".format(fname, sum(bag) / len(bag)))
check_num_words("train.txt")
check_num_words("test.txt")
check_num_words("valid.txt")
# from NVLL.util.util import Dictionary
def count(dic, fname):
with open(fname, 'r') as fd:
lines = fd.read().splitlines()
lines = " ".join(lines)
words = lines.split(" ")
for w in words:
if w in dic:
dic[w] += 1
else:
dic[w] = 1
return dic
def reduce_vocab_sz(vocab_sz=15000):
# pad eos unk
d = {}
d = count(d, "train.txt")
d = count(d, "valid.txt")
d = count(d, "test.txt")
s = [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)][:vocab_sz]
rt = []
for k, v in s:
rt.append(k)
# print(k, v)
return rt
word_list = reduce_vocab_sz()
def replace(wlist, fname):
with open(fname, 'r') as fd:
lines = fd.read().splitlines()
new_lines = []
for l in lines:
raw_words = l.split(" ")
new_words = []
for w in raw_words:
if w in wlist:
new_words.append(w)
else:
new_words.append("<unk>")
new_lines.append(" ".join(new_words))
with open(fname, 'w') as fd:
fd.write("\n".join(new_lines))
replace(word_list, "train.txt")
replace(word_list, "valid.txt")
replace(word_list, "test.txt")
|
bauh/gems/arch/__init__.py | Flash1232/bauh | 507 | 12681718 | <gh_stars>100-1000
import os
from pathlib import Path
from bauh.api.constants import CACHE_PATH, CONFIG_PATH, TEMP_DIR
from bauh.commons import resource
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_DIR = '{}/arch'.format(TEMP_DIR)
ARCH_CACHE_PATH = CACHE_PATH + '/arch'
CATEGORIES_FILE_PATH = ARCH_CACHE_PATH + '/categories.txt'
URL_CATEGORIES_FILE = 'https://raw.githubusercontent.com/vinifmor/bauh-files/master/arch/categories.txt'
URL_GPG_SERVERS = 'https://raw.githubusercontent.com/vinifmor/bauh-files/master/arch/gpgservers.txt'
CONFIG_DIR = '{}/.config/bauh/arch'.format(str(Path.home()))
CUSTOM_MAKEPKG_FILE = '{}/makepkg.conf'.format(CONFIG_DIR)
AUR_INDEX_FILE = '{}/aur/index.txt'.format(ARCH_CACHE_PATH)
AUR_INDEX_TS_FILE = '{}/aur/index.ts'.format(ARCH_CACHE_PATH)
CONFIG_FILE = '{}/arch.yml'.format(CONFIG_PATH)
SUGGESTIONS_FILE = 'https://raw.githubusercontent.com/vinifmor/bauh-files/master/arch/aur_suggestions.txt'
UPDATES_IGNORED_FILE = '{}/updates_ignored.txt'.format(CONFIG_DIR)
EDITABLE_PKGBUILDS_FILE = '{}/aur/editable_pkgbuilds.txt'.format(CONFIG_DIR)
IGNORED_REBUILD_CHECK_FILE = '{}/aur/ignored_rebuild_check.txt'.format(CONFIG_DIR)
def get_icon_path() -> str:
return resource.get_path('img/arch.svg', ROOT_DIR)
def get_repo_icon_path() -> str:
return resource.get_path('img/repo.svg', ROOT_DIR)
|
tests/test_agent.py | PLPeeters/reppy | 137 | 12681720 | <filename>tests/test_agent.py
import unittest
from reppy.robots import Agent, Robots
class AgentTest(unittest.TestCase):
'''Tests about the Agent.'''
def parse(self, content, name):
'''Parse the robots.txt in content and return the agent of the provided name.'''
return Robots.parse('http://example.com', content).agent(name)
def test_length(self):
'''An agent knows how many directives it has.'''
agent = Agent().disallow('/path').allow('/path/')
self.assertEqual(len(agent), 2)
def test_make_allowed(self):
'''Make an agent that allows a path.'''
agent = Agent().disallow('/path').allow('/path/')
self.assertTrue(agent.allowed('/path/'))
self.assertFalse(agent.allowed('/path'))
def test_make_disallowed(self):
'''Make an agent that disallows a path.'''
agent = Agent().disallow('/path')
self.assertFalse(agent.allowed('/path'))
def test_checks_allowed(self):
'''Answers the allowed question.'''
agent = self.parse('''
User-agent: agent
Allow: /path
''', 'agent')
self.assertTrue(agent.allowed('/path'))
self.assertTrue(agent.allowed('/elsewhere'))
def test_honors_longest_first_priority(self):
'''The longest matching rule takes priority.'''
agent = self.parse('''
User-agent: agent
Disallow: /path
Allow: /path/exception
''', 'agent')
self.assertTrue(agent.allowed('/path/exception'))
self.assertFalse(agent.allowed('/path'))
def test_robots_txt_allowed(self):
'''Robots.txt is always allowed.'''
agent = self.parse('''
User-agent: agent
Disallow: /robots.txt
''', 'agent')
self.assertTrue(agent.allowed('/robots.txt'))
def test_disallow_none(self):
'''Recognizes the "Disallow:" form of "Allow: /"'''
agent = self.parse('''
User-agent: agent
Disallow:
''', 'agent')
self.assertTrue(agent.allowed('/anything'))
def test_escaped_rule(self):
'''Handles an escaped rule.'''
agent = self.parse('''
User-agent: agent
Disallow: /a%3cd.html
''', 'agent')
self.assertFalse(agent.allowed('/a<d.html'))
self.assertFalse(agent.allowed('/a%3cd.html'))
def test_unescaped_rule(self):
'''Handles an unescaped rule.'''
agent = self.parse('''
User-agent: agent
Disallow: /a<d.html
''', 'agent')
self.assertFalse(agent.allowed('/a<d.html'))
self.assertFalse(agent.allowed('/a%3cd.html'))
def test_escaped_rule_wildcard(self):
'''Handles the case where the wildcard rule is escaped.'''
agent = self.parse('''
User-agent: agent
Disallow: /a%3c*
''', 'agent')
self.assertFalse(agent.allowed('/a<d.html'))
self.assertFalse(agent.allowed('/a%3cd.html'))
def test_unescaped_rule_wildcard(self):
'''Handles the case where the wildcard rule is unescaped.'''
agent = self.parse('''
User-agent: agent
Disallow: /a<*
''', 'agent')
self.assertFalse(agent.allowed('/a<d.html'))
self.assertFalse(agent.allowed('/a%3cd.html'))
def test_accepts_full_url(self):
'''Accepts a full URL.'''
agent = self.parse('''
User-agent: agent
Disallow: /path;params?query
''', 'agent')
        self.assertFalse(agent.allowed('http://example.com/path;params?query'))
def test_query_only(self):
'''Recognized query-only rules.'''
agent = self.parse('''
User-agent: agent
Disallow: /?
''', 'agent')
self.assertFalse(agent.allowed('/?'))
self.assertTrue(agent.allowed('/'))
def test_params_only(self):
'''Recognized params-only rules.'''
agent = self.parse('''
User-agent: agent
Disallow: /;
''', 'agent')
self.assertFalse(agent.allowed('/;'))
self.assertTrue(agent.allowed('/'))
def test_str(self):
'''str() shows directives.'''
agent = self.parse('''
User-agent: agent
Disallow: /
''', 'agent')
self.assertEquals(str(agent), '[Directive(Disallow: /)]')
def test_str_crawl_delay(self):
'''str() shows crawl-delay.'''
agent = self.parse('''
User-agent: agent
Crawl-Delay: 1
Disallow: /
''', 'agent')
self.assertEquals(
str(agent), 'Crawl-Delay: 1 [Directive(Disallow: /)]')
|
mmflow/core/hooks/multistagelr_updater.py | ArlenCHEN/mmflow | 481 | 12681721 | <gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Sequence
from mmcv.runner import HOOKS, IterBasedRunner, LrUpdaterHook
@HOOKS.register_module()
class MultiStageLrUpdaterHook(LrUpdaterHook):
"""Multi-Stage Learning Rate Hook.
Args:
milestone_lrs (Sequence[float]): The base LR for multi-stages.
milestone_iters (Sequence[int]): The first iterations in different
stages.
steps (Sequence[Sequence[int]]): The steps to decay the LR in stages.
gammas (Sequence[float]): The list of decay LR ratios.
kwargs (any): The arguments of LrUpdaterHook.
"""
def __init__(self, milestone_lrs: Sequence[float],
milestone_iters: Sequence[int],
steps: Sequence[Sequence[int]], gammas: Sequence[float],
**kwargs: Any) -> None:
assert len(milestone_lrs) == len(milestone_iters) == len(steps) == len(
gammas
        ), ('For MultiStageLrUpdaterHook, lengths of milestone_lrs, milestone_iters, steps and gammas',
f'must be equal, but got {len(milestone_lrs)}, ',
f'{len(milestone_iters)}, {len(steps)}, and {len(gammas)}')
for i in range(len(milestone_iters)):
assert milestone_iters[i] < steps[i][0], (
                'milestone iter must be '
                'less than the first step in its stage')
self.milestone_lrs = milestone_lrs
self.steps = steps
self.gammas = gammas
self.milestone_iters = milestone_iters
super().__init__(**kwargs)
def get_lr(self, runner: IterBasedRunner, base_lr: float) -> float:
"""Get current LR.
Args:
runner (IterBasedRunner): The runner to control the training
workflow.
base_lr (float): The base LR in training workflow.
Returns:
float: The current LR.
"""
progress = runner.epoch if self.by_epoch else runner.iter
if progress < self.milestone_iters[0]:
return base_lr
milestone = -1
for i, milestone_iter in enumerate(self.milestone_iters[1:]):
if progress < milestone_iter:
milestone = i
break
exp = len(self.steps[milestone])
for ii, s in enumerate(self.steps[milestone]):
if progress < s:
exp = ii
break
lr = self.milestone_lrs[milestone] * (self.gammas[milestone]**exp)
return lr
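# Editor's sketch (illustrative values only): in an mmcv-style config this hook is
# typically requested through `lr_config`; the numbers below are made up but satisfy
# the length and "milestone iter < first step" checks above.
#
#   lr_config = dict(
#       policy='MultiStage',
#       by_epoch=False,
#       milestone_lrs=[1e-4, 5e-5],
#       milestone_iters=[0, 100000],
#       steps=[[40000, 60000], [140000, 160000]],
#       gammas=[0.5, 0.5])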
|
python/boxdraw_test.py | nino/vim-boxdraw | 165 | 12681773 | from boxdraw import *
from pprint import pprint
# --------- Test utilities --------
def assert_cmd(cmd, cur1, cur2, lines, *args):
assert len(lines) % 2 == 0
input_lines = [lines[i] for i in range(0, len(lines), 2)]
expected = [lines[i] for i in range(1, len(lines), 2)]
# Determine coordinates from '1' / '2' strings
y1 = [i for i in range(len(input_lines)) if '1' in input_lines[i]][0]
y2 = [i for i in range(len(input_lines)) if '2' in input_lines[i]][0]
x1 = [l.index('1') for l in input_lines if '1' in l][0]
x2 = [l.index('2') for l in input_lines if '2' in l][0]
input_lines = [l.replace('1',cur1).replace('2',cur2) for l in input_lines]
if callable(cmd):
actual = list(cmd(input_lines, y1, x1, y2, x2, *args))
else:
actual = list(run_command(cmd, input_lines, y1, x1, y2, x2, *args))
if expected != actual:
print("Expected:")
pprint(expected, width=1)
print("Actual:")
pprint(actual, width=1)
assert expected == actual
# -------- Utility functions --------
def test_expand_line():
assert expand_line('', 0) == ''
assert expand_line('\n', 0) == '\n'
assert expand_line('xx', 1) == 'xx'
assert expand_line('xx\n', 1) == 'xx\n'
assert expand_line('xxx\n', 6) == 'xxx \n'
assert expand_line('xxx', 6) == 'xxx '
def test_replace_at():
assert replace_at('----', 0, 'xx') == 'xx--'
assert replace_at('----', 1, 'xx') == '-xx-'
assert replace_at('----', 3, 'xx') == '---xx'
assert replace_at('----', 3, 'xxx') == '---xxx'
assert replace_at('----', 3, 'xx\n') == '---xx\n'
def test_overwrite_at():
assert overwrite_at('----', 0, 'x x ') == 'x-x-'
assert overwrite_at('----', 1, 'x x ') == '-x-x'
assert overwrite_at('----', 2, 'x x ') == '--x-x'
assert overwrite_at('----', 3, 'x x ') == '---x x'
assert overwrite_at('----\n', 3, 'x x ') == '---x x\n'
assert overwrite_at('---', 0, ' | ') == '-+-'
def test_replace_block():
lines = [
'foo',
'bar',
'b',
]
block = [
'1234',
'5678',
]
assert list(replace_block(lines, 1, 2, block)) == [
'foo',
'ba1234',
'b 5678',
]
def test_line():
assert line('<->', 0) == ''
assert line('<->', 1) == '<'
assert line('<->', 2) == '<-'
assert line('<->', 3) == '<->'
assert line('<->', 4) == '<-->'
assert line('<->', 5) == '<--->'
assert line([['+---+'], ['| |'], ['+---+']], 4) == [
'+---+',
'| |',
'| |',
'+---+',
]
# -------- Box drawing --------
def test_basic_box_drawing():
assert_cmd('+o', '.', '.', [
'........', '........',
'..1.....', '..+---+.',
'........', '..| |.',
'......2.', '..+---+.',
'........', '........',
])
def test_box_drawing_after_line_end():
assert_cmd('+o' ,'.', '.', [
'........', '........',
'..1.' , '..+---+',
'' , ' | |',
'......2' , '..+---+',
])
def test_fill_box_alignments():
assert_cmd('+{[c', ' ', ' ', [
'+------------+', '+------------+',
'|1...........|', '|This is a |',
'|....FOO.....|', '|test. |',
'|............|', '| |',
'|...........2|', '| |',
'+------------+', '+------------+',
], 'This is a test.')
assert_cmd('+{c', ' ', ' ', [
'+------------+', '+------------+',
'|1...........|', '| This is a |',
'|....FOO.....|', '| test. |',
'|............|', '| |',
'|...........2|', '| |',
'+------------+', '+------------+',
], 'This is a test.')
assert_cmd('+{]c', ' ', ' ', [
'+------------+', '+------------+',
'|1...........|', '| This is a|',
'|....FOO.....|', '| test.|',
'|............|', '| |',
'|...........2|', '| |',
'+------------+', '+------------+',
], 'This is a test.')
assert_cmd('+c', ' ', ' ', [
'+------------+', '+------------+',
'|1...........|', '| |',
'|....FOO.....|', '| This is a |',
'|............|', '| test. |',
'|...........2|', '| |',
'+------------+', '+------------+',
], 'This is a test.')
assert_cmd('+}]c', ' ', ' ', [
'+------------+', '+------------+',
'|1...........|', '| |',
'|....FOO.....|', '| |',
'|............|', '| This is a|',
'|...........2|', '| test.|',
'+------------+', '+------------+',
], 'This is a test.')
def test_fill_box_too_small():
assert_cmd('+{[c', ' ', ' ', [
'+-----+', '+-----+',
'|1 |', '|not |',
'| 2|', '|enoug|',
'+-----+', '+-----+',
], 'not enough space')
assert_cmd('+{[c', ' ', ' ', [
'+-+', '+-+',
'|1|', '|n|',
'|.|', '|e|',
'|2|', '|s|',
'+-+', '+-+',
], 'not enough space')
def test_draw_box_with_label():
assert_cmd('+O', '.', '.', [
'.........', '.........',
'.1.......', '.+-----+.',
'.........', '.| foo |.',
'.........', '.| bar |.',
'.......2.', '.+-----+.',
'.........', '.........',
], 'foo bar')
# -------- Line drawing --------
def test_arrow_reverse():
assert arrow_reverse('---') == '---'
assert arrow_reverse('<->') == '<->'
assert arrow_reverse('-->') == '<--'
assert arrow_reverse('<--') == '-->'
def test_draw_ling_hv():
assert_cmd(draw_line_hv, ' ', ' ', [
' 1 2 ', ' o----> ',
], 'o->')
assert_cmd(draw_line_hv, ' ', ' ', [
' 2 1 ', ' <----o ',
], 'o->')
assert_cmd(draw_line_hv, ' ', ' ', [
' ', ' ',
'1', 'o',
' ', '|',
' ', '|',
'2', 'v',
], 'o->')
assert_cmd(draw_line_hv, ' ', ' ', [
' ', ' ',
'2', '^',
' ', '|',
' ', '|',
'1', 'o',
], 'o->')
assert_cmd(draw_line_hv, ' ', ' ', [
' ', ' ',
' 1 ', ' o----+ ',
' ', ' | ',
' 2 ', ' v ',
' ', ' ',
], 'o->')
assert_cmd(draw_line_hv, ' ', ' ', [
' ', ' ',
' 2 ', ' ^ ',
' ', ' | ',
' 1 ', ' +----o ',
' ', ' ',
], 'o->')
assert_cmd(draw_line_hv, ' ', ' ', [
' ', ' ',
' 1 ', ' +----o ',
' ', ' | ',
' 2 ', ' v ',
' ', ' ',
], 'o->')
assert_cmd(draw_line_hv, ' ', ' ', [
' ', ' ',
' 2 ', ' ^ ',
' ', ' | ',
' 1 ', ' o----+ ',
' ', ' ',
], 'o->')
def test_draw_ling_vh():
assert_cmd(draw_line_vh, ' ', ' ', [
' 1 2 ', ' o----> ',
], 'o->')
assert_cmd(draw_line_vh, ' ', ' ', [
' 2 1 ', ' <----o ',
], 'o->')
assert_cmd(draw_line_vh, ' ', ' ', [
' ', ' ',
'1', 'o',
' ', '|',
' ', '|',
'2', 'v',
], 'o->')
assert_cmd(draw_line_vh, ' ', ' ', [
' ', ' ',
'2', '^',
' ', '|',
' ', '|',
'1', 'o',
], 'o->')
assert_cmd(draw_line_vh, ' ', ' ', [
' ', ' ',
' 1 ', ' o ',
' ', ' | ',
' 2 ', ' +----> ',
' ', ' ',
], 'o->')
assert_cmd(draw_line_vh, ' ', ' ', [
' ', ' ',
' 2 ', ' <----+ ',
' ', ' | ',
' 1 ', ' o ',
' ', ' ',
], 'o->')
assert_cmd(draw_line_vh, ' ', ' ', [
' ', ' ',
' 1 ', ' o ',
' ', ' | ',
' 2 ', ' <----+ ',
' ', ' ',
], 'o->')
assert_cmd(draw_line_vh, ' ', ' ', [
' ', ' ',
' 2 ', ' +----> ',
' ', ' | ',
' 1 ', ' o ',
' ', ' ',
], 'o->')
def test_line_plus_connections():
assert_cmd(draw_line_vh, '-', ' ', [
' |', ' |',
' 2|', ' +---->|',
' |', ' | |',
'-1- ', '-+- ',
' ', ' ',
], '-->')
assert_cmd(draw_line_vh, '-', '|', [
' |', ' |',
' 2', ' +----->',
' |', ' | |',
'-1- ', '-+- ',
' ', ' ',
], '-->')
assert_cmd(draw_line_vh, '-', '|', [
' |', ' |',
' 2', ' +-----+',
' |', ' | |',
'-1- ', '-+- ',
' ', ' ',
], '---')
# -------- Selection --------
def test_select_outer_box():
sel = select_outer_box([
' ',
' +-------+ ',
' | # | ',
' +-------+ ',
' ',
' ',
], 2, 5, 2, 5)
assert sel == ["1,3,3,11"]
def test_select_inner_box():
sel = select_inner_box([
' ',
' +-------+ ',
' | # | ',
' +-------+ ',
' ',
' ',
], 2, 5, 2, 5)
assert sel == ["2,4,2,10"]
|
egs2/dirha_wsj/asr1/local/prepare_dirha_ir.py | texpomru13/espnet | 5,053 | 12681777 | <reponame>texpomru13/espnet
#!/usr/bin/env python3
import argparse
from pathlib import Path
from typing import Optional
import resampy
import scipy.io
import soundfile
def prepare(
dirha_dir: str,
fs: int,
audio_dir: str,
data_dir: Optional[str],
audio_format: str = "flac",
):
dirha_dir = Path(dirha_dir)
audio_dir = Path(audio_dir)
if data_dir is not None:
data_dir = Path(data_dir)
data_dir.mkdir(parents=True, exist_ok=True)
fscp = (data_dir / "wav.scp").open("w")
else:
fscp = None
for m in dirha_dir.glob("**/*.mat"):
# FIXME(kamo): Using all IRs here regardless of mic type
if m.stem == "ref-chirp":
continue
# (Time, Channel) or (Channel, Time)
x = scipy.io.loadmat(m)["risp_imp"]
if x.shape[0] == 1:
x = x[0]
elif x.shape[1] == 1:
x = x[:, 0]
else:
raise RuntimeError(m, x.shape)
# 48khz impulse response
r = 48000
if r != fs:
x = resampy.resample(x, r, fs, axis=0)
# Rescale
x = 0.95 * x / max(abs(x))
owav = audio_dir / m.parent.name / f"{m.stem}.{audio_format}"
owav.parent.mkdir(parents=True, exist_ok=True)
soundfile.write(owav, x, fs)
if fscp is not None:
rid = f"{m.parent.name}_{m.stem}"
fscp.write(f"{rid} {owav}\n")
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Prepare Dirha WSJ data",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--dirha_dir", required=True, help="Input directory")
parser.add_argument("--audio_dir", required=True, help="Output directory")
parser.add_argument("--data_dir", help="Output directory")
parser.add_argument("--audio_format", default="flac")
parser.add_argument("--fs", type=int, default=16000)
return parser
def main(cmd=None):
parser = get_parser()
args = parser.parse_args(cmd)
kwargs = vars(args)
prepare(**kwargs)
if __name__ == "__main__":
main()
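# Editor's usage sketch (all paths are placeholders):
#
#   python local/prepare_dirha_ir.py \
#       --dirha_dir /path/to/DIRHA/IRs \
#       --audio_dir data/local/dirha_ir_audio \
#       --data_dir data/local/dirha_ir \
#       --fs 16000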
|
boost_adaptbx/tests/tst_string_representation.py | dperl-sol/cctbx_project | 155 | 12681815 | from __future__ import absolute_import, division, print_function
from six.moves import range
def exercise():
import boost_adaptbx.boost.python as bp
csr = bp.ext.string_representation
from libtbx.str_utils import py_string_representation as psr
for sr in [csr, psr]:
assert sr("a", '"', "'") == '"a"'
assert sr("b", "'", '"') == "'b'"
def check(s):
c = csr(s, '"', "'")
p = psr(s, '"', "'")
assert c == p
r = eval(c)
assert r == s
iset = list(range(130)) + list(range(250,256))
for i in iset:
s = chr(i)
check(s)
for j in iset:
t = s + chr(j)
check(t)
def run(args):
assert args in [[], ["--forever"]]
while True:
exercise()
if (len(args) == 0):
break
print("OK")
if (__name__ == "__main__"):
import sys
run(args=sys.argv[1:])
|
tests/test_data/testpkgs/pkg1/sub/__main__.py | int19h/ptvsd | 349 | 12681822 | <reponame>int19h/ptvsd
from debug_me import backchannel
backchannel.send("ok")
|
pyhanko_tests/layout_test_utils.py | peteris-zealid/pyHanko | 161 | 12681824 | <reponame>peteris-zealid/pyHanko
import logging
import os
import subprocess
import tempfile
import pytest
__all__ = ['with_layout_comparison', 'compare_output']
from pyhanko.pdf_utils.writer import BasePdfFileWriter
logger = logging.getLogger(__name__)
SKIP_LAYOUT = False
SKIP_LAYOUT_REASON = "pdftoppm or compare tool path not specified"
pdftoppm_path = os.environ.get('PDFTOPPM_PATH', None)
compare_path = os.environ.get('IM_COMPARE_PATH', None)
if not pdftoppm_path or not compare_path:
logger.warning(f"Skipping layout tests --- {SKIP_LAYOUT_REASON}")
SKIP_LAYOUT = True
with_layout_comparison = pytest.mark.skipif(
SKIP_LAYOUT, reason=SKIP_LAYOUT_REASON
)
def _render_pdf(pdf_file, out_file_prefix):
# render the first page of a PDF to PNG file using pdftoppm
result = subprocess.run(
[pdftoppm_path, '-singlefile', '-png', pdf_file, out_file_prefix]
)
if result.returncode != 0:
raise RuntimeError(
f"Failed to convert {pdf_file} to {out_file_prefix}.png using "
f"pdftoppm (executable: {pdftoppm_path})."
)
return f"{out_file_prefix}.png"
def compare_output(writer: BasePdfFileWriter, expected_output_path):
with tempfile.TemporaryDirectory() as working_dir:
output_path = os.path.join(working_dir, 'output.pdf')
with open(output_path, 'wb') as outf:
writer.write(outf)
expected_png = _render_pdf(
expected_output_path, os.path.join(working_dir, 'expected')
)
actual_png = _render_pdf(
output_path, os.path.join(working_dir, 'actual')
)
result = subprocess.run(
# use the Absolute Error metric, since it's a single number
# and hence very easy to process
[
compare_path, '-metric', 'ae',
expected_png, actual_png, os.path.join(working_dir, 'diff.png')
],
capture_output=True
)
# TODO maintain a directory of failed test outputs?
if result.stderr != b'0':
raise RuntimeError(
f"Output compare test failed --- absolute error: "
f"{result.stderr.decode('utf8')}"
)
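# Editor's usage sketch (illustrative): a layout test typically builds a writer with the
# content under test and compares it against a pre-rendered reference PDF; the reference
# file name below is hypothetical.
#
#   @with_layout_comparison
#   def test_example_stamp():
#       w = ...  # a BasePdfFileWriter populated by the code under test (placeholder)
#       compare_output(w, 'pyhanko_tests/data/pdf/expected-stamp.pdf')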
|
src/borg/patterns.py | phil294/borg | 8,680 | 12681834 | import argparse
import fnmatch
import os.path
import re
import sys
import unicodedata
from collections import namedtuple
from enum import Enum
from . import shellpattern
from .helpers import clean_lines
from .helpers.errors import Error
def parse_patternfile_line(line, roots, ie_commands, fallback):
"""Parse a pattern-file line and act depending on which command it represents."""
ie_command = parse_inclexcl_command(line, fallback=fallback)
if ie_command.cmd is IECommand.RootPath:
roots.append(ie_command.val)
elif ie_command.cmd is IECommand.PatternStyle:
fallback = ie_command.val
else:
# it is some kind of include/exclude command
ie_commands.append(ie_command)
return fallback
def load_pattern_file(fileobj, roots, ie_commands, fallback=None):
if fallback is None:
fallback = ShellPattern # ShellPattern is defined later in this module
for line in clean_lines(fileobj):
fallback = parse_patternfile_line(line, roots, ie_commands, fallback)
def load_exclude_file(fileobj, patterns):
for patternstr in clean_lines(fileobj):
patterns.append(parse_exclude_pattern(patternstr))
class ArgparsePatternAction(argparse.Action):
def __init__(self, nargs=1, **kw):
super().__init__(nargs=nargs, **kw)
def __call__(self, parser, args, values, option_string=None):
parse_patternfile_line(values[0], args.paths, args.patterns, ShellPattern)
class ArgparsePatternFileAction(argparse.Action):
def __init__(self, nargs=1, **kw):
super().__init__(nargs=nargs, **kw)
def __call__(self, parser, args, values, option_string=None):
"""Load and parse patterns from a file.
Lines empty or starting with '#' after stripping whitespace on both line ends are ignored.
"""
filename = values[0]
try:
with open(filename) as f:
self.parse(f, args)
except FileNotFoundError as e:
raise Error(str(e))
def parse(self, fobj, args):
load_pattern_file(fobj, args.paths, args.patterns)
class ArgparseExcludeFileAction(ArgparsePatternFileAction):
def parse(self, fobj, args):
load_exclude_file(fobj, args.patterns)
class PatternMatcher:
"""Represents a collection of pattern objects to match paths against.
*fallback* is a boolean value that *match()* returns if no matching patterns are found.
"""
def __init__(self, fallback=None):
self._items = []
# Value to return from match function when none of the patterns match.
self.fallback = fallback
# optimizations
self._path_full_patterns = {} # full path -> return value
# indicates whether the last match() call ended on a pattern for which
# we should recurse into any matching folder. Will be set to True or
# False when calling match().
self.recurse_dir = None
# whether to recurse into directories when no match is found
# TODO: allow modification as a config option?
self.recurse_dir_default = True
self.include_patterns = []
# TODO: move this info to parse_inclexcl_command and store in PatternBase subclass?
self.is_include_cmd = {
IECommand.Exclude: False,
IECommand.ExcludeNoRecurse: False,
IECommand.Include: True
}
def empty(self):
return not len(self._items) and not len(self._path_full_patterns)
def _add(self, pattern, cmd):
"""*cmd* is an IECommand value.
"""
if isinstance(pattern, PathFullPattern):
key = pattern.pattern # full, normalized path
self._path_full_patterns[key] = cmd
else:
self._items.append((pattern, cmd))
def add(self, patterns, cmd):
"""Add list of patterns to internal list. *cmd* indicates whether the
pattern is an include/exclude pattern, and whether recursion should be
done on excluded folders.
"""
for pattern in patterns:
self._add(pattern, cmd)
def add_includepaths(self, include_paths):
"""Used to add inclusion-paths from args.paths (from commandline).
"""
include_patterns = [parse_pattern(p, PathPrefixPattern) for p in include_paths]
self.add(include_patterns, IECommand.Include)
self.fallback = not include_patterns
self.include_patterns = include_patterns
def get_unmatched_include_patterns(self):
"Note that this only returns patterns added via *add_includepaths*."
return [p for p in self.include_patterns if p.match_count == 0]
def add_inclexcl(self, patterns):
"""Add list of patterns (of type CmdTuple) to internal list.
"""
for pattern, cmd in patterns:
self._add(pattern, cmd)
def match(self, path):
"""Return True or False depending on whether *path* is matched.
If no match is found among the patterns in this matcher, then the value
in self.fallback is returned (defaults to None).
"""
path = normalize_path(path).lstrip(os.path.sep)
# do a fast lookup for full path matches (note: we do not count such matches):
non_existent = object()
value = self._path_full_patterns.get(path, non_existent)
if value is not non_existent:
# we have a full path match!
self.recurse_dir = command_recurses_dir(value)
return self.is_include_cmd[value]
# this is the slow way, if we have many patterns in self._items:
for (pattern, cmd) in self._items:
if pattern.match(path, normalize=False):
self.recurse_dir = pattern.recurse_dir
return self.is_include_cmd[cmd]
# by default we will recurse if there is no match
self.recurse_dir = self.recurse_dir_default
return self.fallback
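# Editor's sketch (illustrative, not part of the original module): typical use is to add
# include/exclude commands and then query paths; the sample pattern is hypothetical.
#
#   matcher = PatternMatcher(fallback=True)
#   matcher.add([parse_pattern('sh:home/*/junk')], IECommand.Exclude)
#   matcher.match('home/user/junk/cache.bin')   # -> False (excluded)
#   matcher.match('home/user/docs/notes.txt')   # -> True (fallback)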
def normalize_path(path):
"""normalize paths for MacOS (but do nothing on other platforms)"""
# HFS+ converts paths to a canonical form, so users shouldn't be required to enter an exact match.
# Windows and Unix filesystems allow different forms, so users always have to enter an exact match.
return unicodedata.normalize('NFD', path) if sys.platform == 'darwin' else path
class PatternBase:
"""Shared logic for inclusion/exclusion patterns.
"""
PREFIX = NotImplemented
def __init__(self, pattern, recurse_dir=False):
self.pattern_orig = pattern
self.match_count = 0
pattern = normalize_path(pattern)
self._prepare(pattern)
self.recurse_dir = recurse_dir
def match(self, path, normalize=True):
"""Return a boolean indicating whether *path* is matched by this pattern.
If normalize is True (default), the path will get normalized using normalize_path(),
otherwise it is assumed that it already is normalized using that function.
"""
if normalize:
path = normalize_path(path)
matches = self._match(path)
if matches:
self.match_count += 1
return matches
def __repr__(self):
return '%s(%s)' % (type(self), self.pattern)
def __str__(self):
return self.pattern_orig
def _prepare(self, pattern):
"Should set the value of self.pattern"
raise NotImplementedError
def _match(self, path):
raise NotImplementedError
class PathFullPattern(PatternBase):
"""Full match of a path."""
PREFIX = "pf"
def _prepare(self, pattern):
self.pattern = os.path.normpath(pattern).lstrip(os.path.sep) # sep at beginning is removed
def _match(self, path):
return path == self.pattern
# For PathPrefixPattern, FnmatchPattern and ShellPattern, we require that the pattern either match the whole path
# or an initial segment of the path up to but not including a path separator. To unify the two cases, we add a path
# separator to the end of the path before matching.
class PathPrefixPattern(PatternBase):
"""Literal files or directories listed on the command line
for some operations (e.g. extract, but not create).
If a directory is specified, all paths that start with that
path match as well. A trailing slash makes no difference.
"""
PREFIX = "pp"
def _prepare(self, pattern):
sep = os.path.sep
self.pattern = (os.path.normpath(pattern).rstrip(sep) + sep).lstrip(sep) # sep at beginning is removed
def _match(self, path):
return (path + os.path.sep).startswith(self.pattern)
class FnmatchPattern(PatternBase):
"""Shell glob patterns to exclude. A trailing slash means to
exclude the contents of a directory, but not the directory itself.
"""
PREFIX = "fm"
def _prepare(self, pattern):
if pattern.endswith(os.path.sep):
pattern = os.path.normpath(pattern).rstrip(os.path.sep) + os.path.sep + '*' + os.path.sep
else:
pattern = os.path.normpath(pattern) + os.path.sep + '*'
self.pattern = pattern.lstrip(os.path.sep) # sep at beginning is removed
# fnmatch and re.match both cache compiled regular expressions.
# Nevertheless, this is about 10 times faster.
self.regex = re.compile(fnmatch.translate(self.pattern))
def _match(self, path):
return (self.regex.match(path + os.path.sep) is not None)
class ShellPattern(PatternBase):
"""Shell glob patterns to exclude. A trailing slash means to
exclude the contents of a directory, but not the directory itself.
"""
PREFIX = "sh"
def _prepare(self, pattern):
sep = os.path.sep
if pattern.endswith(sep):
pattern = os.path.normpath(pattern).rstrip(sep) + sep + "**" + sep + "*" + sep
else:
pattern = os.path.normpath(pattern) + sep + "**" + sep + "*"
self.pattern = pattern.lstrip(sep) # sep at beginning is removed
self.regex = re.compile(shellpattern.translate(self.pattern))
def _match(self, path):
return (self.regex.match(path + os.path.sep) is not None)
class RegexPattern(PatternBase):
"""Regular expression to exclude.
"""
PREFIX = "re"
def _prepare(self, pattern):
self.pattern = pattern # sep at beginning is NOT removed
self.regex = re.compile(pattern)
def _match(self, path):
# Normalize path separators
if os.path.sep != '/':
path = path.replace(os.path.sep, '/')
return (self.regex.search(path) is not None)
_PATTERN_CLASSES = {
FnmatchPattern,
PathFullPattern,
PathPrefixPattern,
RegexPattern,
ShellPattern,
}
_PATTERN_CLASS_BY_PREFIX = dict((i.PREFIX, i) for i in _PATTERN_CLASSES)
CmdTuple = namedtuple('CmdTuple', 'val cmd')
class IECommand(Enum):
"""A command that an InclExcl file line can represent.
"""
RootPath = 1
PatternStyle = 2
Include = 3
Exclude = 4
ExcludeNoRecurse = 5
def command_recurses_dir(cmd):
# TODO?: raise error or return None if *cmd* is RootPath or PatternStyle
return cmd not in [IECommand.ExcludeNoRecurse]
def get_pattern_class(prefix):
try:
return _PATTERN_CLASS_BY_PREFIX[prefix]
except KeyError:
raise ValueError("Unknown pattern style: {}".format(prefix)) from None
def parse_pattern(pattern, fallback=FnmatchPattern, recurse_dir=True):
"""Read pattern from string and return an instance of the appropriate implementation class.
"""
if len(pattern) > 2 and pattern[2] == ":" and pattern[:2].isalnum():
(style, pattern) = (pattern[:2], pattern[3:])
cls = get_pattern_class(style)
else:
cls = fallback
return cls(pattern, recurse_dir)
def parse_exclude_pattern(pattern_str, fallback=FnmatchPattern):
"""Read pattern from string and return an instance of the appropriate implementation class.
"""
epattern_obj = parse_pattern(pattern_str, fallback, recurse_dir=False)
return CmdTuple(epattern_obj, IECommand.ExcludeNoRecurse)
def parse_inclexcl_command(cmd_line_str, fallback=ShellPattern):
"""Read a --patterns-from command from string and return a CmdTuple object."""
cmd_prefix_map = {
'-': IECommand.Exclude,
'!': IECommand.ExcludeNoRecurse,
'+': IECommand.Include,
'R': IECommand.RootPath,
'r': IECommand.RootPath,
'P': IECommand.PatternStyle,
'p': IECommand.PatternStyle,
}
if not cmd_line_str:
raise argparse.ArgumentTypeError("A pattern/command must not be empty.")
cmd = cmd_prefix_map.get(cmd_line_str[0])
if cmd is None:
raise argparse.ArgumentTypeError("A pattern/command must start with anyone of: %s" %
', '.join(cmd_prefix_map))
# remaining text on command-line following the command character
remainder_str = cmd_line_str[1:].lstrip()
if not remainder_str:
raise argparse.ArgumentTypeError("A pattern/command must have a value part.")
if cmd is IECommand.RootPath:
# TODO: validate string?
val = remainder_str
elif cmd is IECommand.PatternStyle:
# then remainder_str is something like 're' or 'sh'
try:
val = get_pattern_class(remainder_str)
except ValueError:
raise argparse.ArgumentTypeError("Invalid pattern style: {}".format(remainder_str))
else:
# determine recurse_dir based on command type
recurse_dir = command_recurses_dir(cmd)
val = parse_pattern(remainder_str, fallback, recurse_dir)
return CmdTuple(val, cmd)
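# A minimal usage sketch (illustrative only, not part of the original module). It relies on
# the helpers defined above and on this module's own imports; the sample pattern strings
# are made up for demonstration.
if __name__ == "__main__":
    # an explicit style prefix such as "sh:" selects ShellPattern; no prefix uses the fallback
    p = parse_pattern("sh:home/*/junk")
    print(type(p).__name__, p.match("home/alice/junk/file"))
    # a "- <pattern>" line from a --patterns-from file becomes an exclude CmdTuple
    cmd = parse_inclexcl_command("- *.cache")
    print(cmd.cmd, type(cmd.val).__name__)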
|
tutorial/example_repo/jobs/hive_jobs.py | DotModus/pinball | 1,143 | 12681859 | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pinball_ext.job.hive_jobs import HiveJob
class ShowTableHiveJob(HiveJob):
_QUERY_TEMPLATE = """
SHOW TABLES;
"""
class RandomUsersHiveJob(HiveJob):
_QUERY_TEMPLATE = """
SELECT *
FROM pop_names
WHERE dt < '%(end_date)s'
"""
def _complete(self):
super(RandomUsersHiveJob, self)._complete()
print "job stdout:\n"
print self._job_output
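# Note (assumption about the HiveJob base class, which is not shown here): the
# %(end_date)s placeholder in _QUERY_TEMPLATE is expected to be filled in with the
# job's parameters via Python %-formatting, e.g. _QUERY_TEMPLATE % {'end_date': '2015-01-01'}.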
|
packages/jet_bridge_base/jet_bridge_base/paginators/page_number.py | KrunchMuffin/jet-bridge | 1,247 | 12681864 | <filename>packages/jet_bridge_base/jet_bridge_base/paginators/page_number.py
from collections import OrderedDict
import math
from jet_bridge_base.exceptions.missing_argument_error import MissingArgumentError
from jet_bridge_base.paginators.pagination import Pagination
from jet_bridge_base.responses.json import JSONResponse
from jet_bridge_base.utils.http import replace_query_param, remove_query_param
from jet_bridge_base.utils.queryset import queryset_count_optimized
class PageNumberPagination(Pagination):
default_page_size = 25
page_query_param = 'page'
page_size_query_param = '_per_page'
max_page_size = 10000
count = None
page_number = None
page_size = None
handler = None
def paginate_queryset(self, request, queryset, handler):
page_number = self.get_page_number(request, handler)
if not page_number:
return None
page_size = self.get_page_size(request, handler)
if not page_size:
return None
self.count = queryset_count_optimized(request, queryset)
self.page_number = page_number
self.page_size = page_size
self.handler = handler
return queryset.offset((page_number - 1) * page_size).limit(page_size)
def get_pages_count(self):
return int(math.ceil(self.count / self.page_size))
def get_paginated_response(self, request, data):
return JSONResponse(OrderedDict([
('count', self.count),
('next', self.get_next_link(request)),
('previous', self.get_previous_link(request)),
('results', data),
('num_pages', self.get_pages_count()),
('per_page', self.page_size),
]))
def get_page_number(self, request, handler):
try:
result = int(request.get_argument(self.page_query_param))
return max(result, 1)
except (MissingArgumentError, ValueError):
return 1
def get_page_size(self, request, handler):
if self.page_size_query_param:
try:
result = int(request.get_argument(self.page_size_query_param))
result = max(result, 1)
if self.max_page_size:
result = min(result, self.max_page_size)
return result
except (MissingArgumentError, ValueError):
pass
return self.default_page_size
def has_next(self):
return self.page_number < self.get_pages_count()
def has_previous(self):
return self.page_number > 1
def next_page_number(self):
return self.page_number + 1
def previous_page_number(self):
return self.page_number - 1
def get_next_link(self, request):
if not self.has_next():
return None
url = request.full_url()
page_number = self.next_page_number()
return replace_query_param(url, self.page_query_param, page_number)
def get_previous_link(self, request):
if not self.has_previous():
return None
url = request.full_url()
page_number = self.previous_page_number()
if page_number == 1:
return remove_query_param(url, self.page_query_param)
return replace_query_param(url, self.page_query_param, page_number)
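# Illustration (not part of the original module): for a request such as
# ?page=3&_per_page=25, paginate_queryset() above slices the queryset with
# offset (3 - 1) * 25 = 50 and limit 25, and get_pages_count() reports
# ceil(count / 25) pages.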
|
unitest/common.py | HyeongminMoon/PatrickStar | 494 | 12681877 | # BSD 3-Clause License
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the psutil authors nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import time
import torch
from torch.multiprocessing import Process
# Worker timeout *after* the first worker has completed.
UNIT_WORKER_TIMEOUT = 120
def distributed_test(world_size=2, backend="nccl", use_fake_dist=False):
r"""A decorator for executing a function (e.g., a unit test) in a distributed manner.
This decorator manages the spawning and joining of processes, initialization of
torch.distributed, and catching of errors.
Usage example:
        @distributed_test(world_size=[2,3])
def my_test():
rank = dist.get_rank()
world_size = dist.get_world_size()
assert(rank < world_size)
Args:
world_size (int or list): number of ranks to spawn. Can be a list to spawn
multiple tests.
"""
def dist_wrap(run_func):
"""Second-level decorator for dist_test. This actually wraps the function."""
def dist_init(local_rank, num_procs, *func_args, **func_kwargs):
"""Initialize torch.distributed and execute the user function."""
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29503"
os.environ["LOCAL_RANK"] = str(local_rank)
# NOTE: unit tests don't support multi-node so local_rank == global rank
os.environ["RANK"] = str(local_rank)
os.environ["WORLD_SIZE"] = str(num_procs)
torch.distributed.init_process_group(backend=backend)
if torch.cuda.is_available():
if use_fake_dist:
torch.cuda.set_device(0)
else:
torch.cuda.set_device(local_rank)
run_func(*func_args, **func_kwargs)
def dist_launcher(num_procs, *func_args, **func_kwargs):
r"""Launch processes and gracefully handle failures."""
# Spawn all workers on subprocesses.
processes = []
for local_rank in range(num_procs):
p = Process(
target=dist_init,
args=(local_rank, num_procs, *func_args),
kwargs=func_kwargs,
)
p.start()
processes.append(p)
# Now loop and wait for a test to complete. The spin-wait here isn't a big
# deal because the number of processes will be O(#GPUs) << O(#CPUs).
any_done = False
while not any_done:
for p in processes:
if not p.is_alive():
any_done = True
break
# Wait for all other processes to complete
for p in processes:
p.join(UNIT_WORKER_TIMEOUT)
failed = [(rank, p) for rank, p in enumerate(processes) if p.exitcode != 0]
for _, p in failed:
# If it still hasn't terminated, kill it because it hung.
if p.exitcode is None:
p.terminate()
if p.exitcode != 0:
p.terminate()
def run_func_decorator(*func_args, **func_kwargs):
r"""Entry point for @distributed_test()."""
if isinstance(world_size, int):
dist_launcher(world_size, *func_args, **func_kwargs)
elif isinstance(world_size, list):
for procs in world_size:
dist_launcher(procs, *func_args, **func_kwargs)
time.sleep(0.5)
else:
raise TypeError("world_size must be an integer or a list of integers.")
return run_func_decorator
return dist_wrap
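# A minimal sketch of applying the decorator above (assumes a host with at least two
# CUDA devices and a working NCCL backend):
#
# @distributed_test(world_size=2)
# def test_all_reduce():
#     rank = torch.distributed.get_rank()
#     x = torch.ones(1).cuda() * rank
#     torch.distributed.all_reduce(x)
#     assert x.item() == sum(range(torch.distributed.get_world_size()))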
|
PC/layout/support/catalog.py | shawwn/cpython | 52,316 | 12681883 | <gh_stars>1000+
"""
File generation for catalog signing non-binary contents.
"""
__author__ = "<NAME> <<EMAIL>>"
__version__ = "3.8"
import sys
__all__ = ["PYTHON_CAT_NAME", "PYTHON_CDF_NAME"]
def public(f):
__all__.append(f.__name__)
return f
PYTHON_CAT_NAME = "python.cat"
PYTHON_CDF_NAME = "python.cdf"
CATALOG_TEMPLATE = r"""[CatalogHeader]
Name={target.stem}.cat
ResultDir={target.parent}
PublicVersion=1
CatalogVersion=2
HashAlgorithms=SHA256
PageHashes=false
EncodingType=
[CatalogFiles]
"""
def can_sign(file):
return file.is_file() and file.stat().st_size
@public
def write_catalog(target, files):
with target.open("w", encoding="utf-8") as cat:
cat.write(CATALOG_TEMPLATE.format(target=target))
cat.writelines("<HASH>{}={}\n".format(n, f) for n, f in files if can_sign(f))
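# A minimal usage sketch (illustrative only; the paths below are hypothetical):
#
# from pathlib import Path
# files = [(f.name, f) for f in Path("build").glob("*.dll")]
# write_catalog(Path("build/python.cdf"), files)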
|
HetSANN_MR/utils/layers.py | xhhszc/hetsann | 116 | 12681890 | import numpy as np
import tensorflow as tf
conv1d = tf.layers.conv1d
def attn_head(seq, out_sz, bias_mat, activation, in_drop=0.0, coef_drop=0.0, residual=False):
with tf.name_scope('my_attn'):
if in_drop != 0.0:
seq = tf.nn.dropout(seq, 1.0 - in_drop)
seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)
# simplest self-attention possible
f_1 = tf.layers.conv1d(seq_fts, 1, 1)
f_2 = tf.layers.conv1d(seq_fts, 1, 1)
logits = f_1 + tf.transpose(f_2, [0, 2, 1])
coefs = tf.nn.softmax(tf.nn.leaky_relu(logits) + bias_mat)
if coef_drop != 0.0:
coefs = tf.nn.dropout(coefs, 1.0 - coef_drop)
if in_drop != 0.0:
seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)
vals = tf.matmul(coefs, seq_fts)
ret = tf.contrib.layers.bias_add(vals)
# residual connection
if residual:
if seq.shape[-1] != ret.shape[-1]:
ret = ret + conv1d(seq, ret.shape[-1], 1) # activation
else:
ret = ret + seq
return activation(ret) # activation
# Experimental sparse attention head (for running on datasets such as Pubmed)
# N.B. Because of limitations of current TF implementation, will work _only_ if batch_size = 1!
def sp_attn_head(seq, out_sz, adj_mat, activation, nb_nodes, in_drop=0.0, coef_drop=0.0, residual=False):
with tf.name_scope('sp_attn'):
if in_drop != 0.0:
seq = tf.nn.dropout(seq, 1.0 - in_drop)
seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)
# simplest self-attention possible
f_1 = tf.layers.conv1d(seq_fts, 1, 1)
f_2 = tf.layers.conv1d(seq_fts, 1, 1)
f_1 = tf.reshape(f_1, (nb_nodes, 1))
f_2 = tf.reshape(f_2, (nb_nodes, 1))
f_1 = adj_mat*f_1
f_2 = adj_mat * tf.transpose(f_2, [1,0])
logits = tf.sparse_add(f_1, f_2)
lrelu = tf.SparseTensor(indices=logits.indices,
values=tf.nn.leaky_relu(logits.values),
dense_shape=logits.dense_shape)
coefs = tf.sparse_softmax(lrelu)
if coef_drop != 0.0:
coefs = tf.SparseTensor(indices=coefs.indices,
values=tf.nn.dropout(coefs.values, 1.0 - coef_drop),
dense_shape=coefs.dense_shape)
if in_drop != 0.0:
seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)
# As tf.sparse_tensor_dense_matmul expects its arguments to have rank-2,
# here we make an assumption that our input is of batch size 1, and reshape appropriately.
# The method will fail in all other cases!
coefs = tf.sparse_reshape(coefs, [nb_nodes, nb_nodes])
seq_fts = tf.squeeze(seq_fts)
vals = tf.sparse_tensor_dense_matmul(coefs, seq_fts)
vals = tf.expand_dims(vals, axis=0)
vals.set_shape([1, nb_nodes, out_sz])
ret = tf.contrib.layers.bias_add(vals)
# residual connection
if residual:
if seq.shape[-1] != ret.shape[-1]:
ret = ret + conv1d(seq, ret.shape[-1], 1) # activation
else:
ret = ret + seq
return activation(ret) # activation
def sp_hete_attn_head(seq, out_sz, adj_mat, adj_type, edge_list, activation, nb_nodes, in_drop=0.0, coef_drop=0.0, residual=False):
# input adjacency matrices are TRANSPOSED before feeding!
with tf.name_scope('sp_hete_attn'):
if in_drop != 0.0:
seq = [tf.nn.dropout(seq_i, 1.0 - in_drop) for seq_i in seq]
# seq_fts[j][i]: hidden features from group i to group j, center node is j
# 1 * nb_nodes_i * out_sz_j
seq_fts = [[tf.layers.conv1d(seq_i,
out_sz, # out_sz_j
1,
use_bias=False) for seq_i in seq] for _ in seq]
attn_biases = [None for _ in adj_type]
for dir_edge in edge_list:
attn_bias = tf.Variable(tf.random_normal(shape=(1, out_sz)))
attn_biases[dir_edge[0]] = attn_bias
if len(dir_edge) == 2:
attn_biases[dir_edge[1]] = -attn_bias
# for out_sz_j in out_sz
coefs_lists = [[] for _ in range(len(seq))]
seq_fts_lists = [[] for _ in range(len(seq))]
# simplest self-attention possible
for adj_ij, type_ij, attn_bias in zip(adj_mat, adj_type, attn_biases):
# transposed, # nb_nodes_j * nb_nodes_i
i, j = type_ij
f_1 = tf.reshape(seq_fts[j][j], (nb_nodes[j], out_sz))
f_1 = tf.gather(f_1, adj_ij.indices[:, 0])
f_2 = tf.reshape(seq_fts[j][i], (nb_nodes[i], out_sz))
if attn_bias is not None:
f_2 = f_2 + attn_bias
f_2 = tf.gather(f_2, adj_ij.indices[:, 1])
f = tf.reduce_sum(tf.multiply(f_1, f_2), 1)
coefs = tf.SparseTensor(indices=adj_ij.indices,
values=tf.nn.leaky_relu(f),
dense_shape=adj_ij.dense_shape)
if coef_drop != 0.0:
coefs = tf.SparseTensor(indices=coefs.indices,
values=tf.nn.dropout(coefs.values, 1.0 - coef_drop),
dense_shape=coefs.dense_shape)
coefs_lists[j].append(coefs) # transposed, nb_nodes_j * nb_nodes_i
if in_drop != 0.0:
seq_fts_ij = tf.nn.dropout(seq_fts[j][i], 1.0 - in_drop)
seq_fts_lists[j].append(tf.squeeze(seq_fts_ij)) # nb_nodes_i * out_sz_j
# As tf.sparse_tensor_dense_matmul expects its arguments to have rank-2,
# here we make an assumption that our input is of batch size 1, and reshape appropriately.
# The method will fail in all other cases!
coefs = [tf.sparse_concat(1, coefs_list) for coefs_list in coefs_lists]
coefs = [tf.sparse_softmax(coef) for coef in coefs]
seq_fts = [tf.concat(seq_fts_list, 0) for seq_fts_list in seq_fts_lists]
vals = [tf.sparse_tensor_dense_matmul(coef, seq_ft) for coef, seq_ft in zip(coefs, seq_fts)]
# nb_nodes_j * out_sz_j
vals = [tf.expand_dims(val, axis=0) for val in vals]
for i, val in enumerate(vals):
val.set_shape([1, nb_nodes[i], out_sz])
ret = [tf.contrib.layers.bias_add(val) for val in vals]
# residual connection
if residual:
ret2 = []
for r, s in zip(ret, seq):
if s.shape[-1] != r.shape[-1]:
ret2.append(r + tf.layers.conv1d(s, r.shape[-1], 1))
else:
ret2.append(r + s)
ret = ret2
ret = [activation(r) for r in ret]
return ret # activation
def full_connection(seq, out_sz, target_node, activation, in_drop=0.0, use_bias=True):
with tf.name_scope('full_connection_layer'):
if in_drop != 0.0:
seq = [tf.nn.dropout(seq_i, 1.0 - in_drop) for seq_i in seq]
seq_fc = [tf.layers.conv1d(seq[target_node[i]], out_sz[i], 1, use_bias=use_bias) for i in range(len(target_node))]
        seq_fc = [tf.squeeze(seq_i) for seq_i in seq_fc] # remove the batch_size dimension, which is set to 1
ret = [activation(s) for s in seq_fc]
return ret
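# A small NumPy illustration (not part of the original model) of the dense attention in
# attn_head() above: pairwise scores LeakyReLU(f_1_i + f_2_j) are masked with the adjacency
# bias and row-normalised by a softmax before aggregating the transformed features.
if __name__ == "__main__":
    np.random.seed(0)
    N, F = 4, 8
    Wh = np.random.randn(N, F)                    # transformed node features (seq_fts)
    f_1 = Wh @ np.random.randn(F, 1)              # per-node "source" scores
    f_2 = Wh @ np.random.randn(F, 1)              # per-node "target" scores
    logits = f_1 + f_2.T                          # N x N pairwise scores
    adj = np.eye(N) + np.diag(np.ones(N - 1), 1)  # toy adjacency: self-loops + next neighbour
    bias = np.where(adj > 0, 0.0, -1e9)           # same role as bias_mat above
    e = np.where(logits > 0, logits, 0.2 * logits) + bias
    coefs = np.exp(e) / np.exp(e).sum(axis=1, keepdims=True)
    print(np.round(coefs @ Wh, 3))                # aggregated node features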
|
deep-rl/lib/python2.7/site-packages/OpenGL/WGL/NV/present_video.py | ShujaKhalid/deep-rl | 210 | 12681898 | '''OpenGL extension NV.present_video
This module customises the behaviour of the
OpenGL.raw.WGL.NV.present_video to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a mechanism for displaying textures and
renderbuffers on auxiliary video output devices. It allows an
application to specify separate buffers for the individual
fields used with interlaced output. It also provides a way
to present frames or field pairs simultaneously in two separate
video streams. It also allows an application to request when images
should be displayed, and to obtain feedback on exactly when images
are actually first displayed.
This specification attempts to avoid language that would tie it to
any particular hardware or vendor. However, it should be noted that
it has been designed specifically for use with NVIDIA SDI products
and the features and limitations of the spec compliment those of
NVIDIA's line of SDI video output devices.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/present_video.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.WGL import _types, _glgets
from OpenGL.raw.WGL.NV.present_video import *
from OpenGL.raw.WGL.NV.present_video import _EXTENSION_NAME
def glInitPresentVideoNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
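# A minimal usage sketch (illustrative only):
#
# if glInitPresentVideoNV():
#     # the WGL_NV_present_video entry points imported above are available on the
#     # current context and can be called as usual
#     pass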
### END AUTOGENERATED SECTION |
components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_component.py | Iuiu1234/pipelines | 2,860 | 12681904 | <filename>components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_component.py
"""RoboMaker component for deleting a simulation application."""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict
from delete_simulation_app.src.robomaker_delete_simulation_app_spec import (
RoboMakerDeleteSimulationAppSpec,
RoboMakerDeleteSimulationAppInputs,
RoboMakerDeleteSimulationAppOutputs,
)
from common.sagemaker_component import (
SageMakerComponent,
ComponentMetadata,
SageMakerJobStatus,
)
from common.boto3_manager import Boto3Manager
from common.common_inputs import SageMakerComponentCommonInputs
@ComponentMetadata(
name="RoboMaker - Delete Simulation Application",
description="Delete a simulation application.",
spec=RoboMakerDeleteSimulationAppSpec,
)
class RoboMakerDeleteSimulationAppComponent(SageMakerComponent):
"""RoboMaker component for deleting a simulation application."""
def Do(self, spec: RoboMakerDeleteSimulationAppSpec):
self._arn = spec.inputs.arn
self._version = spec.inputs.version
super().Do(spec.inputs, spec.outputs, spec.output_paths)
def _get_job_status(self) -> SageMakerJobStatus:
try:
response = self._rm_client.describe_simulation_application(
application=self._arn
)
status = response["arn"]
if status is not None:
return SageMakerJobStatus(is_completed=False, raw_status=status,)
else:
return SageMakerJobStatus(is_completed=True, raw_status="Item deleted")
except Exception as ex:
return SageMakerJobStatus(is_completed=True, raw_status=str(ex))
def _configure_aws_clients(self, inputs: SageMakerComponentCommonInputs):
"""Configures the internal AWS clients for the component.
Args:
inputs: A populated list of user inputs.
"""
self._rm_client = Boto3Manager.get_robomaker_client(
self._get_component_version(),
inputs.region,
endpoint_url=inputs.endpoint_url,
assume_role_arn=inputs.assume_role,
)
self._cw_client = Boto3Manager.get_cloudwatch_client(
inputs.region, assume_role_arn=inputs.assume_role
)
def _after_job_complete(
self,
job: Dict,
request: Dict,
inputs: RoboMakerDeleteSimulationAppInputs,
outputs: RoboMakerDeleteSimulationAppOutputs,
):
outputs.arn = self._arn
logging.info("Simulation Application {} has been deleted".format(outputs.arn))
def _on_job_terminated(self):
logging.info("Simulation Application {} failed to delete".format(self._arn))
def _create_job_request(
self,
inputs: RoboMakerDeleteSimulationAppInputs,
outputs: RoboMakerDeleteSimulationAppOutputs,
) -> Dict:
"""
Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/robomaker.html#RoboMaker.Client.delete_simulation_application
"""
request = self._get_request_template("robomaker.delete.simulation.app")
request["application"] = self._arn
# If we have a version then use it, else remove it from request object
if inputs.version:
request["applicationVersion"] = inputs.version
else:
request.pop("applicationVersion")
return request
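    # Illustration (assumption): the rendered request passed to boto3 resembles
    #   {"application": "arn:aws:robomaker:...:simulation-application/...",
    #    "applicationVersion": "1"}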
def _submit_job_request(self, request: Dict) -> Dict:
return self._rm_client.delete_simulation_application(**request)
def _after_submit_job_request(
self,
job: Dict,
request: Dict,
inputs: RoboMakerDeleteSimulationAppInputs,
outputs: RoboMakerDeleteSimulationAppOutputs,
):
logging.info(f"Deleted Robomaker Simulation Application with arn: {self._arn}")
def _print_logs_for_job(self):
pass
if __name__ == "__main__":
import sys
spec = RoboMakerDeleteSimulationAppSpec(sys.argv[1:])
component = RoboMakerDeleteSimulationAppComponent()
component.Do(spec)
|
sysidentpy/polynomial_basis/simulation.py | neylsoncrepalde/sysidentpy | 107 | 12681919 | <filename>sysidentpy/polynomial_basis/simulation.py
""" Simulation methods for Polynomial NARMAX models """
# Authors:
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from sysidentpy.parameter_estimation.estimators import Estimators
from ..base import GenerateRegressors
from ..base import InformationMatrix
from .narmax import PolynomialNarmax
import numpy as np
from ..residues.residues_correlation import ResiduesAnalysis
from ..utils._check_arrays import check_X_y
from ..utils.deprecation import deprecated
@deprecated(
version="v0.1.7",
future_version="v0.2.0",
alternative="from sysidentpy.simulation import SimulateNARMAX",
)
class SimulatePolynomialNarmax(PolynomialNarmax):
"""Simulation of Polynomial NARMAX model
Parameters
----------
n_inputs : int, default=1
The number of inputs of the system.
estimator : str, default="least_squares"
The parameter estimation method.
extended_least_squares : bool, default=False
Whether to use extended least squares method
for parameter estimation.
Note that we define a specific set of noise regressors.
estimate_parameter : bool, default=False
Whether to use a method for parameter estimation.
Must be True if the user do not enter the pre-estimated parameters.
Note that we define a specific set of noise regressors.
calculate_err : bool, default=False
        Whether to use an ERR algorithm on the pre-defined regressors.
lam : float, default=0.98
Forgetting factor of the Recursive Least Squares method.
delta : float, default=0.01
Normalization factor of the P matrix.
offset_covariance : float, default=0.2
The offset covariance factor of the affine least mean squares
filter.
mu : float, default=0.01
The convergence coefficient (learning rate) of the filter.
eps : float
Normalization factor of the normalized filters.
gama : float, default=0.2
The leakage factor of the Leaky LMS method.
weight : float, default=0.02
Weight factor to control the proportions of the error norms
and offers an extra degree of freedom within the adaptation
of the LMS mixed norm method.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sysidentpy.polynomial_basis.simulation import SimulatePolynomialNarmax
>>> from sysidentpy.metrics import root_relative_squared_error
>>> from sysidentpy.utils.generate_data import get_miso_data, get_siso_data
>>> x_train, x_valid, y_train, y_valid = get_siso_data(n=1000,
... colored_noise=True,
... sigma=0.2,
... train_percentage=90)
>>> s = SimulatePolynomialNarmax()
>>> model = np.array(
... [
... [1001, 0], # y(k-1)
... [2001, 1001], # x1(k-1)y(k-1)
... [2002, 0], # x1(k-2)
... ]
... )
>>> # theta must be a numpy array of shape (n, 1) where n is the number of regressors
>>> theta = np.array([[0.2, 0.9, 0.1]]).T
>>> yhat, results = s.simulate(
... X_test=x_test,
... y_test=y_test,
... model_code=model,
... theta=theta,
... plot=True)
>>> results = pd.DataFrame(model.results(err_precision=8,
... dtype='dec'),
... columns=['Regressors', 'Parameters', 'ERR'])
>>> print(results)
Regressors Parameters ERR
0 x1(k-2) 0.9000 0.95556574
1 y(k-1) 0.1999 0.04107943
2 x1(k-1)y(k-1) 0.1000 0.00335113
"""
def __init__(
self,
n_inputs=1,
estimator="recursive_least_squares",
extended_least_squares=True,
lam=0.98,
delta=0.01,
offset_covariance=0.2,
mu=0.01,
eps=np.finfo(np.float64).eps,
gama=0.2,
weight=0.02,
estimate_parameter=False,
calculate_err=False,
):
super().__init__( # n_inputs=n_inputs,
estimator=estimator,
extended_least_squares=extended_least_squares,
lam=lam,
delta=delta,
offset_covariance=offset_covariance,
mu=mu,
eps=eps,
gama=gama,
weight=weight,
)
self.estimate_parameter = estimate_parameter
self.calculate_err = calculate_err
self._n_inputs = n_inputs
def _validate_simulate_params(self):
if not isinstance(self.estimate_parameter, bool):
raise TypeError(
f"estimate_parameter must be False or True. Got {self.estimate_parameter}"
)
def _get_index_from_regressor_code(self, regressor_code, model_code):
"""Get the index of user regressor in regressor space.
        Taken from: https://stackoverflow.com/questions/38674027/find-the-row-indexes-of-several-values-in-a-numpy-array/38674038#38674038
Parameters
----------
regressor_code : ndarray of int
Matrix codification of all possible regressors.
model_code : ndarray of int
Model defined by the user to simulate.
Returns
-------
model_index : ndarray of int
Index of model code in the regressor space.
"""
dims = regressor_code.max(0) + 1
model_index = np.where(
np.in1d(
np.ravel_multi_index(regressor_code.T, dims),
np.ravel_multi_index(model_code.T, dims),
)
)[0]
return model_index
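    # Illustration (not part of the original code): with
    #   regressor_code = [[0, 0], [1001, 0], [2001, 0], [2001, 1001]]
    #   model_code     = [[2001, 0], [1001, 0]]
    # the method returns array([1, 2]), i.e. the rows of regressor_code that occur in model_code.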
def _list_output_regressor_code(self, model_code):
"""Create a flattened array of input or output regressors.
Parameters
----------
model_code : ndarray of int
Model defined by the user to simulate.
Returns
-------
model_code : ndarray of int
Flattened list of output regressors.
"""
regressor_code = [
code for code in model_code.ravel() if (code != 0) and (str(code)[0] == "1")
]
return np.asarray(regressor_code)
def _list_input_regressor_code(self, model_code):
"""Create a flattened array of input or output regressors.
Parameters
----------
model_code : ndarray of int
Model defined by the user to simulate.
Returns
-------
model_code : ndarray of int
            Flattened list of input regressors.
"""
regressor_code = [
code for code in model_code.ravel() if (code != 0) and (str(code)[0] != "1")
]
return np.asarray(regressor_code)
def _get_lag_from_regressor_code(self, regressors):
"""Get the maximum lag from array of regressors.
Parameters
----------
regressors : ndarray of int
Flattened list of input or output regressors.
Returns
-------
max_lag : int
Maximum lag of list of regressors.
"""
lag_list = [
int(i) for i in regressors.astype("str") for i in [np.sum(int(i[2:]))]
]
if len(lag_list) != 0:
return max(lag_list)
else:
return 1
def simulate(
self,
X_train=None,
y_train=None,
X_test=None,
y_test=None,
model_code=None,
steps_ahead=None,
theta=None,
plot=True,
):
"""Simulate a model defined by the user.
Parameters
----------
X_train : ndarray of floats
The input data to be used in the training process.
y_train : ndarray of floats
The output data to be used in the training process.
X_test : ndarray of floats
The input data to be used in the prediction process.
y_test : ndarray of floats
The output data (initial conditions) to be used in the prediction process.
model_code : ndarray of int
Flattened list of input or output regressors.
steps_ahead = int, default = None
The forecast horizon.
theta : array-like of shape = number_of_model_elements
The parameters of the model.
plot : bool, default=True
Indicate if the user wants to plot or not.
Returns
-------
yhat : ndarray of floats
The predicted values of the model.
results : string
Where:
First column represents each regressor element;
Second column represents associated parameter;
Third column represents the error reduction ratio associated
to each regressor.
"""
if y_test is None:
raise ValueError("y_test cannot be None")
if not isinstance(model_code, np.ndarray):
raise TypeError(f"model_code must be an np.np.ndarray. Got {model_code}")
if not isinstance(steps_ahead, (int, type(None))):
raise ValueError(
f"steps_ahead must be None or integer > zero. Got {steps_ahead}"
)
if not isinstance(plot, bool):
raise TypeError(f"plot must be True or False. Got {plot}")
if not isinstance(theta, np.ndarray) and not self.estimate_parameter:
raise TypeError(
f"If estimate_parameter is False, theta must be an np.np.ndarray. Got {theta}"
)
check_X_y(X_test, y_test)
if self.estimate_parameter:
if not all(isinstance(i, np.ndarray) for i in [X_train, y_train]):
raise TypeError(
f"If estimate_parameter is True, X_train and y_train must be an np.ndarray. Got {type(X_train), type(y_train)}"
)
check_X_y(X_train, y_train)
if y_train is None:
raise ValueError("y_train cannot be None")
xlag_code = self._list_input_regressor_code(model_code)
ylag_code = self._list_output_regressor_code(model_code)
self.xlag = self._get_lag_from_regressor_code(xlag_code)
self.ylag = self._get_lag_from_regressor_code(ylag_code)
self.max_lag = max(self.xlag, self.ylag)
if self._n_inputs != 1:
self.xlag = self._n_inputs * [list(range(1, self.max_lag + 1))]
self.non_degree = model_code.shape[1]
[regressor_code, _] = self.regressor_space(
self.non_degree, self.xlag, self.ylag, self._n_inputs
)
self.pivv = self._get_index_from_regressor_code(regressor_code, model_code)
self.final_model = regressor_code[self.pivv]
# to use in the predict function
self.n_terms = self.final_model.shape[0]
if not self.estimate_parameter:
self.theta = theta
self.err = self.n_terms * [0]
elif self.estimate_parameter and not self.calculate_err:
psi = self.build_information_matrix(
X_train, y_train, self.xlag, self.ylag, self.non_degree, self.pivv
)
# psi = psi[:, self.pivv]
self.theta = getattr(self, self.estimator)(psi, y_train)
self.err = self.n_terms * [0]
else:
psi = self.build_information_matrix(
X_train, y_train, self.xlag, self.ylag, self.non_degree, self.pivv
)
# psi = psi[:, self.pivv]
_, self.err, self.pivv, _ = self.error_reduction_ratio(
psi, y_train, self.n_terms
)
self.theta = getattr(self, self.estimator)(psi, y_train)
yhat = self.predict(X_test, y_test, steps_ahead)
results = self.results(err_precision=8, dtype="dec")
if plot:
ee, ex, _, _ = self.residuals(X_test, y_test, yhat)
self.plot_result(y_test, yhat, ee, ex)
return yhat, results
|
logomaker/src/examples.py | ruxi/logomaker | 125 | 12681927 | <reponame>ruxi/logomaker
import pandas as pd
import os
import gzip
from logomaker.src.error_handling import check, handle_errors
# load directory of file
matrix_dir = os.path.dirname(os.path.abspath(__file__)) \
+ '/../examples/matrices'
# load directory of file
data_dir = os.path.dirname(os.path.abspath(__file__)) \
+ '/../examples/datafiles'
@handle_errors
def list_example_matrices():
"""
Return list of available matrices.
"""
# List of supported distributions by name
valid_matrices = ['.'.join(name.split('.')[:-1]) for name in
os.listdir(matrix_dir) if '.txt' in name]
return valid_matrices
@handle_errors
def list_example_datafiles():
"""
Return list of available data files.
"""
# List of supported distributions by name
valid_datafiles = [name for name in
os.listdir(data_dir) if
len(name.split('.')) >= 2 and
len(name.split('.')[0]) > 0]
return valid_datafiles
@handle_errors
def get_example_matrix(name=None, print_description=True):
"""
Returns an example matrix from which a logo can be made.
parameters
----------
name: (None or str)
Name of example matrix.
print_description: (bool)
If true, a description of the example matrix will be printed
returns
-------
df: (data frame)
A data frame containing an example matrix.
"""
# get list of valid matrices
valid_matrices = list_example_matrices()
# check that matrix name is valid
check(name in valid_matrices,
'Matrix "%s" not recognized. Please choose from: \n%s'
% (name, '\n'.join([repr(x) for x in valid_matrices])))
# check that print_description is boolean
check(isinstance(print_description, bool),
'type(print_description) = %s; must be of type bool ' %
type(print_description))
# set matrix file
file_name = '%s/%s.txt' % (matrix_dir, name)
assert os.path.isfile(file_name), 'File %s does not exist!'%file_name
# if user wants a description of the example matrix, provide it
if print_description:
print('Description of example matrix "%s":' % name)
with open(file_name, 'r') as f:
lines = f.readlines()
lines = [l for l in lines if len(l)>0 and l[0] == '#']
description = "".join(lines)
print(description)
# return matrix data frame
return pd.read_csv(file_name, sep='\t', index_col=0, comment='#')
@handle_errors
def open_example_datafile(name=None, print_description=True):
"""
Returns a file handle to an example dataset
parameters
----------
name: (None or str)
Name of example matrix.
print_description: (bool)
If true, a description of the example matrix will be printed
returns
-------
f: (file handle)
A handle to the requested file
"""
# get list of valid data files
valid_datafiles = list_example_datafiles()
# check that specified datafile is valid
check(name in valid_datafiles,
'Matrix "%s" not recognized. Please choose from: \n%s'
% (name, '\n'.join([repr(x) for x in valid_datafiles])))
# check that print_description is boolean
check(isinstance(print_description, bool),
'type(print_description) = %s; must be of type bool ' %
type(print_description))
# set datafile file name
file_name = '%s/%s' % (data_dir, name)
assert os.path.isfile(file_name), 'File %s does not exist!' % file_name
# if user wants a description of the datafile, provide it
if print_description:
        print('Description of example datafile "%s":' % name)
with open(file_name, 'r') as f:
lines = f.readlines()
lines = [l for l in lines if len(l)>0 and l[0] == '#']
description = "".join(lines)
print(description)
# if file is a gzipped file, use gzip.open()
if len(file_name) >= 3 and file_name[-3:] == '.gz':
f = gzip.open(file_name, 'r')
# otherwise, use regular open()
else:
f = open(file_name, 'r')
# return file handle to user
return f
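# A minimal usage sketch (illustrative only, not part of the original module):
if __name__ == "__main__":
    names = list_example_matrices()
    print(names)
    if names:
        df = get_example_matrix(name=names[0], print_description=False)
        print(df.head())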
|
docs/plot_visualise.py | vishalbelsare/pycobra | 119 | 12681935 | <filename>docs/plot_visualise.py
"""
COBRA Visualisations
--------------------
This notebook will cover the visulaisation and plotting offered by
pycobra.
"""
# %matplotlib inline
import numpy as np
from pycobra.cobra import Cobra
from pycobra.ewa import Ewa
from pycobra.visualisation import Visualisation
from pycobra.diagnostics import Diagnostics
# setting up our random data-set
rng = np.random.RandomState(42)
# D1 = train machines; D2 = create COBRA; D3 = calibrate epsilon, alpha; D4 = testing
n_features = 2
D1, D2, D3, D4 = 200, 200, 200, 200
D = D1 + D2 + D3 + D4
X = rng.uniform(-1, 1, D * n_features).reshape(D, n_features)
# Y = np.power(X[:,1], 2) + np.power(X[:,3], 3) + np.exp(X[:,10])
Y = np.power(X[:,0], 2) + np.power(X[:,1], 3)
# training data-set
X_train = X[:D1 + D2]
X_test = X[D1 + D2 + D3:D1 + D2 + D3 + D4]
X_eps = X[D1 + D2:D1 + D2 + D3]
# for testing
Y_train = Y[:D1 + D2]
Y_test = Y[D1 + D2 + D3:D1 + D2 + D3 + D4]
Y_eps = Y[D1 + D2:D1 + D2 + D3]
# set up our COBRA machine with the data
cobra = Cobra(epsilon=0.5)
cobra.fit(X_train, Y_train)
######################################################################
# Plotting COBRA
# ~~~~~~~~~~~~~~
#
# We use the visualisation class to plot our results, and for various
# visualisations.
#
cobra_vis = Visualisation(cobra, X_test, Y_test)
# to plot our machines, we need a linspace as input. This is the 'scale' to plot and should be the range of the results
# since our data ranges from -1 to 1 it is such - and we space it out to a hundred points
cobra_vis.plot_machines(machines=["COBRA"])
cobra_vis.plot_machines()
######################################################################
# Plots and Visualisations of Results
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# QQ and Boxplots!
#
cobra_vis.QQ()
cobra_vis.boxplot()
######################################################################
# Plotting EWA!
# ~~~~~~~~~~~~~
#
# We can use the same visualisation class for seeing how EWA works. Let's
# demonstrate this!
#
ewa = Ewa()
ewa.set_beta(X_beta=X_eps, y_beta=Y_eps)
ewa.fit(X_train, Y_train)
ewa_vis = Visualisation(ewa, X_test, Y_test)
ewa_vis.QQ("EWA")
ewa_vis.boxplot()
######################################################################
# Plotting ClassifierCobra
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
from sklearn import datasets
from sklearn.metrics import accuracy_score
from pycobra.classifiercobra import ClassifierCobra
bc = datasets.load_breast_cancer()
X_cc = bc.data[:-40]
y_cc = bc.target[:-40]
X_cc_test = bc.data[-40:]
y_cc_test = bc.target[-40:]
cc = ClassifierCobra()
cc.fit(X_cc, y_cc)
cc_vis = Visualisation(cc, X_cc_test, y_cc_test)
cc_vis.boxplot()
######################################################################
# Remember that all the estimators in the Pycobra package are scikit-learn
# compatible - we can also use the scikit-learn metrics and tools to
# analyse our machines!
#
from sklearn.metrics import classification_report
print(classification_report(y_cc_test, cc.predict(X_cc_test)))
######################################################################
# Plotting COBRA colors!
# ~~~~~~~~~~~~~~~~~~~~~~
#
# We're now going to experiment with plotting colors and data. After we
# get information about which indices are handled best by which machines
# for a fixed epsilon (or not, we can toggle this option), we can plot the
# distribution of machines.
#
# Why is this useful? Since we're dealing with a 2-D space now, we're
# attempting to see if there are some parts in the input space which are
# picked up by certain machines. This could lead to interesting
# experiments and further analysis.
#
# We first present a plot where the machine colors are mixed depending on
# which machines were selected; after which we plot one machine at a time.
#
indices, MSE = cobra_vis.indice_info(X_test=X_eps[0:50], y_test=Y_eps[0:50], epsilon=0.50)
cobra_vis.color_cobra(X_test=X_eps[0:50], indice_info=indices, single=True)
cobra_vis.color_cobra(X_test=X_eps[0:50], indice_info=indices)
######################################################################
# Voronoi Tesselation
# ~~~~~~~~~~~~~~~~~~~
#
# We present a variety of Voronoi Tesselation based plots - the purpose of
# this is to help in visualising the pattern of points which tend to be
# picked up.
#
cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices, single=True)
cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices)
######################################################################
# Gradient-Colored Based Voronoi
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices, MSE=MSE, gradient=True)
######################################################################
# Licensed under the MIT License - https://opensource.org/licenses/MIT
# |
src/train/TrainOneClassifier.py | SurajK7/kaggle-rsna18 | 220 | 12681963 | <gh_stars>100-1000
###########
# IMPORTS #
###########
import os
WDIR = os.path.dirname(os.path.abspath(__file__))
import sys
sys.path.insert(0, os.path.join(WDIR, "gradient-checkpointing"))
import memory_saving_gradients
sys.path.insert(0, os.path.join(WDIR, "../grayscale-models"))
from inception_resnet_v2_gray import InceptionResNetV2
from mobilenet_v2_gray import MobileNetV2
from densenet_gray import DenseNet121, DenseNet169, DenseNet201
from resnet50_gray import ResNet50
from xception_gray import Xception
from keras.layers import Dropout, Flatten, Dense, Input, Concatenate
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D
from keras.engine import Model
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras import backend as K
from keras import optimizers, layers, utils
K.__dict__["gradients"] = memory_saving_gradients.gradients_memory
import pandas as pd
import numpy as np
import scipy.misc
import glob
import json
from scipy.ndimage.interpolation import zoom, rotate
from scipy.ndimage.filters import gaussian_filter
from skimage import exposure
from sklearn.metrics import roc_auc_score, cohen_kappa_score, accuracy_score, f1_score
################
# KERAS MODELS #
################
def get_model(base_model,
layer,
lr=1e-3,
input_shape=(224,224,1),
classes=2,
activation="softmax",
dropout=None,
pooling="avg",
weights=None,
pretrained="imagenet"):
base = base_model(input_shape=input_shape,
include_top=False,
weights=pretrained,
channels="gray")
if pooling == "avg":
x = GlobalAveragePooling2D()(base.output)
elif pooling == "max":
x = GlobalMaxPooling2D()(base.output)
elif pooling is None:
x = Flatten()(base.output)
if dropout is not None:
x = Dropout(dropout)(x)
x = Dense(classes, activation=activation)(x)
model = Model(inputs=base.input, outputs=x)
if weights is not None:
model.load_weights(weights)
for l in model.layers[:layer]:
l.trainable = False
model.compile(loss="binary_crossentropy", metrics=["accuracy"],
optimizer=optimizers.Adam(lr))
return model
##########
## DATA ##
##########
# == PREPROCESSING == #
def preprocess_input(x, model):
x = x.astype("float32")
if model in ("inception","xception","mobilenet"):
x /= 255.
x -= 0.5
x *= 2.
if model in ("densenet"):
x /= 255.
if x.shape[-1] == 3:
x[..., 0] -= 0.485
x[..., 1] -= 0.456
x[..., 2] -= 0.406
x[..., 0] /= 0.229
x[..., 1] /= 0.224
x[..., 2] /= 0.225
elif x.shape[-1] == 1:
x[..., 0] -= 0.449
x[..., 0] /= 0.226
elif model in ("resnet","vgg"):
if x.shape[-1] == 3:
x[..., 0] -= 103.939
x[..., 1] -= 116.779
x[..., 2] -= 123.680
elif x.shape[-1] == 1:
x[..., 0] -= 115.799
return x
def apply_clahe(img):
img = img / 255.
img = exposure.equalize_adapthist(img)
img = img * 255.
return img
# == AUGMENTATION == #
def crop_center(img, cropx, cropy):
y,x = img.shape[:2]
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy,startx:startx+cropx,:]
def data_augmentation(image):
# Input should be ONE image with shape: (L, W, CH)
options = ["gaussian_smooth", "rotate", "zoom", "adjust_gamma"]
    # each augmentation option below is drawn with equal probability
which_option = np.random.choice(options)
if which_option == "gaussian_smooth":
sigma = np.random.uniform(0.2, 1.0)
image = gaussian_filter(image, sigma)
elif which_option == "zoom":
# Assumes image is square
min_crop = int(image.shape[0]*0.85)
max_crop = int(image.shape[0]*0.95)
crop_size = np.random.randint(min_crop, max_crop)
crop = crop_center(image, crop_size, crop_size)
if crop.shape[-1] == 1: crop = crop[:,:,0]
image = scipy.misc.imresize(crop, image.shape)
elif which_option == "rotate":
angle = np.random.uniform(-15, 15)
image = rotate(image, angle, reshape=False)
elif which_option == "adjust_gamma":
image = image / 255.
image = exposure.adjust_gamma(image, np.random.uniform(0.75,1.25))
image = image * 255.
if len(image.shape) == 2: image = np.expand_dims(image, axis=2)
return image
# == I/O == #
def load_sample(train_images, num_train_samples, z):
if len(train_images) < num_train_samples:
train_sample_images = np.random.choice(train_images, num_train_samples, replace=True)
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, []
train_sample_images = list(set(train_images) - set(z))
if len(train_sample_images) < num_train_samples:
sample_diff = num_train_samples - len(train_sample_images)
not_sampled = list(set(train_images) - set(train_sample_images))
train_sample_images.extend(np.random.choice(not_sampled, sample_diff, replace=False))
z = []
else:
train_sample_images = np.random.choice(train_sample_images, num_train_samples, replace=False)
z.extend(train_sample_images)
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, z
def load_sample_and_labels(df, train_images, num_train_samples, z):
if len(train_images) < num_train_samples:
train_sample_images = np.random.choice(train_images, num_train_samples, replace=True)
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, []
train_sample_images = list(set(train_images) - set(z))
if len(train_sample_images) < num_train_samples:
sample_diff = num_train_samples - len(train_sample_images)
not_sampled = list(set(train_images) - set(train_sample_images))
train_sample_images.extend(np.random.choice(not_sampled, sample_diff, replace=False))
z = []
else:
train_sample_images = np.random.choice(train_sample_images, num_train_samples, replace=False)
z.extend(train_sample_images)
train_sample_ids = [_.split("/")[-1].split(".")[0] for _ in train_sample_images]
train_sample_df = df[(df.patientId.isin(train_sample_ids))]
train_sample_df.index = train_sample_df.patientId
train_sample_df = train_sample_df.reindex(train_sample_ids)
train_sample_labels = np.asarray(train_sample_df["label"])
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, train_sample_labels, z
def TTA(img, model, model_name, seed=88, niter=0):
np.random.seed(seed)
original_img = img.copy()
inverted_img = np.invert(img.copy())
hflipped_img = np.fliplr(img.copy())
original_img_array = np.empty((niter+1, img.shape[0], img.shape[1], img.shape[2]))
inverted_img_array = original_img_array.copy()
hflipped_img_array = original_img_array.copy()
original_img_array[0] = original_img
inverted_img_array[0] = inverted_img
hflipped_img_array[0] = hflipped_img
for each_iter in range(niter):
original_img_array[each_iter+1] = data_augmentation(original_img)
inverted_img_array[each_iter+1] = data_augmentation(inverted_img)
hflipped_img_array[each_iter+1] = data_augmentation(hflipped_img)
tmp_array = np.vstack((original_img_array, inverted_img_array, hflipped_img_array))
tmp_array = preprocess_input(tmp_array, model_name)
if int(model.get_output_at(-1).get_shape()[1]) == 1:
prediction = np.mean(model.predict(tmp_array)[:,0])
else:
prediction = np.mean(model.predict(tmp_array)[:,-1])
return prediction
############
# VALIDATE #
############
def reduce_learning_rate_or_not(metric_list, direction="max", patience=2):
# **NOTE: metric_list should have CURRENT metric as last element
if len(metric_list) < patience + 1:
return False
else:
if direction == "max":
if metric_list[-1] <= metric_list[(-1-patience)]:
return True
else:
return False
elif direction == "min":
if metric_list[-1] >= metric_list[(-1-patience)]:
return True
else:
return False
def competitionMetric(y_true, y_pred):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
TP = np.sum((y_true == 1) & (y_pred == 1))
FP = np.sum((y_true == 0) & (y_pred == 1))
FN = np.sum((y_true == 1) & (y_pred == 0))
return float(TP) / (float(FP) + float(FN) + float(TP))
def calculate_metrics(val_results_dict, y_pred, y_val, suffix=""):
tmp_kappa_list = []
tmp_accur_list = []
tmp_f1_list = []
tmp_cm_list = []
y_val = utils.to_categorical(y_val)[:,-1]
for each_threshold in np.linspace(0.1, 0.9, 17):
tmp_pred = [1 if _ >= each_threshold else 0 for _ in y_pred]
tmp_kappa_list.append(cohen_kappa_score(tmp_pred, y_val))
tmp_accur_list.append(accuracy_score(tmp_pred, y_val))
tmp_f1_list.append(f1_score(tmp_pred, y_val))
tmp_cm_list.append(competitionMetric(tmp_pred, y_val))
auroc = round(roc_auc_score(y_val, y_pred), 3)
kappa = round(np.max(tmp_kappa_list), 3)
accur = round(np.max(tmp_accur_list), 3)
cm = round(np.max(tmp_cm_list), 3)
f1 = round(np.max(tmp_f1_list), 3)
val_results_dict["auc{}".format(suffix)].append(auroc)
val_results_dict["kap{}".format(suffix)].append(kappa)
val_results_dict["acc{}".format(suffix)].append(accur)
val_results_dict["f1{}".format(suffix)].append(f1)
val_results_dict["cm{}".format(suffix)].append(cm)
kappa_threshold = np.linspace(0.1,0.9,17)[tmp_kappa_list.index(np.max(tmp_kappa_list))]
accur_threshold = np.linspace(0.1,0.9,17)[tmp_accur_list.index(np.max(tmp_accur_list))]
f1_threshold = np.linspace(0.1,0.9,17)[tmp_f1_list.index(np.max(tmp_f1_list))]
cm_threshold = np.linspace(0.1,0.9,17)[tmp_cm_list.index(np.max(tmp_cm_list))]
val_results_dict["threshold_kap{}".format(suffix)].append(round(kappa_threshold, 2))
val_results_dict["threshold_acc{}".format(suffix)].append(round(accur_threshold, 2))
val_results_dict["threshold_f1{}".format(suffix)].append(round(f1_threshold, 2))
val_results_dict["threshold_cm{}".format(suffix)].append(round(cm_threshold, 2))
return val_results_dict
def validate(val_results_dict, model_name,
model, y_val, X_val, valid_ids, valid_views,
save_weights_path, val_results_path,
subepoch,
batch_size):
y_pred = np.asarray([TTA(img, model, model_name) for img in X_val])
val_results_dict = calculate_metrics(val_results_dict, y_pred, y_val)
val_results_dict = calculate_metrics(val_results_dict, y_pred[valid_views == "AP"], y_val[valid_views == "AP"], "_AP")
val_results_dict = calculate_metrics(val_results_dict, y_pred[valid_views == "PA"], y_val[valid_views == "PA"], "_PA")
val_results_dict["subepoch"].append(subepoch)
out_df = pd.DataFrame(val_results_dict)
out_df.to_csv(os.path.join(val_results_path, "results.csv"), index=False)
predictions_df = pd.DataFrame({"patientId": valid_ids, "y_pred": y_pred})
predictions_df.to_csv(os.path.join(val_results_path, "predictions.csv"), index=False)
model.save_weights(os.path.join(save_weights_path, "weights_subepoch_{}.h5".format(str(subepoch).zfill(3))))
return val_results_dict
def load_and_validate(val_results_dict,
model, model_name,
clahe,
valid_df, data_dir,
save_weights_path, val_results_path,
subepoch,
batch_size):
# Memory requirements may prevent all validation data from being
# loaded at once
# NOTE: data is NOT preprocessed
print ">>VALIDATING<<\n"
X_val = np.asarray([np.load(os.path.join(data_dir, "{}.npy".format(_))) for _ in valid_df.patientId])
if clahe:
X_val = np.asarray([apply_clahe(_) for _ in X_val])
X_val = np.expand_dims(X_val, axis=-1)
#X_val = preprocess_input(X_val, model_name)
valid_ids = np.asarray(list(valid_df["patientId"]))
y_val = np.asarray(list(valid_df["label"]))
valid_views = np.asarray(list(valid_df["view"]))
val_results_dict = validate(val_results_dict, model_name,
model, y_val, X_val, valid_ids, valid_views,
save_weights_path,
val_results_path,
subepoch, batch_size)
return val_results_dict
def train(df, fold,
model, model_name,
subepochs, batch_size, base_lr, augment_p,
save_weights_path, save_logs_path, val_results_path,
data_dir,
mode="weighted_loss",
clahe=False,
lr_schedule=[20,10,2],
load_validation_data=True,
validate_every_nth_epoch=5,
resume=0,
num_train_samples=16000):
    # lr_schedule : list of 3 integers OR list of 1 string and 2 integers
# - index 0: subepoch for first annealing
# - index 1: subepoch interval for annealing after first annealing
# - index 2: annealing_factor
# OR
# - index 0: "ReduceLROnPlateau"
# - index 1: annealing_factor
# - index 2: patience
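    # For example:
    #   lr_schedule=[20, 10, 2] -> first anneal at subepoch 20, then every 10 subepochs,
    #   dividing the learning rate by 2 each time
    #   lr_schedule=["ReduceLROnPlateau", 2, 3] -> divide the learning rate by 2 whenever
    #   validation accuracy has not improved over the last 3 validation rounds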
if not os.path.exists(save_weights_path):
os.makedirs(save_weights_path)
if not os.path.exists(save_logs_path):
os.makedirs(save_logs_path)
if not os.path.exists(val_results_path):
os.makedirs(val_results_path)
train_df = df[(df.fold != fold)]
valid_df = df[(df.fold == fold)]
# Load the validation data if specified
if load_validation_data:
print "Loading validation data ..."
X_val = np.asarray([np.load(os.path.join(data_dir, "{}.npy".format(_))) for _ in valid_df.patientId])
if clahe:
X_val = np.asarray([apply_clahe(_) for _ in X_val])
X_val = np.expand_dims(X_val, axis=-1)
#X_val = preprocess_input(X_val, model_name)
print "DONE !"
valid_ids = np.asarray(list(valid_df["patientId"]))
y_val = np.asarray(list(valid_df["label"]))
valid_views = np.asarray(list(valid_df["view"]))
if mode == "weighted_loss":
train_images = [os.path.join(data_dir, "{}.npy".format(_)) for _ in train_df.patientId]
z = []
elif mode == "sample_equally":
pos_train_df = train_df[train_df["label"] == 1]
neg_train_df = train_df[train_df["label"] == 0]
pos_train_images = [os.path.join(data_dir, "{}.npy".format(_)) for _ in pos_train_df.patientId]
neg_train_images = [os.path.join(data_dir, "{}.npy".format(_)) for _ in neg_train_df.patientId]
z_pos = []
z_neg = []
val_results_dict = {"auc": [],
"kap": [],
"acc": [],
"f1": [],
"cm": [],
"threshold_kap": [],
"threshold_acc": [],
"threshold_f1": [],
"threshold_cm": [],
"subepoch": [],
"auc_AP": [],
"kap_AP": [],
"acc_AP": [],
"f1_AP": [],
"cm_AP": [],
"threshold_kap_AP": [],
"threshold_acc_AP": [],
"threshold_f1_AP": [],
"threshold_cm_AP": [],
"auc_PA": [],
"kap_PA": [],
"acc_PA": [],
"f1_PA": [],
"cm_PA": [],
"threshold_kap_PA": [],
"threshold_acc_PA": [],
"threshold_f1_PA": [],
"threshold_cm_PA": []}
lr_annealing_counter = 0
for each_subepoch in range(resume, subepochs):
suffix = str(each_subepoch).zfill(3)
logs_path = os.path.join(save_logs_path, "log_subepoch_{}.csv".format(suffix))
csvlogger = CSVLogger(logs_path)
print "Loading training sample ..."
if mode == "weighted_loss":
X_train, y_train, z = load_sample_and_labels(train_df, train_images, num_train_samples, z)
class_weight_dict = {}
class_freq_list = []
y_train = utils.to_categorical(y_train)
for each_class in range(y_train.shape[1]):
class_freq_list.append(np.sum(y_train[:,each_class]) / float(y_train.shape[0]))
for each_class in range(y_train.shape[1]):
class_weight_dict[each_class] = np.max(class_freq_list) / class_freq_list[each_class]
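            # For example (hypothetical class balance): with 75% negative and 25% positive samples,
            # class_freq_list would be [0.75, 0.25] and class_weight_dict would be {0: 1.0, 1: 3.0}.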
elif mode == "sample_equally":
X_pos_train, z_pos = load_sample(pos_train_images, num_train_samples / 2, z_pos)
X_neg_train, z_neg = load_sample(neg_train_images, num_train_samples / 2, z_neg)
X_train = np.vstack((X_pos_train, X_neg_train))
y_train = np.concatenate((np.repeat(1, len(X_pos_train)),
np.repeat(0, len(X_neg_train))))
del X_pos_train, X_neg_train
if clahe:
X_train = np.asarray([apply_clahe(_) for _ in X_train])
X_train = np.expand_dims(X_train, axis=-1)
print "Augmenting training data ..."
for index, each_image in enumerate(X_train):
sys.stdout.write("{}/{} ...\r".format(index+1, len(X_train)))
sys.stdout.flush()
if np.random.binomial(1, 0.5):
each_image = np.invert(each_image)
if np.random.binomial(1, 0.5):
each_image = np.fliplr(each_image)
if np.random.binomial(1, augment_p):
X_train[index] = data_augmentation(each_image)
X_train = preprocess_input(X_train, model_name)
print ("\nDONE !")
if mode == "weighted_loss":
model.fit(X_train, y_train,
batch_size=batch_size, epochs=1,
shuffle=True, callbacks=[csvlogger],
class_weight=class_weight_dict)
elif mode == "sample_equally":
model.fit(X_train, y_train,
batch_size=batch_size, epochs=1,
shuffle=True, callbacks=[csvlogger])
##### VALIDATE #####
if (each_subepoch + 1) % validate_every_nth_epoch == 0:
if load_validation_data:
val_results_dict = validate(val_results_dict, model_name,
model, y_val, X_val, valid_ids, valid_views,
save_weights_path, val_results_path,
each_subepoch,
batch_size)
else:
val_results_dict = load_and_validate(val_results_dict,
model, model_name,
clahe,
valid_df, data_dir,
save_weights_path, val_results_path,
each_subepoch,
batch_size)
##### LEARNING RATE SCHEDULE #####
if lr_schedule[0] != "ReduceLROnPlateau":
if (each_subepoch + 1) >= lr_schedule[0] and (each_subepoch + 1) % lr_schedule[1] == 0:
lr_annealing_counter += 1.
# Step-wise learning rate annealing schedule
new_lr = base_lr / (lr_schedule[2] ** lr_annealing_counter)
K.set_value(model.optimizer.lr, new_lr)
else:
if (each_subepoch + 1) % validate_every_nth_epoch == 0:
if reduce_learning_rate_or_not(val_results_dict["acc"], "max", lr_schedule[2]):
lr_annealing_counter += 1.
new_lr = base_lr / (lr_schedule[1] ** lr_annealing_counter)
K.set_value(model.optimizer.lr, new_lr)
##########
# SCRIPT #
##########
import json
# Specify GPU
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
with open(os.path.join(WDIR, "../../SETTINGS.json")) as f:
SETTINGS_JSON = json.load(f)
df = pd.read_csv(os.path.join(WDIR, "../..", SETTINGS_JSON["TRAIN_INFO_DIR"], "stratified_folds_df.csv"))
df["label"] = [1 if _ == "Lung Opacity" else 0 for _ in df["class"]]
#####################
# InceptionResNetV2 #
#####################
fold = 0
input_size = 256
fold_save_dir = os.path.join(WDIR, "../../models/one_classifier/snapshots/binary/InceptionResNetV2/fold{}".format(fold))
model = get_model(InceptionResNetV2, 0, 5e-5, dropout=None, input_shape=(input_size,input_size,1),
pretrained=os.path.join(WDIR, "../../models/pretrained/InceptionResNetV2_NIH15_Px256.h5"))
model_name = "inception"
train(df, fold, model, model_name, 15, 16, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6,3,2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
|
readtwice/models/checkpoint_utils_test.py | DionysisChristopoulos/google-research | 23,901 | 12681966 | <reponame>DionysisChristopoulos/google-research<gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for checkpoint_utils."""
import tempfile
from typing import Text, Tuple
import tensorflow.compat.v1 as tf
from readtwice.models import checkpoint_utils
def _create_test_variables(
outer_scope, inner_scope, var_c_name, var_e_name,
var_n_name):
# Keras layers can cause problems for `tf.train.init_from_checkpoint`
# if not handled properly. Here we intentionally use Dense layers
# to test whether the ckpt loading logic works.
dense_layer = tf.keras.layers.Dense(10, name="dense")
with tf.variable_scope(outer_scope):
var_c = tf.get_variable(
var_c_name, shape=[2, 4], initializer=tf.truncated_normal_initializer())
var_d = dense_layer(var_c)
with tf.variable_scope(inner_scope):
var_e = tf.get_variable(
var_e_name,
shape=[2, 3],
initializer=tf.truncated_normal_initializer())
_ = tf.get_variable(
var_n_name,
shape=[3, 5],
initializer=tf.truncated_normal_initializer())
return var_c, var_d, var_e
class CheckpointUtilsTest(tf.test.TestCase):
def _create_test_checkpoint(self, outer_scope, inner_scope,
var_c_name, var_e_name,
var_n_name):
with tempfile.NamedTemporaryFile(suffix="ckpt_test") as ckpt_file:
with self.session() as sess:
var_c, var_d, var_e = _create_test_variables(outer_scope, inner_scope,
var_c_name, var_e_name,
var_n_name)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.save(sess, ckpt_file.name)
return ckpt_file.name, var_c, var_d, var_e
def test_get_assignment_map_from_checkpoint(self):
ckpt_path, expected_c, expected_d, expected_e = (
self._create_test_checkpoint("scope_a", "scope_b", "var_c", "var_e",
"var_f"))
with self.cached_session() as sess:
var_c, var_d, var_e = _create_test_variables("another_scope_a", "scope_b",
"var_c", "var_e", "var_g")
(assignment_map, initialized_variable_names
) = checkpoint_utils.get_assignment_map_from_checkpoint(
variables=sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES),
ckpt_path=ckpt_path,
variable_scope="another_scope_a/",
ckpt_variable_scope="scope_a/")
self.assertCountEqual(initialized_variable_names, [
"another_scope_a/var_c:0", "another_scope_a/dense/bias:0",
"another_scope_a/dense/kernel:0", "another_scope_a/scope_b/var_e:0"
])
tf.train.init_from_checkpoint(ckpt_path, assignment_map)
sess.run(tf.global_variables_initializer())
self.assertAllClose(var_c, expected_c)
self.assertAllClose(var_d, expected_d)
self.assertAllClose(var_e, expected_e)
# When require_all_variables_initialized = True, an error is raised
# since a checkpoint variable corresponding to the variable
# `another_scope_a/scope_b/var_g` cannot be found
# in the ckpt_variable_scope `scope_a/`.
with self.assertRaisesRegex(ValueError, "cannot be mapped"):
(assignment_map, initialized_variable_names
) = checkpoint_utils.get_assignment_map_from_checkpoint(
variables=sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES),
ckpt_path=ckpt_path,
variable_scope="another_scope_a/",
ckpt_variable_scope="scope_a/",
require_all_variables_initialized=True)
def test_init_from_checkpoint_init_checkpoint_none(self):
self.assertIsNone(checkpoint_utils.get_scaffold_fn(None, True))
def test_init_from_checkpoint_single_scope_pair(self):
ckpt_path, expected_c, expected_d, expected_e = (
self._create_test_checkpoint("scope_a", "scope_b", "var_c", "var_e",
"var_f"))
with self.cached_session() as sess:
var_c, var_d, var_e = _create_test_variables("scope_a_1", "scope_b",
"var_c", "var_e", "var_g")
scaffold_fn = checkpoint_utils.get_scaffold_fn(
ckpt_path, True, variable_scope_pairs=[("scope_a_1/", "scope_a/")])
scaffold = scaffold_fn()
self.assertIsInstance(scaffold, tf.train.Scaffold)
sess.run(tf.global_variables_initializer())
self.assertAllClose(var_c, expected_c)
self.assertAllClose(var_d, expected_d)
self.assertAllClose(var_e, expected_e)
def test_init_from_checkpoint_multiple_scope_pairs(self):
ckpt_path, expected_c, expected_d, expected_e = (
self._create_test_checkpoint("scope_a", "scope_b", "var_c", "var_e",
"var_f"))
with self.cached_session() as sess:
var_c_1, var_d_1, var_e_1 = _create_test_variables(
"scope_a_1", "scope_b", "var_c", "var_e", "var_g")
var_c_2, var_d_2, var_e_2 = _create_test_variables(
"scope_a_2", "scope_b", "var_c", "var_e", "var_g")
scaffold_fn = checkpoint_utils.get_scaffold_fn(
ckpt_path,
True,
variable_scope_pairs=[("scope_a_1/", "scope_a/"),
("scope_a_2/", "scope_a/")])
scaffold = scaffold_fn()
self.assertIsInstance(scaffold, tf.train.Scaffold)
sess.run(tf.global_variables_initializer())
self.assertAllClose(var_c_1, expected_c)
self.assertAllClose(var_d_1, expected_d)
self.assertAllClose(var_e_1, expected_e)
self.assertAllClose(var_c_2, expected_c)
self.assertAllClose(var_d_2, expected_d)
self.assertAllClose(var_e_2, expected_e)
if __name__ == "__main__":
tf.compat.v1.disable_eager_execution()
tf.test.main()
|
hanlp/components/srl/span_rank/inference_utils.py | antfootAlex/HanLP | 27,208 | 12681968 | # Adopted from https://github.com/KiroSummer/A_Syntax-aware_MTL_Framework_for_Chinese_SRL
# Inference functions for the SRL model.
import numpy as np
def decode_spans(span_starts, span_ends, span_scores, labels_inv):
"""
Args:
span_starts: [num_candidates,]
span_scores: [num_candidates, num_labels]
      span_ends: [num_candidates,]
      labels_inv: mapping from label id to label string.
    Returns:
      A list of (start, end, label_string) tuples for the predicted spans.
"""
pred_spans = []
span_labels = np.argmax(span_scores, axis=1) # [num_candidates]
spans_list = list(zip(span_starts, span_ends, span_labels, span_scores))
spans_list = sorted(spans_list, key=lambda x: x[3][x[2]], reverse=True)
predicted_spans = {}
for start, end, label, _ in spans_list:
# Skip invalid span.
if label == 0 or (start, end) in predicted_spans:
continue
pred_spans.append((start, end, labels_inv[label]))
predicted_spans[(start, end)] = label
return pred_spans
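# Minimal usage sketch (hypothetical inputs): with labels_inv = {0: "O", 1: "ARG0"},
# decode_spans([0], [1], np.array([[0.1, 0.9]]), labels_inv) returns [(0, 1, "ARG0")].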
def greedy_decode(predict_dict, srl_labels_inv):
"""Greedy decoding for SRL predicate-argument structures.
Args:
predict_dict: Dictionary of name to numpy arrays.
srl_labels_inv: SRL label id to string name.
    Returns:
      A tuple (pred_to_args, num_suppressed_args): pred_to_args maps each predicate index to a
      list of (start, end, label) argument spans; num_suppressed_args counts the overlapping
      arguments that were greedily dropped.
"""
arg_starts = predict_dict["arg_starts"]
arg_ends = predict_dict["arg_ends"]
predicates = predict_dict["predicates"]
arg_labels = predict_dict["arg_labels"]
scores = predict_dict["srl_scores"]
num_suppressed_args = 0
# Map from predicates to a list of labeled spans.
pred_to_args = {}
if len(arg_ends) > 0 and len(predicates) > 0:
max_len = max(np.max(arg_ends), np.max(predicates)) + 1
else:
max_len = 1
for j, pred_id in enumerate(predicates):
args_list = []
for i, (arg_start, arg_end) in enumerate(zip(arg_starts, arg_ends)):
# If label is not null.
if arg_labels[i][j] == 0:
continue
label = srl_labels_inv[arg_labels[i][j]]
# if label not in ["V", "C-V"]:
args_list.append((arg_start, arg_end, label, scores[i][j][arg_labels[i][j]]))
# Sort arguments by highest score first.
args_list = sorted(args_list, key=lambda x: x[3], reverse=True)
new_args_list = []
flags = [False for _ in range(max_len)]
# Predicate will not overlap with arguments either.
flags[pred_id] = True
for (arg_start, arg_end, label, score) in args_list:
# If none of the tokens has been covered:
if not max(flags[arg_start:arg_end + 1]):
new_args_list.append((arg_start, arg_end, label))
for k in range(arg_start, arg_end + 1):
flags[k] = True
# Only add predicate if it has any argument.
if new_args_list:
pred_to_args[pred_id] = new_args_list
num_suppressed_args += len(args_list) - len(new_args_list)
return pred_to_args, num_suppressed_args
_CORE_ARGS = {"ARG0": 1, "ARG1": 2, "ARG2": 4, "ARG3": 8, "ARG4": 16, "ARG5": 32, "ARGA": 64,
"A0": 1, "A1": 2, "A2": 4, "A3": 8, "A4": 16, "A5": 32, "AA": 64}
def get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents):
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index
predicted_antecedent = (int(top_span_starts[predicted_index]), int(top_span_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster
mention = (int(top_span_starts[i]), int(top_span_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster
predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = {m: predicted_clusters[i] for m, i in list(mention_to_predicted.items())}
return predicted_clusters, mention_to_predicted
def _decode_non_overlapping_spans(starts, ends, scores, max_len, labels_inv, pred_id):
labels = np.argmax(scores, axis=1)
spans = []
for i, (start, end, label) in enumerate(zip(starts, ends, labels)):
if label <= 0:
continue
label_str = labels_inv[label]
if pred_id is not None and label_str == "V":
continue
spans.append((start, end, label_str, scores[i][label]))
spans = sorted(spans, key=lambda x: x[3], reverse=True)
flags = np.zeros([max_len], dtype=bool)
if pred_id is not None:
flags[pred_id] = True
new_spans = []
for start, end, label_str, score in spans:
if not max(flags[start:end + 1]):
new_spans.append((start, end, label_str)) # , score))
for k in range(start, end + 1):
flags[k] = True
return new_spans
def _dp_decode_non_overlapping_spans(starts, ends, scores, max_len, labels_inv, pred_id, u_constraint=False):
num_roles = scores.shape[1] # [num_arg, num_roles]
labels = np.argmax(scores, axis=1).astype(np.int64)
spans = list(zip(starts, ends, list(range(len(starts)))))
spans = sorted(spans, key=lambda x: (x[0], x[1])) # sort according to the span start index
if u_constraint:
f = np.zeros([max_len + 1, 128], dtype=float) - 0.1
else: # This one
f = np.zeros([max_len + 1, 1], dtype=float) - 0.1
f[0, 0] = 0
    states = {0: set([0])}  # A dictionary from end position t to the set of reachable binary core-arg states.
pointers = {} # A dictionary from states to (arg_id, role, prev_t, prev_rs)
best_state = [(0, 0)]
def _update_state(t0, rs0, t1, rs1, delta, arg_id, role):
if f[t0][rs0] + delta > f[t1][rs1]:
f[t1][rs1] = f[t0][rs0] + delta
if t1 not in states:
states[t1] = set()
states[t1].update([rs1])
            pointers[(t1, rs1)] = (arg_id, role, t0, rs0)  # the pointers store (arg_id, role, prev_t, prev_rs) for backtracking
if f[t1][rs1] > f[best_state[0][0]][best_state[0][1]]:
best_state[0] = (t1, rs1)
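    # f[t][rs] is the best accumulated score over non-overlapping spans covering tokens before position t
    # with core-argument bitmask rs (rs stays 0 unless u_constraint is enabled); `pointers` records the
    # argmax transitions so the chosen spans can be recovered by backtracking at the end.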
for start, end, i in spans: # [arg_start, arg_end, arg_span_id]
assert scores[i][0] == 0 # dummy score
# The extra dummy score should be same for all states, so we can safely skip arguments overlap
# with the predicate.
if pred_id is not None and start <= pred_id and pred_id <= end: # skip the span contains the predicate
continue
r0 = labels[i] # Locally best role assignment.
# Strictly better to incorporate a dummy span if it has the highest local score.
if r0 == 0: # labels_inv[r0] == "O"
continue
r0_str = labels_inv[r0]
# Enumerate explored states.
t_states = [t for t in list(states.keys()) if t <= start] # collect the state which is before the current span
for t in t_states: # for each state
role_states = states[t]
# Update states if best role is not a core arg.
if not u_constraint or r0_str not in _CORE_ARGS: # True; this one
for rs in role_states: # the set type in the value in the state dict
_update_state(t, rs, end + 1, rs, scores[i][r0], i, r0) # update the state
else:
for rs in role_states:
for r in range(1, num_roles):
if scores[i][r] > 0:
r_str = labels_inv[r]
core_state = _CORE_ARGS.get(r_str, 0)
# print start, end, i, r_str, core_state, rs
if core_state & rs == 0:
_update_state(t, rs, end + 1, rs | core_state, scores[i][r], i, r)
# Backtrack to decode.
new_spans = []
t, rs = best_state[0]
while (t, rs) in pointers:
i, r, t0, rs0 = pointers[(t, rs)]
new_spans.append((int(starts[i]), int(ends[i]), labels_inv[r]))
t = t0
rs = rs0
return new_spans[::-1]
def srl_decode(sentence_lengths, predict_dict, srl_labels_inv, config): # decode the predictions.
# Decode sentence-level tasks.
num_sentences = len(sentence_lengths)
predictions = [{} for _ in range(num_sentences)]
# Sentence-level predictions.
for i in range(num_sentences): # for each sentences
# if predict_dict["No_arg"] is True:
# predictions["srl"][i][predict_dict["predicates"][i]] = []
# continue
predict_dict_num_args_ = predict_dict["num_args"].cpu().numpy()
predict_dict_num_preds_ = predict_dict["num_preds"].cpu().numpy()
predict_dict_predicates_ = predict_dict["predicates"].cpu().numpy()
predict_dict_arg_starts_ = predict_dict["arg_starts"].cpu().numpy()
predict_dict_arg_ends_ = predict_dict["arg_ends"].cpu().numpy()
predict_dict_srl_scores_ = predict_dict["srl_scores"].detach().cpu().numpy()
num_args = predict_dict_num_args_[i] # the number of the candidate argument spans
num_preds = predict_dict_num_preds_[i] # the number of the candidate predicates
# for each predicate id, exec the decode process
for j, pred_id in enumerate(predict_dict_predicates_[i][:num_preds]):
# sorted arg_starts and arg_ends and srl_scores ? should be??? enforce_srl_constraint = False
arg_spans = _dp_decode_non_overlapping_spans(
predict_dict_arg_starts_[i][:num_args],
predict_dict_arg_ends_[i][:num_args],
predict_dict_srl_scores_[i, :num_args, j, :],
sentence_lengths[i], srl_labels_inv, pred_id, config.enforce_srl_constraint)
# To avoid warnings in the eval script.
if config.use_gold_predicates: # false
arg_spans.append((pred_id, pred_id, "V"))
if arg_spans:
predictions[i][int(pred_id)] = sorted(arg_spans, key=lambda x: (x[0], x[1]))
return predictions
|
recipes/Python/578399_alternative_way_draw_parallels_meridians/recipe-578399.py | tdiprima/code | 2,023 | 12681975 | <filename>recipes/Python/578399_alternative_way_draw_parallels_meridians/recipe-578399.py<gh_stars>1000+
#!/usr/bin/env python3
'''An alternative way to draw parallels and meridians with basemap.
Basemap is a toolkit of matplotlib used to plot geographic maps.
With this function you can:
* Draw the latitude/longitude grid easily in one line of code, specifying the
lat/lon intervals.
* Use rcParams, so all your figures will look more consistent.
* Specify the label pad in points (instead of projection units).
* Place the labels indicating which margins will be used.
'''
import numpy as np
import matplotlib.pyplot as plt  # needed at module level: latlon_grid() uses plt.rcParams and plt.gcf()
def latlon_grid(bmap, lon_int, lat_int, labels='lb', **kwargs):
'''Draws a lat-lon grid in an easy way.
Some default values are taken from rcParams instead of 'black' (color) and
    1.0 (linewidth), which are the defaults in Basemap.
In Basemap, the label pad is computed in projection units. Now you can use
the keyword argument 'labelpad' to control this separation in points. If
not specified then this value is taken from rcParams.
Arguments:
bmap -- Basemap object.
lon_int, lat_int -- Difference in degrees from one longitude or latitude to
the next.
labels -- String specifying which margins will be used to write the labels.
If None, no label will be shown.
It is assummed that left/right margins (i.e. Y axes) correspond
to latitudes and top/bottom (X axes) to longitudes. It is valid
every combination of the characters 't' | 'b' | 'l' | 'r'
(top|bottom|left|right).
Ex: 'lrb' means that the longitude values will appear in bottom
margin and latitudes in left and right.
**kwargs -- Other arguments to drawparallels, drawmeridians and plt.text.
labelpad has units of points.
'''
    # Processes arguments and rcParams for default values
if 'color' not in kwargs:
kwargs['color'] = plt.rcParams['grid.color']
if 'linewidth' not in kwargs:
kwargs['linewidth'] = plt.rcParams['grid.linewidth']
if 'labelpad' in kwargs:
padx = pady = kwargs['labelpad']
del kwargs['labelpad']
else:
pady = plt.rcParams['xtick.major.pad']
padx = plt.rcParams['ytick.major.pad']
if 'size' in kwargs:
xfontsize = yfontsize = kwargs['size']
del kwargs['size']
elif 'fontsize' in kwargs:
xfontsize = yfontsize = kwargs['fontsize']
del kwargs['fontsize']
else:
xfontsize = plt.rcParams['xtick.labelsize']
yfontsize = plt.rcParams['ytick.labelsize']
# Vectors of coordinates
lon0 = bmap.lonmin // lon_int * lon_int
lat0 = bmap.latmin // lat_int * lat_int
lon1 = bmap.lonmax // lon_int * lon_int
lat1 = bmap.latmax // lat_int * lat_int
nlons = (lon1 - lon0) / lon_int + 1
nlats = (lat1 - lat0) / lat_int + 1
assert nlons / int(nlons) == 1, nlons
assert nlats / int(nlats) == 1, nlats
lons = np.linspace(lon0, lon1, int(nlons))
lats = np.linspace(lat0, lat1, int(nlats))
    # If not specified, compute the label offset from 'labelpad'
xos = yos = None
if 'xoffset' in kwargs:
xos = kwargs['xoffset']
if 'yoffset' in kwargs:
yos = kwargs['yoffset']
if xos is None and yos is None:
# Page size in inches and axes limits
fig_w, fig_h = plt.gcf().get_size_inches()
points = plt.gca().get_position().get_points()
x1, y1 = tuple(points[0])
x2, y2 = tuple(points[1])
# Width and height of axes in points
w = (x2 - x1) * fig_w * 72
h = (y2 - y1) * fig_h * 72
# If the aspect relation is fixed then compute the real values
if bmap.fix_aspect:
aspect = bmap.aspect * w / h
if aspect > 1:
w = h / bmap.aspect
elif aspect < 1:
h = w * bmap.aspect
# Offset in projection units (meters or degrees)
xos = padx * (bmap.urcrnrx - bmap.llcrnrx) / w
yos = pady * (bmap.urcrnry - bmap.llcrnry) / h
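        # For example (hypothetical figure): with padx = 6 points and axes 432 points wide spanning
        # 240 projection units, xos = 6 * 240 / 432 ~= 3.3 projection units.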
# Set the labels
latlabels = [False] * 4
lonlabels = [False] * 4
if labels is not None:
pst = {'l': 0, 'r': 1, 't': 2, 'b': 3}
lst = {'l': latlabels, 'r': latlabels, 't': lonlabels, 'b': lonlabels}
for i in labels.lower():
lst[i][pst[i]] = True
# Draws the grid
bmap.drawparallels(lats, labels=latlabels, fontsize=yfontsize,
xoffset=xos, yoffset=yos, **kwargs)
bmap.drawmeridians(lons, labels=lonlabels, fontsize=xfontsize,
xoffset=xos, yoffset=yos, **kwargs)
# TEST
if __name__ == '__main__':
import matplotlib.pyplot as plt
import mpl_toolkits.basemap as basemap
# Some rcParams (example)
plt.rc('grid', linewidth=0.5, color='g')
plt.rcParams['xtick.major.pad'] = 6
plt.rcParams['ytick.major.pad'] = 12
plt.rcParams['xtick.labelsize'] = 'small'
plt.rcParams['ytick.labelsize'] = 'x-small'
# Basemap
m = basemap.Basemap(projection='merc', llcrnrlon=-120, llcrnrlat=30,
urcrnrlon=120, urcrnrlat=70)
# Plots a figure to compare
plt.subplot(211)
x = np.linspace(-10, 10, 10000)
y = np.sin(x)
plt.plot(x, y)
plt.grid()
plt.title('Example of figure using only rc values')
# Draws the grid (using the function)
plt.subplot(212)
latlon_grid(m, 30, 10, labels='lb', dashes=[1, 3])
m.drawcoastlines()
plt.title('Using latlon_grid()')
plt.show()
|
tests/test_misc.py | Alindil/python-plexapi | 749 | 12681981 | # -*- coding: utf-8 -*-
import os
import shlex
import subprocess
from os.path import abspath, dirname, join
import pytest
SKIP_EXAMPLES = ["Example 4"]
@pytest.mark.skipif(os.name == "nt", reason="No make.bat specified for Windows")
def test_build_documentation():
docroot = join(dirname(dirname(abspath(__file__))), "docs")
cmd = shlex.split("sphinx-build -aE . _build")
proc = subprocess.Popen(
cmd, cwd=docroot, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
status = proc.wait()
assert status == 0
issues = []
for output in proc.communicate():
for line in str(output).split("\\n"):
line = line.lower().strip()
if "warning" in line or "error" in line or "traceback" in line:
issues.append(line)
for line in issues:
print(line)
assert not issues
def test_readme_examples(plex):
failed = 0
examples = _fetch_examples()
assert len(examples), "No examples found in README"
for title, example in examples:
if _check_run_example(title):
try:
print("\n%s\n%s" % (title, "-" * len(title)))
exec("\n".join(example))
except Exception as err:
failed += 1
print("Error running test: %s\nError: %s" % (title, err))
assert not failed, "%s examples raised an exception." % failed
def _fetch_examples():
parsing = False
examples = []
filepath = join(dirname(dirname(abspath(__file__))), "README.rst")
with open(filepath, "r") as handle:
for line in handle.read().split("\n"):
line = line[4:]
if line.startswith("# Example "):
parsing = True
title = line.lstrip("# ")
examples.append([title, []])
elif parsing and line == "":
parsing = False
elif parsing:
examples[-1][1].append(line)
return examples
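# _fetch_examples() assumes README code blocks are indented by four spaces and introduced by a
# "# Example N" comment line, with a blank line ending each block. An illustrative (not verbatim)
# snippet of that layout, using hypothetical library calls:
#
#     # Example 1: List all unwatched movies.
#     movies = plex.library.section('Movies')
#     for video in movies.search(unwatched=True):
#         print(video.title)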
def _check_run_example(title):
for skip_example in SKIP_EXAMPLES:
if skip_example in title:
return False
return True
|
qiskit/providers/ibmq/random/utils.py | dowem/qiskit-ibmq-provider | 199 | 12681988 | <reponame>dowem/qiskit-ibmq-provider<filename>qiskit/providers/ibmq/random/utils.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module for utility functions."""
from typing import List
import numpy as np
def bytes_to_bitarray(the_bytes: bytes, num_bits: int) -> List[int]:
"""Convert input bytes into an array of bits.
Args:
the_bytes: Bytes to be converted.
num_bits: Number of bits to return.
Returns:
An array of bits.
"""
return [(the_bytes[i >> 3] >> (i & 7)) & 1 for i in range(num_bits)]
def bitarray_to_bytes(bitarray: List[int]) -> bytes:
"""Convert an array of bits to bytes.
Args:
bitarray: Bit array to be converted.
Returns:
Input array in bytes.
"""
n_bits = len(bitarray)
n_bytes = (n_bits + 7) >> 3
int_array = [0] * n_bytes
for i in range(n_bits):
int_array[i >> 3] |= bitarray[i] << (i & 7)
return bytes(int_array)
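# Round-trip sketch (hypothetical values): bitarray_to_bytes([1, 0, 1]) == b'\x05' and
# bytes_to_bitarray(b'\x05', 3) == [1, 0, 1]; bits are packed least-significant bit first.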
def generate_wsr(num_bits: int) -> List:
"""Generate a list of WSR bits.
Args:
num_bits: Number of bits needed.
Returns:
A list of random binary numbers.
"""
return list(np.random.randint(2, size=num_bits))
|
tests/install/test.py | nacl/rules_pkg | 123 | 12681994 | #!/usr/bin/env python3
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import os
import unittest
import stat
import subprocess
from rules_python.python.runfiles import runfiles
from pkg.private import manifest
class PkgInstallTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.runfiles = runfiles.Create()
# Somewhat of an implementation detail, but it works. I think.
manifest_file = cls.runfiles.Rlocation("rules_pkg/tests/install/test_installer_install_script-install-manifest.json")
with open(manifest_file, 'r') as fh:
manifest_data_raw = json.load(fh)
cls.manifest_data = {}
for entry in manifest_data_raw:
entry_struct = manifest.ManifestEntry(*entry)
cls.manifest_data[entry_struct.dest] = entry_struct
cls.installdir = os.path.join(os.getenv("TEST_TMPDIR"), "installdir")
env = {}
env.update(cls.runfiles.EnvVars())
subprocess.check_call([
cls.runfiles.Rlocation("rules_pkg/tests/install/test_installer"),
"--destdir", cls.installdir,
"--verbose",
],
env=env)
def entity_type_at_path(self, path):
if os.path.islink(path):
return manifest.ENTRY_IS_LINK
elif os.path.isfile(path):
return manifest.ENTRY_IS_FILE
elif os.path.isdir(path):
return manifest.ENTRY_IS_DIR
else:
# We can't infer what TreeArtifacts are by looking at them -- the
# build system is not aware of their contents.
raise ValueError("Entity {} is not a link, file, or directory")
def assertEntryTypeMatches(self, entry, actual_path):
actual_entry_type = self.entity_type_at_path(actual_path)
self.assertEqual(actual_entry_type, entry.entry_type,
"Entity {} should be a {}, but was actually {}".format(
entry.dest,
manifest.entry_type_to_string(entry.entry_type),
manifest.entry_type_to_string(actual_entry_type),
))
def assertEntryModeMatches(self, entry, actual_path):
# TODO: permissions in windows are... tricky. Don't bother
# testing for them if we're in it for the time being
if os.name == 'nt':
return
actual_mode = stat.S_IMODE(os.stat(actual_path).st_mode)
expected_mode = int(entry.mode, 8)
self.assertEqual(actual_mode, expected_mode,
"Entry {} has mode {:04o}, expected {:04o}".format(
entry.dest, actual_mode, expected_mode,
))
def test_manifest_matches(self):
unowned_dirs = set()
owned_dirs = set()
# Figure out what directories we are supposed to own, and which ones we
# aren't.
#
# Unowned directories are created implicitly by requesting other
# elements be created or installed.
#
# Owned directories are created explicitly with the pkg_mkdirs rule.
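        # For example (hypothetical manifest): installing a file at 'usr/bin/tool' implicitly creates
        # the unowned directories 'usr' and 'usr/bin', while an explicit pkg_mkdirs entry for
        # 'var/log/myapp' makes 'var/log/myapp' an owned directory.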
for dest, data in self.manifest_data.items():
if data.entry_type == manifest.ENTRY_IS_DIR:
owned_dirs.add(dest)
# TODO(nacl): The initial stage of the accumulation returns an empty string,
        # which ends up in the set representing the root of the manifest.
# This may not be the best thing.
unowned_dirs.update([p for p in itertools.accumulate(os.path.dirname(dest).split('/'),
func=lambda accum, new: accum + '/' + new)])
# In the above loop, unowned_dirs contains all possible directories that
# are in the manifest. Prune them here.
unowned_dirs -= owned_dirs
# TODO: check for ownership (user, group)
found_entries = {dest: False for dest in self.manifest_data.keys()}
for root, dirs, files in os.walk(self.installdir):
rel_root_path = os.path.relpath(root, self.installdir)
# The rest of this uses string comparison. To reduce potential
# confusion, ensure that the "." doesn't show up elsewhere.
#
# TODO(nacl) consider using pathlib here, which will reduce the
# need for path cleverness.
if rel_root_path == '.':
rel_root_path = ''
# TODO(nacl): check for treeartifacts here. If so, prune `dirs`,
# and set the rest aside for future processing.
# Directory ownership tests
if len(files) == 0 and len(dirs) == 0:
# Empty directories must be explicitly requested by something
if rel_root_path not in self.manifest_data:
self.fail("Directory {} not in manifest".format(rel_root_path))
entry = self.manifest_data[rel_root_path]
self.assertEntryTypeMatches(entry, root)
self.assertEntryModeMatches(entry, root)
found_entries[rel_root_path] = True
else:
# There's something in here. Depending on how it was set up, it
# could either be owned or unowned.
if rel_root_path in self.manifest_data:
entry = self.manifest_data[rel_root_path]
self.assertEntryTypeMatches(entry, root)
self.assertEntryModeMatches(entry, root)
found_entries[rel_root_path] = True
else:
# If any unowned directories are here, they must be the
# prefix of some entity in the manifest.
self.assertIn(rel_root_path, unowned_dirs)
for f in files:
# The path on the filesystem in which the file actually exists.
# TODO(#382): This part of the test assumes that the path
# separator is '/', which is not the case in Windows. However,
# paths emitted in the JSON manifests may also be using
# '/'-separated paths.
#
# Confirm the degree to which this is a problem, and remedy as
                # needed. It may be worth setting the keys in the manifest_data
# dictionary to pathlib.Path or otherwise converting them to
# native paths.
fpath = os.path.normpath("/".join([root, f]))
# The path inside the manifest (relative to the install
# destdir).
rel_fpath = os.path.normpath("/".join([rel_root_path, f]))
if rel_fpath not in self.manifest_data:
self.fail("Entity {} not in manifest".format(rel_fpath))
entry = self.manifest_data[rel_fpath]
self.assertEntryTypeMatches(entry, fpath)
self.assertEntryModeMatches(entry, fpath)
found_entries[rel_fpath] = True
# TODO(nacl): check for TreeArtifacts
num_missing = 0
for dest, present in found_entries.items():
if present is False:
print("Entity {} is missing from the tree".format(dest))
num_missing += 1
self.assertEqual(num_missing, 0)
if __name__ == "__main__":
unittest.main()
|
sdk/python/pulumi_gcp/accesscontextmanager/service_perimeters.py | sisisin/pulumi-gcp | 121 | 12682012 | <reponame>sisisin/pulumi-gcp
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ServicePerimetersArgs', 'ServicePerimeters']
@pulumi.input_type
class ServicePerimetersArgs:
def __init__(__self__, *,
parent: pulumi.Input[str],
service_perimeters: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePerimetersServicePerimeterArgs']]]] = None):
"""
The set of arguments for constructing a ServicePerimeters resource.
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[Sequence[pulumi.Input['ServicePerimetersServicePerimeterArgs']]] service_perimeters: The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy.
Structure is documented below.
"""
pulumi.set(__self__, "parent", parent)
if service_perimeters is not None:
pulumi.set(__self__, "service_perimeters", service_perimeters)
@property
@pulumi.getter
def parent(self) -> pulumi.Input[str]:
"""
The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
"""
return pulumi.get(self, "parent")
@parent.setter
def parent(self, value: pulumi.Input[str]):
pulumi.set(self, "parent", value)
@property
@pulumi.getter(name="servicePerimeters")
def service_perimeters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServicePerimetersServicePerimeterArgs']]]]:
"""
The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy.
Structure is documented below.
"""
return pulumi.get(self, "service_perimeters")
@service_perimeters.setter
def service_perimeters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePerimetersServicePerimeterArgs']]]]):
pulumi.set(self, "service_perimeters", value)
@pulumi.input_type
class _ServicePerimetersState:
def __init__(__self__, *,
parent: Optional[pulumi.Input[str]] = None,
service_perimeters: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePerimetersServicePerimeterArgs']]]] = None):
"""
Input properties used for looking up and filtering ServicePerimeters resources.
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[Sequence[pulumi.Input['ServicePerimetersServicePerimeterArgs']]] service_perimeters: The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy.
Structure is documented below.
"""
if parent is not None:
pulumi.set(__self__, "parent", parent)
if service_perimeters is not None:
pulumi.set(__self__, "service_perimeters", service_perimeters)
@property
@pulumi.getter
def parent(self) -> Optional[pulumi.Input[str]]:
"""
The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
"""
return pulumi.get(self, "parent")
@parent.setter
def parent(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parent", value)
@property
@pulumi.getter(name="servicePerimeters")
def service_perimeters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServicePerimetersServicePerimeterArgs']]]]:
"""
The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy.
Structure is documented below.
"""
return pulumi.get(self, "service_perimeters")
@service_perimeters.setter
def service_perimeters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePerimetersServicePerimeterArgs']]]]):
pulumi.set(self, "service_perimeters", value)
class ServicePerimeters(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
parent: Optional[pulumi.Input[str]] = None,
service_perimeters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePerimetersServicePerimeterArgs']]]]] = None,
__props__=None):
"""
Replace all existing Service Perimeters in an Access Policy with the Service Perimeters provided. This is done atomically.
This is a bulk edit of all Service Perimeters and may override existing Service Perimeters created by `accesscontextmanager.ServicePerimeter`,
thus causing a permadiff if used alongside `accesscontextmanager.ServicePerimeter` on the same parent.
To get more information about ServicePerimeters, see:
* [API documentation](https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters)
* How-to Guides
* [Service Perimeter Quickstart](https://cloud.google.com/vpc-service-controls/docs/quickstart)
## Example Usage
### Access Context Manager Service Perimeters Basic
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeters("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
service_perimeters=[
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
title="",
),
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["bigtable.googleapis.com"],
),
title="",
),
])
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
require_screen_lock=False,
),
regions=[
"CH",
"IT",
"US",
],
)],
),
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="chromeos_no_lock")
```
## Import
ServicePerimeters can be imported using any of these accepted formats
```sh
$ pulumi import gcp:accesscontextmanager/servicePerimeters:ServicePerimeters default {{parent}}/servicePerimeters
```
```sh
$ pulumi import gcp:accesscontextmanager/servicePerimeters:ServicePerimeters default {{parent}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePerimetersServicePerimeterArgs']]]] service_perimeters: The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy.
Structure is documented below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServicePerimetersArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Replace all existing Service Perimeters in an Access Policy with the Service Perimeters provided. This is done atomically.
This is a bulk edit of all Service Perimeters and may override existing Service Perimeters created by `accesscontextmanager.ServicePerimeter`,
thus causing a permadiff if used alongside `accesscontextmanager.ServicePerimeter` on the same parent.
To get more information about ServicePerimeters, see:
* [API documentation](https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters)
* How-to Guides
* [Service Perimeter Quickstart](https://cloud.google.com/vpc-service-controls/docs/quickstart)
## Example Usage
### Access Context Manager Service Perimeters Basic
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeters("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
service_perimeters=[
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
title="",
),
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["bigtable.googleapis.com"],
),
title="",
),
])
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
require_screen_lock=False,
),
regions=[
"CH",
"IT",
"US",
],
)],
),
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="chromeos_no_lock")
```
## Import
ServicePerimeters can be imported using any of these accepted formats
```sh
$ pulumi import gcp:accesscontextmanager/servicePerimeters:ServicePerimeters default {{parent}}/servicePerimeters
```
```sh
$ pulumi import gcp:accesscontextmanager/servicePerimeters:ServicePerimeters default {{parent}}
```
:param str resource_name: The name of the resource.
:param ServicePerimetersArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServicePerimetersArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
parent: Optional[pulumi.Input[str]] = None,
service_perimeters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePerimetersServicePerimeterArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServicePerimetersArgs.__new__(ServicePerimetersArgs)
if parent is None and not opts.urn:
raise TypeError("Missing required property 'parent'")
__props__.__dict__["parent"] = parent
__props__.__dict__["service_perimeters"] = service_perimeters
super(ServicePerimeters, __self__).__init__(
'gcp:accesscontextmanager/servicePerimeters:ServicePerimeters',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
parent: Optional[pulumi.Input[str]] = None,
service_perimeters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePerimetersServicePerimeterArgs']]]]] = None) -> 'ServicePerimeters':
"""
Get an existing ServicePerimeters resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePerimetersServicePerimeterArgs']]]] service_perimeters: The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy.
Structure is documented below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ServicePerimetersState.__new__(_ServicePerimetersState)
__props__.__dict__["parent"] = parent
__props__.__dict__["service_perimeters"] = service_perimeters
return ServicePerimeters(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def parent(self) -> pulumi.Output[str]:
"""
The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
"""
return pulumi.get(self, "parent")
@property
@pulumi.getter(name="servicePerimeters")
def service_perimeters(self) -> pulumi.Output[Optional[Sequence['outputs.ServicePerimetersServicePerimeter']]]:
"""
The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy.
Structure is documented below.
"""
return pulumi.get(self, "service_perimeters")
|
tests/Exscript/servers/SSHdTest.py | saveshodhan/exscript | 226 | 12682017 | from __future__ import absolute_import
import sys
import unittest
import re
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from .ServerTest import ServerTest
from Exscript.servers import SSHd
from Exscript.protocols import SSH2
class SSHdTest(ServerTest):
CORRELATE = SSHd
def _create_daemon(self):
self.daemon = SSHd(self.host, self.port, self.device)
def _create_client(self):
return SSH2()
def suite():
return unittest.TestLoader().loadTestsFromTestCase(SSHdTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|