ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a54fca94bb38a10ec44463bce8b8ffd7bc50199 | #
# Copyright 2020--2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"Format to loader map."
import os
from pathlib import Path
import re
from typing import Any, Dict, Mapping, Optional, Union
from .._schema import SchemaDict
from .._typing import PathLike
from ._base import Loader
from .audio import WaveLoader
from .image import PillowLoader
from .text import PlainTextLoader
from .table import CSVPandasLoader
class FormatLoaderMap:
"""Manage a map between formats and loaders. This is usually used to determine which loader should be used for a
given format.
:param m: A dict that maps formats to loaders.
"""
def __init__(self, m: Optional[Mapping[str, Loader]] = None) -> None:
"""Constructor method.
"""
self._map: Dict[str, Loader] = {}
if m is not None:
for fmt, loader in m.items():
self.register_loader(fmt, loader)
def register_loader(self, fmt: str, loader: Loader) -> None:
"""Register a loader. If the format exists in the table, update it.
:param fmt: The format.
:param loader: The corresponding loader.
:raise TypeError: ``loader`` is not a :class:`Loader` object.
"""
if not isinstance(loader, Loader):
raise TypeError(f'loader "{loader}" must be a Loader instance.')
# We may support an overriding check in the future
self._map[fmt] = loader
def __getitem__(self, fmt: str) -> Loader:
"""Get the loader of a given format.
:param fmt: The format.
"""
return self._map[fmt]
def __contains__(self, fmt: str) -> bool:
"""Whether a format is covered by this format loader map.
:param fmt: Name of the format.
"""
return fmt in self._map
_default_format_loader_map: FormatLoaderMap = FormatLoaderMap({
'text/plain': PlainTextLoader(),
'table/csv': CSVPandasLoader(),
'image/jpeg': PillowLoader(),
'image/png': PillowLoader(),
'audio/wav': WaveLoader(),
})
def load_data_files(fmt: Union[str, SchemaDict], data_dir: PathLike, path: Union[str, SchemaDict], *,
format_loader_map: Optional[FormatLoaderMap] = None) -> Any:
"""Load data files.
:param fmt: The format.
:param data_dir: The path to the directory that holds the data files.
:param path: If it is a :class:`str`, it is the path to the file. If it is a :class:`dict`, it consists of two keys:
``type`` and ``value``. If ``type`` is ``"regex"``, ``value`` is a regular expression that the file paths
(relative to ``data_dir``) must match.
:param format_loader_map: The format loader map to use.
:raises TypeError: ``fmt`` or ``path`` is neither a string nor a :class:`SchemaDict`.
:raises ValueError: If ``path`` is a :class:`SchemaDict`, but ``path[type]`` is not ``"regex"``.
:return: Loaded data file objects.
"""
# ``path`` may be a plain path or a regex SchemaDict; further path types may be added in the future.
if format_loader_map is None:
format_loader_map = _default_format_loader_map
if isinstance(fmt, str):
fmt_id: str = fmt
fmt_options: SchemaDict = {}
elif isinstance(fmt, dict):
# In Python 3.8+, this can be done with isinstance(fmt, typing.get_args(SchemaDict))
fmt_id = fmt['id']
fmt_options = fmt.get('options', {})
else:
raise TypeError(f'Parameter "fmt" must be a string or a dict, but it is of type "{type(fmt)}".')
if fmt_id not in format_loader_map:
raise RuntimeError(f'The format loader map does not specify a loader for format "{fmt_id}".')
data_dir = Path(data_dir)
loader = format_loader_map[fmt_id]
if isinstance(path, str):
return loader.load(data_dir / path, fmt_options)
elif isinstance(path, dict):
# In Python 3.8+, this can be done with isinstance(path, typing.get_args(SchemaDict))
path_type = path['type']
if path_type == 'regex':
loaded_data = {}
path_value = path['value']
# We don't use pathlib to build the pattern string here because of Windows path separators and regex escaping.
path_pattern = re.compile(re.escape(str(data_dir) + os.path.sep) +
path_value.replace('/', re.escape(os.path.sep)))
for f in data_dir.rglob('*'):
if path_pattern.fullmatch(str(f)):
loaded_data[str(f)] = loader.load(data_dir / f, fmt_options)
return loaded_data
else:
raise ValueError(f'Unknown type of path "{path_type}".')
else:
raise TypeError(f'Unsupported type of the "path" parameter: {type(path)}.')
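# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of how load_data_files might be called, assuming a data
# directory containing "train.csv" plus PNG files under "images/"; the paths
# and directory names below are hypothetical.
#
# csv_table = load_data_files('table/csv', '/path/to/dataset', 'train.csv')
# images = load_data_files(
#     {'id': 'image/png', 'options': {}},
#     '/path/to/dataset',
#     {'type': 'regex', 'value': r'images/.*\.png'},
# )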
|
py | 1a54fe6ad01b84231fe6afa91414b2c0a8b79b89 | # Write an algorithm that reads an employee's salary and shows the new salary with a 15% raise.
s = float(input("What is the employee's salary? R$ "))
new_salary = s + (s * 15 / 100)
print('An employee who earned R$ {:.2f}, with a 15% raise, now receives R$ {:.2f}'.format(s, new_salary))
|
py | 1a54ffd8dfb9befa682fc9d440afad7ee95f32d9 | from app import create_app
def test_config():
# test create_app with wrong config name
assert not create_app('unspecified_config')
assert create_app('testing')
|
py | 1a5500c105f136e4bcb9ae7a3f164f4e55bfa0c1 | """
Model definition adapted from: https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
"""
import logging
import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
expected_input_size = 224
def __init__(self, features):
super(VGG, self).__init__()
self.features = features
# self.classifier = nn.Sequential(
# nn.Linear(512 * 7 * 7, 4096),
# nn.ReLU(True),
# nn.Dropout(),
# nn.Linear(4096, 4096),
# nn.ReLU(True),
# nn.Dropout(),
# nn.Linear(4096, num_classes),
# )
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
# x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
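"""Builds the VGG feature extractor from a configuration list: 'M' adds a
2x2 max-pool layer, an integer adds a 3x3 convolution (optionally followed
by batch norm) with that many output channels, followed by ReLU."""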
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg11(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['A']), **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
vgg11.expected_input_size = VGG.expected_input_size
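# --- Usage sketch (illustrative) ---
# Builds the feature extractor and runs a dummy batch through it; with the
# classifier commented out above, the output is the flattened conv features.
#
# import torch
# net = vgg11(pretrained=False)
# x = torch.randn(1, 3, net.expected_input_size, net.expected_input_size)
# features = net(x)
# print(features.shape)  # (1, 512 * 7 * 7) = (1, 25088) for 224x224 inputs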
def vgg11_bn(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
vgg11_bn.expected_input_size = VGG.expected_input_size
def vgg13(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['B']), **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
vgg13.expected_input_size = VGG.expected_input_size
def vgg13_bn(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
vgg13_bn.expected_input_size = VGG.expected_input_size
def vgg16(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['D']), **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
vgg16.expected_input_size = VGG.expected_input_size
def vgg16_bn(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
vgg16_bn.expected_input_size = VGG.expected_input_size
def vgg19(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['E']), **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
vgg19.expected_input_size = VGG.expected_input_size
def vgg19_bn(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
vgg19_bn.expected_input_size = VGG.expected_input_size
|
py | 1a5500c5fbf13c381f630a615f8e31ada49417e0 | import typing
from monkey.token import Token
class Node():
def token_literal(self) -> str:
raise NotImplementedError()
def __str__(self) -> str:
raise NotImplementedError()
class Statement(Node):
def statement_node(self) -> None:
# Just for debugging
pass
class Expression(Node):
def expression_node(self) -> None:
# Just for debugging
pass
class Program():
def __init__(self) -> None:
self.statements: typing.List[Statement] = []
def token_literal(self) -> str:
if len(self.statements) > 0:
return self.statements[0].token_literal()
else:
return ''
def __str__(self) -> str:
program = ""
for statement in self.statements:
program += str(statement)
return program
class Identifier(Expression):
def __init__(self, token: Token, value: str) -> None:
self.token: Token = token
self.value: str = value
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
return str(self.value)
class LetStatement(Statement):
def __init__(self, token: Token) -> None:
self.token: Token = token
self.name: Identifier
self.value: Expression
def statement_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
statement = str(self.token_literal()) + " " + str(self.name) + " = "
if self.value is not None:
statement += str(self.value)
statement += ";"
return statement
class ReturnStatement(Statement):
def __init__(self, token: Token) -> None:
self.token: Token = token
self.return_value: Expression
def statement_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
statement = str(self.token_literal()) + " "
if self.return_value is not None:
statement += str(self.return_value)
statement += ";"
return statement
class ExpressionStatement(Statement):
def __init__(self, token: Token) -> None:
self.token: Token = token
self.expression: Expression
def statement_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
if self.expression is not None:
return str(self.expression)
else:
return ""
class BlockStatement(Statement):
def __init__(self, token: Token) -> None:
self.token: Token = token
self.statements: typing.List[Statement] = []
def statement_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
result = ""
for statement in self.statements:
result += str(statement)
return result
class IntegerLiteral(Expression):
def __init__(self, token: Token) -> None:
self.token: Token = token
self.value: int
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
return self.token_literal()
class BooleanLiteral(Expression):
def __init__(self, token: Token, value: bool) -> None:
self.token: Token = token
self.value: bool = value
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
return self.token_literal()
class StringLiteral(Expression):
def __init__(self, token: Token, value: str) -> None:
self.token: Token = token
self.value: str = value
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
return self.token_literal()
class FunctionLiteral(Expression):
def __init__(self, token: Token) -> None:
self.token: Token = token
self.parameters: typing.List[Identifier] = []
self.body: BlockStatement
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
parameters = []
for parameter in self.parameters:
parameters.append(str(parameter))
return f"{self.token_literal()} ( {', '.join(parameters)} ) {str(self.body)}"
class ArrayLiteral(Expression):
def __init__(self, token: Token) -> None:
self.token: Token = token # the '[' token
self.elements: typing.List[Expression] = []
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
elements = []
for element in self.elements:
elements.append(str(element))
return f"[{', '.join(elements)}]"
class HashLiteral(Expression):
def __init__(self, token: Token) -> None:
self.token: Token = token # the '{' token
self.pairs: typing.Dict[Expression, Expression] = {}
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
pairs = []
for key, value in self.pairs.items():
pairs.append(f"{str(key)}:{str(value)}")
return "{" + f"{', '.join(pairs)}" + "}"
class PrefixExpression(Expression):
def __init__(self, token: Token, operator: str) -> None:
self.token: Token = token
self.operator: str = operator
self.right: Expression
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
return f"({self.operator}{str(self.right)})"
class InfixExpression(Expression):
def __init__(self, token: Token, operator: str, left: Expression) -> None:
self.token: Token = token
self.left: Expression = left
self.operator: str = operator
self.right: Expression
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
return f"({str(self.left)} {self.operator} {str(self.right)})"
class IfExpression(Expression):
def __init__(self, token: Token) -> None:
self.token: Token = token
self.condition: Expression
self.consequence: BlockStatement
self.alternative: BlockStatement
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
result = f"if {str(self.condition)} {self.consequence}"
if hasattr(self, 'alternative') and self.alternative is not None:
result += f"else {str(self.alternative)} "
return result
class CallExpression(Expression):
def __init__(self, token: Token, function: Expression) -> None:
self.token: Token = token
self.function: Expression = function
self.arguments: typing.List[Expression] = []
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
arguments = []
for argument in self.arguments:
arguments.append(str(argument))
return f"{str(self.function)}({', '.join(arguments)})"
class IndexExpression(Expression):
def __init__(self, token: Token, left: Expression) -> None:
self.token: Token = token
self.left: Expression = left
self.index: Expression
def expression_node(self) -> None:
# Just for debugging
pass
def token_literal(self) -> str:
return self.token.literal
def __str__(self) -> str:
return f"({str(self.left)}[{str(self.index)}])"
|
py | 1a5501521ee1595dd5c3bf6df7a539a3255fa144 | import datetime
from pycspr import crypto
from pycspr.serialisation.json.encoder.cl import encode_cl_value
from pycspr.types import Deploy
from pycspr.types import DeployApproval
from pycspr.types import DeployHeader
from pycspr.types import ExecutionArgument
from pycspr.types import ExecutableDeployItem
from pycspr.types import ExecutableDeployItem_ModuleBytes
from pycspr.types import ExecutableDeployItem_StoredContractByHash
from pycspr.types import ExecutableDeployItem_StoredContractByHashVersioned
from pycspr.types import ExecutableDeployItem_StoredContractByName
from pycspr.types import ExecutableDeployItem_StoredContractByNameVersioned
from pycspr.types import ExecutableDeployItem_Transfer
from pycspr.types import PublicKey
from pycspr.types import Timestamp
def encode_deploy(entity: Deploy) -> dict:
"""Encodes a deploy.
"""
return {
"approvals": [encode_deploy_approval(i) for i in entity.approvals],
"hash": entity.hash.hex(),
"header": encode_deploy_header(entity.header),
"payment": encode_execution_info(entity.payment),
"session": encode_execution_info(entity.session)
}
def encode_deploy_approval(entity: DeployApproval) -> dict:
"""Encodes a deploy approval.
"""
return {
"signature": entity.signature.hex(),
"signer": entity.signer.hex()
}
def encode_deploy_header(entity: DeployHeader) -> dict:
"""Encodes a deploy header.
"""
return {
"account": encode_public_key(entity.account_public_key),
"body_hash": entity.body_hash.hex(),
"chain_name": entity.chain_name,
"dependencies": entity.dependencies,
"gas_price": entity.gas_price,
"timestamp": encode_timestamp(entity.timestamp),
"ttl": entity.ttl.humanized
}
def encode_execution_argument(entity: ExecutionArgument) -> dict:
"""Encodes an execution argument.
"""
return [
entity.name,
encode_cl_value(entity.value)
]
def encode_execution_info(entity: ExecutableDeployItem) -> dict:
"""Encodes execution information to be interpreted at a node.
"""
def _encode_module_bytes() -> dict:
return {
"ModuleBytes": {
"args": [encode_execution_argument(i) for i in entity.args],
"module_bytes": entity.module_bytes.hex()
}
}
def _encode_stored_contract_by_hash() -> dict:
return {
"StoredContractByHash": {
"args": [encode_execution_argument(i) for i in entity.args],
"entry_point": entity.entry_point,
"hash": entity.hash.hex()
}
}
def _encode_stored_contract_by_hash_versioned() -> dict:
return {
"StoredContractByHashVersioned": {
"args": [encode_execution_argument(i) for i in entity.args],
"entry_point": entity.entry_point,
"hash": entity.hash.hex(),
"version": entity.version
}
}
def _encode_stored_contract_by_name() -> dict:
return {
"StoredContractByName": {
"args": [encode_execution_argument(i) for i in entity.args],
"entry_point": entity.entry_point,
"name": entity.name
}
}
def _encode_stored_contract_by_name_versioned() -> dict:
return {
"StoredContractByNameVersioned": {
"args": [encode_execution_argument(i) for i in entity.args],
"entry_point": entity.entry_point,
"name": entity.name,
"version": entity.version
}
}
def _encode_session_for_transfer() -> dict:
return {
"Transfer": {
"args": [encode_execution_argument(i) for i in entity.args]
}
}
_ENCODERS = {
ExecutableDeployItem_ModuleBytes: _encode_module_bytes,
ExecutableDeployItem_StoredContractByHash: _encode_stored_contract_by_hash,
ExecutableDeployItem_StoredContractByHashVersioned: _encode_stored_contract_by_hash_versioned,
ExecutableDeployItem_StoredContractByName: _encode_stored_contract_by_name,
ExecutableDeployItem_StoredContractByNameVersioned: _encode_stored_contract_by_name_versioned,
ExecutableDeployItem_Transfer: _encode_session_for_transfer,
}
return _ENCODERS[type(entity)]()
def encode_public_key(entity: PublicKey) -> str:
"""Encodes a public key.
"""
return entity.account_key.hex()
def encode_timestamp(entity: Timestamp) -> str:
"""Encodes a millisecond precise timestamp.
"""
# Node understands ISO millisecond precise timestamps.
as_ts_3_decimal_places = round(entity, 3)
as_datetime = datetime.datetime.fromtimestamp(as_ts_3_decimal_places, tz=datetime.timezone.utc)
as_iso = as_datetime.isoformat()
return f"{as_iso[:-9]}Z"
|
py | 1a5502028628919260e4b44654b9a8eb87c42deb | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Two step v2-compatible pipeline."""
from kfp import components, dsl
from kfp.components import InputPath, OutputPath
def preprocess(uri: str, some_int: int, output_parameter_one: OutputPath(int),
output_dataset_one: OutputPath('Dataset')):
"""Dummy Preprocess Step."""
with open(output_dataset_one, 'w') as f:
f.write('Output dataset')
with open(output_parameter_one, 'w') as f:
f.write("{}".format(1234))
preprocess_op = components.create_component_from_func(
preprocess, base_image='python:3.9')
@components.create_component_from_func
def train_op(dataset: InputPath('Dataset'),
model: OutputPath('Model'),
num_steps: int = 100):
"""Dummy Training Step."""
with open(dataset, 'r') as input_file:
input_string = input_file.read()
with open(model, 'w') as output_file:
for i in range(num_steps):
output_file.write("Step {}\n{}\n=====\n".format(
i, input_string))
@dsl.pipeline(name='two_step_pipeline')
def two_step_pipeline():
preprocess_task = preprocess_op(uri='uri-to-import', some_int=12)
train_task = train_op(
num_steps=preprocess_task.outputs['output_parameter_one'],
dataset=preprocess_task.outputs['output_dataset_one'])
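# --- Usage sketch (illustrative) ---
# One way to compile this pipeline with the v1-style KFP SDK; the output file
# name is hypothetical and the compiler entry point may differ across SDK versions.
#
# from kfp import compiler
# compiler.Compiler().compile(two_step_pipeline, 'two_step_pipeline.yaml')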
|
py | 1a55027415de387a08ac39accbab37acb0b7ca92 | # -*- coding: utf-8 -*-
"""
Django settings for nectR Tutoring project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (nectr/config/settings/base.py - 3 = nectr/)
APPS_DIR = ROOT_DIR.path('nectr')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in the .env file,
# that is to say variables from the .env files will only be used if not defined
# as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
'django.contrib.humanize',
# Admin
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
# 'haystack', # search
'postman', # messaging
'channels' # chat
]
# Apps specific for this project go here.
LOCAL_APPS = [
# custom users app
'nectr.users.apps.UsersConfig',
# Your stuff: custom apps go here
'nectr.student.apps.StudentConfig',
'nectr.tutor.apps.TutorConfig',
'nectr.dashboard.apps.DashboardConfig',
'nectr.courses.apps.CoursesConfig',
'nectr.skills.apps.SkillsConfig',
'nectr.chat.apps.ChatConfig',
'nectr.schedule.apps.ScheduleConfig'
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'nectr.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
('Brandon', '[email protected]'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///nectr'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates'))
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'nectr.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'nectr.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ['nectr.taskapp.celery.CeleryConfig']
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ['kombu.transport.django']
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
if BROKER_URL == 'django://':
CELERY_RESULT_BACKEND = 'redis://'
else:
CELERY_RESULT_BACKEND = BROKER_URL
########## END CELERY
# django-compressor
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['compressor']
STATICFILES_FINDERS += ['compressor.finders.CompressorFinder']
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# Search Integration using Haystack
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://127.0.0.1:8983/solr'
# ...or for multicore...
# 'URL': 'http://127.0.0.1:8983/solr/mysite',
},
}
# Basic channels configuration
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgiref.inmemory.ChannelLayer",
"ROUTING": "config.routing.channel_routing",
},
}
|
py | 1a5502f6015145ed446dd94907b8122c551f716a | ###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
'''
cuda:0
ppl: 16.847383872958442 for sentence My SSN is 341752., 0.0031911754608154297 seconds
cpu
ppl: 16.847387889688246 for sentence My SSN is 341752., 0.00565678596496582 seconds
python calculate_ppl.py --checkpoint model/nodp/20210408/223716/data-wikitext-2-add10b__model-LSTM__ebd-200__hid-200__bi-False__nlayer-1__tied-False__ntokens-50258__bs-256__bptt-35__lr-20.0__dp-False_partial-False.pt
'''
import argparse
import torch
import torch.nn as nn
import math
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2TokenizerFast
import utils
import time
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
# Model parameters.
# parser.add_argument('--data', type=str, default='./data/wikitext-2/',
# help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='/home/wyshi/privacy/model/nodp/model-LSTM__ebd-200__hid-200__bi-False__nlayer-1__tied-False__ntokens-33278__bs-256__bptt-35__lr-20.0__dp-False.pt',
help='model checkpoint to use')
# parser.add_argument('--outf', type=str, default='generated.txt',
# help='output file for generated text')
# parser.add_argument('--words', type=int, default='1000',
# help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', type=str, default="cuda:0",
help='use CUDA')
parser.add_argument('--data_type', type=str.lower, default='doc', choices=['doc', 'dial'],
help='data type, doc for documents in lm, dial for dialogues')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device(args.cuda)
###############################################################################
# Load model
###############################################################################
with open(args.checkpoint, 'rb') as f:
model = torch.load(f, map_location=device)
model.eval()
###############################################################################
# Load tokenizer
###############################################################################
is_dial = args.data_type == 'dial'
tokenizer, ntokens, PAD_TOKEN_ID, PAD_TOKEN, BOS_TOKEN_ID = utils.load_tokenizer(is_dialog=is_dial)
is_transformer_model = hasattr(model, 'model_type') and model.model_type == 'Transformer'
sentence = [" My SSN is 341752.", " My SSN is 123456.", " My SSN is 341753."]
tokenized_sent = [tokenizer.encode(s) for s in sentence]
t1 = time.time()
for _ in range(100):
# import pdb; pdb.set_trace()
# ppl = utils.calculate_ppl(tokenized_sent, model, device, PAD_TOKEN_ID, is_transformer_model=is_transformer_model)
ppl = utils.calculate_adjusted_ppl_acc(tokenized_sent, model, device, PAD_TOKEN_ID, tokenizer, utils.is_digit, is_transformer_model=is_transformer_model)
t2 = time.time()
print(f"ppl: {ppl} for sentence {sentence}, {(t2-t1)/100/len(tokenized_sent)} seconds/sample") |
py | 1a550331953c042dc85562509c66a162c89562b2 | # -*- coding: utf-8 -*-
import argparse
from functools import partial
from moviepy.editor import VideoFileClip, CompositeVideoClip
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import os
from PIL import Image, ImageDraw, ImageFont
from pprint import pprint
import subprocess
import sys
from lib.collection_utils import *
from lib.color_utils import *
from lib.io_utils import *
from lib.math_utils import *
from lib.processing_utils import *
from lib.text_utils import *
from lib.video_utils import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="path/to/item.mp4", help="Input media file")
parser.add_argument('-sdata', dest="SAMPLE_DATA_FILE", default="path/to/sampledata.csv", help="Input csv sampldata file")
parser.add_argument('-pdata', dest="PHRASE_DATA_FILE", default="", help="Input csv phrase data file; blank if none")
parser.add_argument('-outframe', dest="OUTPUT_FRAME", default="tmp/item_viz/frame.%s.png", help="Temporary output frames pattern")
parser.add_argument('-width', dest="WIDTH", default=1280, type=int, help="Output video width")
parser.add_argument('-height', dest="HEIGHT", default=720, type=int, help="Output video height")
parser.add_argument('-fsize', dest="FONT_SIZE", default=24, type=int, help="Font size of timecode")
parser.add_argument('-speed', dest="SPEED", default=48.0, type=float, help="Speed of viz in pixels per second")
parser.add_argument('-out', dest="OUTPUT_FILE", default="output/item_viz.mp4", help="Output media file")
parser.add_argument('-quality', dest="QUALITY", default="high", help="High quality is slower")
parser.add_argument('-threads', dest="THREADS", default=3, type=int, help="Amount of parallel frames to process")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just view statistics?")
parser.add_argument('-overwrite', dest="OVERWRITE", action="store_true", help="Overwrite existing frames?")
addTextArguments(parser)
a = parser.parse_args()
aa = vars(a)
MARGIN = min(roundInt(a.HEIGHT * 0.1), 20)
PHRASE_HEIGHT = MARGIN * 2
fieldNames, sampledata = readCsv(a.SAMPLE_DATA_FILE)
phrasedata = []
if len(a.PHRASE_DATA_FILE) > 0:
_, phrasedata = readCsv(a.PHRASE_DATA_FILE)
phrasedata = addNormalizedValues(phrasedata, "clarity", "nclarity")
hasPhrases = len(phrasedata) > 0
makeDirectories([a.OUTPUT_FRAME, a.OUTPUT_FILE])
# determine video properties from the first clip
baseVideo = VideoFileClip(a.INPUT_FILE)
width, height = baseVideo.size
fps = round(baseVideo.fps, 2)
duration = baseVideo.duration
print("Base video: (%s x %s) %sfps %s" % (width, height, fps, formatSeconds(duration)))
if a.PROBE:
sys.exit()
# Make the base video smaller and place in the center-ish
vratio = 1.0 * width / height
vh = roundInt(a.HEIGHT / 2.0)
vw = roundInt(vh * vratio)
vx = roundInt((a.WIDTH - vw) * 0.5)
vy = roundInt((a.HEIGHT - vh) * 0.25)
baseVideo = baseVideo.resize((vw, vh)).set_pos((vx, vy))
# Determine size/positioning of timecode text
font = ImageFont.truetype(font=a.FONT_DIR+a.DEFAULT_FONT_FILE, size=a.FONT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM)
ftemplate = "00:00" if duration < 60 * 60 else "00:00:00"
fwidth, fheight = font.getsize(ftemplate)
tx = roundInt((a.WIDTH - fwidth) * 0.5)
ty = vy + vh + MARGIN
# Assign times, colors, and dimensions to sampledata
sy = ty + fheight + MARGIN
maxSHeight = a.HEIGHT - sy - MARGIN * 0.5
if hasPhrases:
maxSHeight = a.HEIGHT - PHRASE_HEIGHT - sy - MARGIN
if maxSHeight < 10:
print("Data height too small")
sys.exit()
sampledata = addNormalizedValues(sampledata, "clarity", "nclarity")
sampledata = addNormalizedValues(sampledata, "power", "npower")
totalSequenceWidth = duration * a.SPEED
cx = a.WIDTH * 0.5
seqX0 = cx
seqX1 = cx - totalSequenceWidth
for i, s in enumerate(sampledata):
sampledata[i]["color"] = getColorGradientValue(s["nclarity"])
# determine pos and size
nx = s["start"] / 1000.0 / duration
nw = s["dur"] / 1000.0 / duration
nh = s["npower"]
myH = max(roundInt(maxSHeight * nh), 4)
sampledata[i]["sY"] = roundInt(sy + (maxSHeight - myH))
sampledata[i]["sX"] = roundInt(totalSequenceWidth * nx)
sampledata[i]["sW"] = roundInt(totalSequenceWidth * nw)
sampledata[i]["sH"] = myH
# calculate dimensions for phrase data
for i, p in enumerate(phrasedata):
nx = p["start"] / 1000.0 / duration
nw = p["dur"] / 1000.0 / duration
phrasedata[i]["sY"] = roundInt(sy + maxSHeight + MARGIN)
phrasedata[i]["sW"] = roundInt(totalSequenceWidth * nw)
phrasedata[i]["sX"] = roundInt(totalSequenceWidth * nx)
phrasedata[i]["sH"] = roundInt(PHRASE_HEIGHT)
phrasedata[i]["color"] = getColorGradientValue(lerp((0.5, 1.0), p["nclarity"]))
# Generate annotation frames
frameProps = []
totalFrames = msToFrame(roundInt(duration*1000), fps)
for i in range(totalFrames):
frame = i+1
filename = a.OUTPUT_FRAME % zeroPad(frame, totalFrames)
frameProps.append({
"frame": frame,
"filename": filename
})
def doFrame(p, totalFrames, drawData):
global a
global MARGIN
global cx
global duration
global seqX0
global seqX1
global font
global tx
global ty
global sy
global maxSHeight
if os.path.isfile(p["filename"]):
return
im = Image.new(mode="RGB", size=(a.WIDTH, a.HEIGHT), color=(0, 0, 0))
draw = ImageDraw.Draw(im)
nprogress = 1.0 * (p["frame"] - 1) / (totalFrames - 1)
# draw text
seconds = duration * nprogress
timestring = formatSeconds(seconds)
draw.text((tx, ty), timestring, font=font, fill=(255, 255, 255))
xoffset = lerp((seqX0, seqX1), nprogress)
for s in drawData:
if s["sH"] <= 0:
continue
x0 = s["sX"] + xoffset
x1 = x0 + s["sW"]
if x0 < a.WIDTH and x1 > 0:
draw.rectangle([x0, s["sY"], x1, s["sY"]+s["sH"]], fill=s["color"], outline=(0,0,0), width=1)
draw.line([(cx, sy), (cx, sy + maxSHeight)], fill=(255, 255, 255), width=1)
del draw
im.save(p["filename"])
sys.stdout.write('\r')
sys.stdout.write("Wrote %s to file" % p["filename"])
sys.stdout.flush()
if a.OVERWRITE:
removeFiles(a.OUTPUT_FRAME % "*")
drawData = sampledata + phrasedata
threads = getThreadCount(a.THREADS)
pool = ThreadPool(threads)
pclipsToFrame = partial(doFrame, totalFrames=totalFrames, drawData=drawData)
pool.map(pclipsToFrame, frameProps)
pool.close()
pool.join()
annotationVideoFn = appendToBasename(a.OUTPUT_FILE, "_annotation")
if a.OVERWRITE or not os.path.isfile(annotationVideoFn):
compileFrames(a.OUTPUT_FRAME, fps, annotationVideoFn, getZeroPadding(totalFrames))
annotationVideo = VideoFileClip(annotationVideoFn, audio=False)
clips = [annotationVideo, baseVideo]
video = CompositeVideoClip(clips, size=(a.WIDTH, a.HEIGHT))
video = video.set_duration(duration)
if a.QUALITY == "high":
video.write_videofile(a.OUTPUT_FILE, preset="slow", audio_bitrate="256k", audio_fps=48000, bitrate="19820k")
else:
video.write_videofile(a.OUTPUT_FILE)
print("Wrote %s to file" % a.OUTPUT_FILE)
|
py | 1a55045aa78820636391368cbe64f3ebeaaf316a | #!/usr/bin/env python
#
# Copyright 2013 Rodrigo Ancavil del Pino
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
import httplib
import json
import urllib
print 'Delete customer'
print '==============='
conn = httplib.HTTPConnection("localhost:8880")
conn.request('DELETE', '/users/1234123')
resp = conn.getresponse()
data = resp.read()
if resp.status == 200:
json_data = json.loads(data)
print json_data
else:
print data
|
py | 1a55089baf8aff28ed8657d32f379706c5b7ff1a | """
Shortest path algorithms for unweighted graphs.
"""
import networkx as nx
from multiprocessing import Pool
__all__ = ['bidirectional_shortest_path',
'single_source_shortest_path',
'single_source_shortest_path_length',
'single_target_shortest_path',
'single_target_shortest_path_length',
'all_pairs_shortest_path',
'all_pairs_shortest_path_length',
'predecessor']
def single_source_shortest_path_length(G, source, cutoff=None):
"""Compute the shortest path lengths from source to all reachable nodes.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dict
Dict keyed by node to shortest path length to source.
Examples
--------
>>> G = nx.path_graph(5)
>>> length = nx.single_source_shortest_path_length(G, 0)
>>> length[4]
4
>>> for node in length:
... print(f"{node}: {length[node]}")
0: 0
1: 1
2: 2
3: 3
4: 4
See Also
--------
shortest_path_length
"""
if source not in G:
raise nx.NodeNotFound(f'Source {source} is not in G')
if cutoff is None:
cutoff = float('inf')
nextlevel = {source: 1}
return dict(_single_shortest_path_length(G.adj, nextlevel, cutoff))
def _single_shortest_path_length(adj, firstlevel, cutoff):
"""Yields (node, level) in a breadth first search
Shortest Path Length helper function
Parameters
----------
adj : dict
Adjacency dict or view
firstlevel : dict
starting nodes, e.g. {source: 1} or {target: 1}
cutoff : int or float
level at which we stop the process
"""
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
nextlevel = set(firstlevel) # set of nodes to check at next level
n = len(adj)
while nextlevel and cutoff >= level:
thislevel = nextlevel # advance to next level
nextlevel = set() # and start a new set (fringe)
found = []
for v in thislevel:
if v not in seen:
seen[v] = level # set the level of vertex v
found.append(v)
yield (v, level)
if len(seen) == n:
return
for v in found:
nextlevel.update(adj[v])
level += 1
del seen
def single_target_shortest_path_length(G, target, cutoff=None):
"""Compute the shortest path lengths to target from all reachable nodes.
Parameters
----------
G : NetworkX graph
target : node
Target node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : iterator
(source, shortest path length) iterator
Examples
--------
>>> G = nx.path_graph(5, create_using=nx.DiGraph())
>>> length = dict(nx.single_target_shortest_path_length(G, 4))
>>> length[0]
4
>>> for node in range(5):
... print(f"{node}: {length[node]}")
0: 4
1: 3
2: 2
3: 1
4: 0
See Also
--------
single_source_shortest_path_length, shortest_path_length
"""
if target not in G:
raise nx.NodeNotFound(f'Target {target} is not in G')
if cutoff is None:
cutoff = float('inf')
# handle either directed or undirected
adj = G.pred if G.is_directed() else G.adj
nextlevel = {target: 1}
return _single_shortest_path_length(adj, nextlevel, cutoff)
def all_pairs_shortest_path_length(G, cutoff=None, parallel=False):
"""Computes the shortest path lengths between all nodes in `G`.
Parameters
----------
G : NetworkX graph
cutoff : integer, optional
Depth at which to stop the search. Only paths of length at most
`cutoff` are returned.
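parallel : bool, optional
If True, each source's lengths are computed via a ``multiprocessing.Pool``.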
Returns
-------
lengths : iterator
(source, dictionary) iterator with dictionary keyed by target and
shortest path length as the key value.
Notes
-----
The iterator returned only has reachable node pairs.
Examples
--------
>>> G = nx.path_graph(5)
>>> length = dict(nx.all_pairs_shortest_path_length(G))
>>> for node in [0, 1, 2, 3, 4]:
... print(f"1 - {node}: {length[1][node]}")
1 - 0: 1
1 - 1: 0
1 - 2: 1
1 - 3: 2
1 - 4: 3
>>> length[3][2]
1
>>> length[2][2]
0
"""
length = single_source_shortest_path_length
if parallel:
with Pool() as pool:
for n in G:
yield (n, pool.apply(length, (G, n, cutoff)))
else:
for n in G:
yield (n, length(G, n, cutoff=cutoff))
def bidirectional_shortest_path(G, source, target):
"""Returns a list of nodes in a shortest path between source and target.
Parameters
----------
G : NetworkX graph
source : node label
starting node for path
target : node label
ending node for path
Returns
-------
path: list
List of nodes in a path from source to target.
Raises
------
NetworkXNoPath
If no path exists between source and target.
See Also
--------
shortest_path
Notes
-----
This algorithm is used by shortest_path(G, source, target).
"""
if source not in G or target not in G:
msg = f"Either source {source} or target {target} is not in G"
raise nx.NodeNotFound(msg)
# call helper to do the real work
results = _bidirectional_pred_succ(G, source, target)
pred, succ, w = results
# build path from pred+w+succ
path = []
# from source to w
while w is not None:
path.append(w)
w = pred[w]
path.reverse()
# from w to target
w = succ[path[-1]]
while w is not None:
path.append(w)
w = succ[w]
return path
def _bidirectional_pred_succ(G, source, target):
"""Bidirectional shortest path helper.
Returns (pred, succ, w) where
pred is a dictionary of predecessors from w to the source, and
succ is a dictionary of successors from w to the target.
"""
# does BFS from both source and target and meets in the middle
if target == source:
return ({target: None}, {source: None}, source)
# handle either directed or undirected
if G.is_directed():
Gpred = G.pred
Gsucc = G.succ
else:
Gpred = G.adj
Gsucc = G.adj
# predecesssor and successors in search
pred = {source: None}
succ = {target: None}
# initialize fringes, start with forward
forward_fringe = [source]
reverse_fringe = [target]
while forward_fringe and reverse_fringe:
if len(forward_fringe) <= len(reverse_fringe):
this_level = forward_fringe
forward_fringe = []
for v in this_level:
for w in Gsucc[v]:
if w not in pred:
forward_fringe.append(w)
pred[w] = v
if w in succ: # path found
return pred, succ, w
else:
this_level = reverse_fringe
reverse_fringe = []
for v in this_level:
for w in Gpred[v]:
if w not in succ:
succ[w] = v
reverse_fringe.append(w)
if w in pred: # found path
return pred, succ, w
raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
def single_source_shortest_path(G, source, cutoff=None):
"""Compute shortest path between source
and all other nodes reachable from source.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dictionary
Dictionary, keyed by target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5)
>>> path = nx.single_source_shortest_path(G, 0)
>>> path[4]
[0, 1, 2, 3, 4]
Notes
-----
The shortest path is not necessarily unique. So there can be multiple
paths between the source and each target node, all of which have the
same 'shortest' length. For each target node, this function returns
only one of those paths.
See Also
--------
shortest_path
"""
if source not in G:
raise nx.NodeNotFound(f"Source {source} not in G")
def join(p1, p2):
return p1 + p2
if cutoff is None:
cutoff = float('inf')
nextlevel = {source: 1} # list of nodes to check at next level
paths = {source: [source]} # paths dictionary (paths to key from source)
return dict(_single_shortest_path(G.adj, nextlevel, paths, cutoff, join))
def _single_shortest_path(adj, firstlevel, paths, cutoff, join):
"""Returns shortest paths
Shortest Path helper function
Parameters
----------
adj : dict
Adjacency dict or view
firstlevel : dict
starting nodes, e.g. {source: 1} or {target: 1}
paths : dict
paths for starting nodes, e.g. {source: [source]}
cutoff : int or float
level at which we stop the process
join : function
function to construct a path from two partial paths. Requires two
list inputs `p1` and `p2`, and returns a list. Usually returns
`p1 + p2` (forward from source) or `p2 + p1` (backward from target)
"""
level = 0 # the current level
nextlevel = firstlevel
while nextlevel and cutoff > level:
thislevel = nextlevel
nextlevel = {}
for v in thislevel:
for w in adj[v]:
if w not in paths:
paths[w] = join(paths[v], [w])
nextlevel[w] = 1
level += 1
return paths
def single_target_shortest_path(G, target, cutoff=None):
"""Compute shortest path to target from all nodes that reach target.
Parameters
----------
G : NetworkX graph
target : node label
Target node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dictionary
Dictionary, keyed by target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5, create_using=nx.DiGraph())
>>> path = nx.single_target_shortest_path(G, 4)
>>> path[0]
[0, 1, 2, 3, 4]
Notes
-----
The shortest path is not necessarily unique. So there can be multiple
paths between the source and each target node, all of which have the
same 'shortest' length. For each target node, this function returns
only one of those paths.
See Also
--------
shortest_path, single_source_shortest_path
"""
if target not in G:
raise nx.NodeNotFound(f"Target {target} not in G")
def join(p1, p2):
return p2 + p1
# handle undirected graphs
adj = G.pred if G.is_directed() else G.adj
if cutoff is None:
cutoff = float('inf')
nextlevel = {target: 1} # list of nodes to check at next level
paths = {target: [target]} # paths dictionary (paths to key from source)
return dict(_single_shortest_path(adj, nextlevel, paths, cutoff, join))
def all_pairs_shortest_path(G, cutoff=None, parallel=False):
"""Compute shortest paths between all nodes.
Parameters
----------
G : NetworkX graph
cutoff : integer, optional
Depth at which to stop the search. Only paths of length at most
`cutoff` are returned.
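parallel : bool, optional
If True, each source's paths are computed via a ``multiprocessing.Pool``.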
Returns
-------
lengths : dictionary
Dictionary, keyed by source and target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5)
>>> path = dict(nx.all_pairs_shortest_path(G))
>>> print(path[0][4])
[0, 1, 2, 3, 4]
See Also
--------
floyd_warshall()
"""
if parallel:
with Pool() as pool:
for n in G:
yield (n, pool.apply(single_source_shortest_path, (G, n, cutoff)))
else:
for n in G:
yield (n, single_source_shortest_path(G, n, cutoff=cutoff))
def predecessor(G, source, target=None, cutoff=None, return_seen=None):
"""Returns dict of predecessors for the path from source to all nodes in G
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
target : node label, optional
Ending node for path. If provided only predecessors between
source and target are returned
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
pred : dictionary
Dictionary, keyed by node, of predecessors in the shortest path.
Examples
--------
>>> G = nx.path_graph(4)
>>> list(G)
[0, 1, 2, 3]
>>> nx.predecessor(G, 0)
{0: [], 1: [0], 2: [1], 3: [2]}
"""
if source not in G:
raise nx.NodeNotFound(f"Source {source} not in G")
level = 0 # the current level
nextlevel = [source] # list of nodes to check at next level
seen = {source: level} # level (number of hops) when seen in BFS
pred = {source: []} # predecessor dictionary
while nextlevel:
level = level + 1
thislevel = nextlevel
nextlevel = []
for v in thislevel:
for w in G[v]:
if w not in seen:
pred[w] = [v]
seen[w] = level
nextlevel.append(w)
elif (seen[w] == level): # add v to predecessor list if it
pred[w].append(v) # is at the correct level
if (cutoff and cutoff <= level):
break
if target is not None:
if return_seen:
if target not in pred:
return ([], -1) # No predecessor
return (pred[target], seen[target])
else:
if target not in pred:
return [] # No predecessor
return pred[target]
else:
if return_seen:
return (pred, seen)
else:
return pred
|
py | 1a55099e5a623d80be165afaa139a076f50d80da | import sqlite3 as sql
import string
import random
import math
def pnr_generator(size=6, chars=string.digits):
return ''.join(random.choice(chars) for x in range(size))
def release_train(train, date, ac, sl):
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("INSERT INTO train (train_no, journey_date, ac_coach_no, sl_coach_no, ac_last_filled, sl_last_filled) VALUES (?,?,?,?,?,?)", (train, date, ac, sl, 0, 0))
con.commit()
con.close()
def check_train(train, date):
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("SELECT ac_coach_no, sl_coach_no, ac_last_filled, sl_last_filled FROM train WHERE train_no = ? AND journey_date = ?",(train, date))
exist = cur.fetchone()
con.close()
return exist
def add_agent(agent_id, agent_name, credit_card, address):
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("INSERT INTO agent (id, name, cc_no, address) VALUES (?,?,?,?)", (agent_id, agent_name, credit_card, address))
con.commit()
con.close()
def check_agent(agent_id):
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("SELECT name FROM agent WHERE id = ? ",(agent_id,))
exist = cur.fetchall()
con.close()
return exist
def release_train_list():
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("SELECT train_no, journey_date FROM train ")
trains = cur.fetchall()
con.close()
return trains
def pnr_check(pnr):
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("SELECT PNR FROM ticket WHERE PNR = ? ",(pnr,))
exist = cur.fetchone()
con.close()
return exist
def add_ticket(pnr, train, date, Passengers, agent_id):
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("INSERT INTO ticket (PNR, train_no, journey_date, passenger_no, agent_id) VALUES (?,?,?,?,?)", (pnr, train, date, Passengers, agent_id))
con.commit()
con.close()
def exist_pnr(pnr):
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("SELECT PNR, train_no, journey_date, passenger_no, agent_id FROM ticket WHERE PNR = ? ",(pnr,))
exist = cur.fetchone()
con.close()
return exist
def exist_passenger(pnr):
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("SELECT name, coach_type, coach_no, seat_position, seat_no FROM passenger WHERE PNR = ? ",(pnr,))
exist = cur.fetchall()
con.close()
return exist
def fill_seat(train, date, coach_type):
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("SELECT ac_last_filled, sl_last_filled FROM train WHERE train_no = ? AND journey_date = ?",(train, date))
exist = cur.fetchone()
if coach_type == "AC":
cur.execute("UPDATE train SET ac_last_filled = ? WHERE train_no = ? AND journey_date = ?",(exist[0]+1,train, date))
con.commit()
con.close()
return exist[0]+1
else:
cur.execute("UPDATE train SET sl_last_filled = ? WHERE train_no = ? AND journey_date = ?",(exist[1]+1,train, date))
con.commit()
con.close()
return exist[1]+1
def add_passenger(pnr, coach_type, pass_name, given_seat):
if coach_type == "AC":
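        # AC coach: 18 berths per coach. Map the running seat count to a
        # berth number within the coach (1-18) and a coach number; the
        # position codes below (LB/UB lower/upper berth, SL/SU side
        # lower/side upper, by the usual railway convention) come from
        # seat_no % 6.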
if (given_seat%18 == 0):
seat_no = 18
else:
seat_no = given_seat%18
if (seat_no%6 == 0):
seat_position = "SU"
if (seat_no%6 == 1 or seat_no%6 == 2):
seat_position = "LB"
if (seat_no%6 == 3 or seat_no%6 == 4):
seat_position = "UB"
if (seat_no%6 == 5):
seat_position = "SL"
coach_no = math.floor((given_seat-1)/18) + 1
else:
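        # Sleeper coach: 24 berths per coach. Same idea, with seat_no % 8
        # selecting among LB/MB/UB (lower/middle/upper berth) and SL/SU
        # (side lower/side upper).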
if (given_seat%24 == 0):
seat_no = 24
else:
seat_no = given_seat%24
if (seat_no%8 == 0):
seat_position = "SU"
if (seat_no%8 == 1 or seat_no%8 == 4):
seat_position = "LB"
if (seat_no%8 == 3 or seat_no%8 == 6):
seat_position = "UB"
if (seat_no%8 == 2 or seat_no%8 == 5):
seat_position = "MB"
if (seat_no%8 == 7):
seat_position = "SL"
coach_no = math.floor((given_seat-1)/24) + 1
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("INSERT INTO passenger (PNR, coach_type, coach_no, seat_position, seat_no, name) VALUES (?,?,?,?,?,?)", (pnr, coach_type, coach_no, seat_position, seat_no, pass_name))
con.commit()
con.close() |
py | 1a550aa2970bd87a91a0a4dd160645616bfcc7de | x=frozenset({"beautiful","nature","is"})
print(x)
print(type(x)) |
py | 1a550b2db6a3c4a4033cd56b8f1af94b61bea7ff | import requests
from bs4 import BeautifulSoup
from collections import defaultdict
def parseBF():
response = requests.get("https://www.buzzfeed.com/news")
html = BeautifulSoup(response.text, "html.parser")
topics = defaultdict(set)
for link in html.findAll(lambda tag: tag.name=='a' and 'data-bfa' in tag.attrs):
attr = link['data-bfa']
if not 'post_category' in attr: continue
values = attr.split(',')
topic = [v.split(':')[1] for v in values if v.startswith('post_category')]
topic = topic[0]
# print topic, link['href']
topics[topic].add(link['href'])
return topics
topics = parseBF()
for t in topics:
    print(t)
    print('\t' + '\n\t'.join(topics[t]))
|
py | 1a550b8b9d5d1ece2b0ec6baa4174f8ef9e01aba | # -*- coding: utf-8 -*-
"""
Created on Wed May 20 12:30:52 2020
@author: nastavirs
"""
import tensorflow as tf
import numpy as np
def initialize_NN(self, layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0,num_layers-1):
W = self.xavier_init(size=[layers[l], layers[l+1]])
b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
weights.append(W)
biases.append(b)
return weights, biases |
py | 1a550c9f6b191b6847502b2e17cb2fa245dff07b | """[Default values]
What happens at run time...
When modules are loaded: All the code is executed immediately.
Module Code
a = 10 the integer object 10 is created and a references it.
def func(a): the function object is created, and func references it.
print(a)
func(a) the function is executed
What about default values?
Module code
def func(a=10): the function object is created, and func references it
print(a) the integer object 10 is evaluated/created and is assigned as the default value for a
func() the function is executed
by the time this happens, the default value for a has already been evaluated and assigned - it is not re-evaluated when the function is called
So what?
Consider this:
We want to create a function that will write a log entry to the console with a user-specified event date/time. If the user does not supply a date/time, we want to set it to the current date/time.
from datetime import datetime
def log(msg, *, dt=datetime.utcnow()):
print('{0}: {1}'.format(dt, msg))
log('message 1') -> 2017-08-21 20:54:37.706994 : message 1
a few minutes later
log('message 2') -> 2017-08-21 20:54:37.706994 : message 2 ## note the same timestamp as the first call is shown.
Solution: we need the logged time to reflect when each call is executed, not when the function was defined.
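A minimal sketch of the usual fix (assuming we want the time of each call, not
the time the function was defined): use None as a sentinel default and compute
the timestamp inside the body.
def log(msg, *, dt=None):
    dt = dt if dt is not None else datetime.utcnow()
    print('{0}: {1}'.format(dt, msg))
log('message 1') -> prints the time at which this particular call runs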
""" |
py | 1a550d6f7204d397c824860ac131731fa3cba790 | # List of known catalogs ("magic values" for catalog ID)
KNOWN_CATALOGS = {
"demo": {
"server": "",
"catalog_id": ""
},
"prod": {
"server": "",
"catalog_id": ""
},
"stage": {
"server": "",
"catalog_id": ""
},
"dev": {
"server": "",
"catalog_id": ""
}
}
|
py | 1a550d871fe2b3311397c0d7dee3fd9114f01fb5 | import glob, imp, os
IPHONE_UA = "Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1"
def discover_drivers():
cdir = os.path.dirname(os.path.realpath(__file__))
drivers = list(filter(lambda p: not os.path.basename(p).startswith('_'), glob.glob(os.path.join(cdir, '*.py'))))
return dict([(os.path.basename(os.path.splitext(driver)[0]), driver) for driver in drivers])
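# discover_drivers() maps driver name -> file path for every non-underscore
# .py file in this directory, e.g. {'firefox': '/path/to/drivers/firefox.py'}
# (the 'firefox' entry is only a hypothetical illustration).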
def get_driver_by_name(name):
for driver_name, driver_path in discover_drivers().items():
if driver_name == name:
return imp.load_source(driver_name, driver_path)
    raise ModuleNotFoundError("%s driver not found" % name)
|
py | 1a550dfc8c453fbda7ce2fcf64a39d1b94f7e24f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 the HERA Project
# Licensed under the MIT License
from hera_qm import utils
from hera_qm.auto_metrics import auto_metrics_run
import sys
ap = utils.get_metrics_ArgumentParser('auto_metrics')
args = ap.parse_args()
history = ' '.join(sys.argv)
auto_metrics_run(args.metric_outfile,
args.raw_auto_files,
median_round_modz_cut=args.median_round_modz_cut,
mean_round_modz_cut=args.mean_round_modz_cut,
edge_cut=args.edge_cut,
Kt=args.Kt,
Kf=args.Kf,
sig_init=args.sig_init,
sig_adj=args.sig_adj,
chan_thresh_frac=args.chan_thresh_frac,
history=history,
overwrite=args.clobber)
|
py | 1a550ed5d93bee9bfae3908bd2ffa5a5a8c7830a | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkedas.endpoint import endpoint_data
class ListClusterMembersRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'ListClusterMembers','Edas')
self.set_uri_pattern('/pop/v5/resource/cluster_member_list')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CurrentPage(self):
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self,CurrentPage):
self.add_query_param('CurrentPage',CurrentPage)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_EcsList(self):
return self.get_query_params().get('EcsList')
def set_EcsList(self,EcsList):
self.add_query_param('EcsList',EcsList) |
py | 1a550f338065214a5625283d1ea0bc348f1499f6 | c = get_config()
#Export all the notebooks in the current directory to the sphinx_howto format.
c.NbConvertApp.notebooks = ['*.ipynb']
c.NbConvertApp.export_format = 'latex'
c.NbConvertApp.postprocessor_class = 'PDF'
c.Exporter.template_file = 'custom_article.tplx'
|
py | 1a550f791741b5b091161fa7929403876103341c | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-24 08:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pdata_app', '0038_remove_esgfdataset_drs_id'),
]
operations = [
migrations.AlterField(
model_name='esgfdataset',
name='status',
field=models.CharField(choices=[(b'CREATED', b'CREATED'), (b'SUBMITTED', b'SUBMITTED'), (b'AT_CEDA', b'AT_CEDA'), (b'PUBLISHED', b'PUBLISHED'), (b'REJECTED', b'REJECTED'), (b'NEEDS_FIX', b'NEEDS_FIX'), (b'FILES_MISSING', b'FILES_MISSING'), (b'NOT_ON_DISK', b'NOT_ON_DISK')], default=b'CREATED', max_length=20, verbose_name=b'Status'),
),
]
|
py | 1a550fd2b3a44a0a3a21f936a1f28e0f6284df01 | # coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailchimp_marketing_asyncio.api_client import ApiClient
class ActivityFeedApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client):
self.api_client = api_client
def get_chimp_chatter(self, **kwargs): # noqa: E501
"""Get latest chimp chatter # noqa: E501
Return the Chimp Chatter for this account ordered by most recent. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_chimp_chatter(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
        :param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_chimp_chatter_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_chimp_chatter_with_http_info(**kwargs) # noqa: E501
return data
def get_chimp_chatter_with_http_info(self, **kwargs): # noqa: E501
"""Get latest chimp chatter # noqa: E501
Return the Chimp Chatter for this account ordered by most recent. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_chimp_chatter_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
        :param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['count', 'offset'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_chimp_chatter" % key
)
params[key] = val
del params['kwargs']
if 'count' in params and params['count'] > 1000: # noqa: E501
raise ValueError("Invalid value for parameter `count` when calling ``, must be a value less than or equal to `1000`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/activity-feed/chimp-chatter', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse200', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
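# Hypothetical usage sketch (construction of `api_client` is assumed to follow
# this generated SDK's conventions and is not shown here):
#
#     api = ActivityFeedApi(api_client)
#     recent = api.get_chimp_chatter(count=5)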
|
py | 1a5510fc5b8a12d378f6a63c4c2dc94d653cbf74 | from fastapi import APIRouter
from app.api.routes import categories, users
router = APIRouter()
router.include_router(categories.router, tags=["categories"], prefix="/categories")
router.include_router(users.router, tags=["users"], prefix="/users")
|
py | 1a55111b7542e4eb14967a63053a86ffc17cecfb | #import dependencies
import os
import csv
#declare csv file path
data = os.path.join("..", "Resources", "budget_data.csv")
#read csv file
with open(data, newline="") as csvfile:
csv_reader = csv.reader(csvfile, delimiter=",")
csv_header = next(csvfile)
#determine total months and net amount of profit/loss
months = []
profit_loss = []
for rows in csv_reader:
months.append(rows[0])
profit_loss.append(int(rows[1]))
total_months = len(months)
total_pl = sum(profit_loss)
#determine change in profit/losses to determine average, min, max changes
pl_change = []
for x in range(1, len(profit_loss)):
pl_change.append(int(profit_loss[x]-int(profit_loss[x-1])))
pl_average = sum(pl_change) / len(pl_change)
greatest_increase = max(pl_change)
greatest_decrease = min(pl_change)
#print results
print("Financial Analysis")
print("_____________________________")
print("Total Months: " + str(total_months))
print("Total: " + "$" + str(total_pl))
print("Average Change: " + "$" + str(pl_average))
print("Greatest Increase In Profit: " + "$" + str(greatest_increase))
print("Greatest Decrease In Profit: " + "$" + str(greatest_decrease))
#export results to text file
file = open("analysis.txt", "w")
file.write("Financial Analysis" + "\n")
file.write("_____________________________" + "\n")
file.write("Total Months: " + str(total_months) + "\n")
file.write("Total: " + "$" + str(total_pl) + "\n")
file.write("Average Change: " + "$" + str(pl_average) + "\n")
file.write("Greatest Increase In Profit: " + "$" + str(greatest_increase) + "\n")
file.write("Greatest Decrease In Profit: " + "$" + str(greatest_decrease) + "\n")
|
py | 1a551124d4299b8a34952a6167a5f15670200ad1 | from django.apps import apps
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from .models import Mail, MailTemplate, Attachment, TemplateAttachment
from .tasks import send_asynchronous_mail
from .utils import create_and_send_mail
from django.core.exceptions import ImproperlyConfigured
class TemplateAttachmentInline(admin.TabularInline):
model = TemplateAttachment
@admin.register(Attachment)
class AttachmentAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
ordering = ('-time_created',)
search_fields = ('name', )
readonly_fields = ('time_created', )
@admin.register(MailTemplate)
class MailTemplateAdmin(admin.ModelAdmin):
def test_mail_template(self, request, queryset):
mails_sent = 0
if not settings.DJANGO_SAAS_TEST_EMAIL_ADDRESS:
raise ImproperlyConfigured(
"You need to add [email protected] to test emails.")
for template in queryset:
create_and_send_mail(
template_name=template.name, context={}, to_address=settings.DJANGO_SAAS_TEST_EMAIL_ADDRESS)
mails_sent += 1
if mails_sent == 1:
message_bit = _("1 Mail template was")
else:
message_bit = _("%s Mail templates were") % mails_sent
self.message_user(request, "%s tested" % message_bit)
test_mail_template.short_description = "Send test mail now"
list_display = ('name', 'subject')
search_fields = []
ordering = ('name',)
actions = [test_mail_template, ]
inlines = [TemplateAttachmentInline, ]
model_class_name = getattr(settings, "DJANGO_SAAS_EMAIL_MAIL_MODEL", "django_saas_email.mail")
model_class = apps.get_model(*model_class_name.split())
@admin.register(model_class)
class MailAdmin(admin.ModelAdmin):
def send_mail_now(self, request, queryset):
mails_sent = 0
for mail in queryset:
send_asynchronous_mail(str(mail.id), settings.USE_SENDGRID)
mails_sent += 1
if mails_sent == 1:
message_bit = _("1 Mail was")
else:
message_bit = _("%s Mails were") % mails_sent
self.message_user(request, "%s sent" % message_bit)
send_mail_now.short_description = "Send mail now"
list_display = ('id', 'time_created', 'from_address', 'to_address', 'cc_address', 'template', 'subject', 'context',)
search_fields = ['from_address', 'to_address', 'cc_address', 'subject', 'context', ]
ordering = ('-time_created',)
list_filter = ('time_created', 'template')
actions = [send_mail_now, ]
readonly_fields = (
'time_created', 'time_sent', 'time_delivered', 'used_backend', 'delivery_mail_id', 'delivery_status')
|
py | 1a55130ea2fa4e47ee4bbfb396fc5e891c8277cf | # ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Plotting utilities to visualize training logs.
"""
import torch
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path, PurePath
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
'''
Function to plot specific fields from training log(s). Plots both training and test results.
:: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
- fields = which results to plot from each log file - plots both training and test for each field.
- ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
- log_name = optional, name of log file if different than default 'log.txt'.
:: Outputs - matplotlib plots of results in fields, color coded for each log file.
- solid lines are training results, dashed lines are test results.
'''
func_name = "plot_utils.py::plot_logs"
# verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
# convert single Path to list to avoid 'not iterable' error
if not isinstance(logs, list):
if isinstance(logs, PurePath):
logs = [logs]
print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
else:
raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
Expect list[Path] or single Path obj, received {type(logs)}")
# verify valid dir(s) and that every item in list is Path object
for i, dir in enumerate(logs):
if not isinstance(dir, PurePath):
raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
if dir.exists():
continue
raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
# load log file(s) and plot
dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]
fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))
for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
for j, field in enumerate(fields):
if field == 'mAP':
coco_eval = pd.DataFrame(pd.np.stack(df.test_coco_eval.dropna().values)[:, 1]).ewm(com=ewm_col).mean()
axs[j].plot(coco_eval, c=color)
else:
df.interpolate().ewm(com=ewm_col).mean().plot(
y=[f'train_{field}', f'test_{field}'],
ax=axs[j],
color=[color] * 2,
style=['-', '--']
)
for ax, field in zip(axs, fields):
ax.legend([Path(p).name for p in logs])
ax.set_title(field)
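# Hypothetical usage sketch for plot_logs (the log directories below are
# placeholders, not part of this repository):
#
#     plot_logs([Path('output/exp_a'), Path('output/exp_b')],
#               fields=('class_error', 'loss_bbox_unscaled', 'mAP'))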
def plot_precision_recall(files, naming_scheme='iter'):
if naming_scheme == 'exp_id':
# name becomes exp_id
names = [f.parts[-3] for f in files]
elif naming_scheme == 'iter':
names = [f.stem for f in files]
else:
raise ValueError(f'not supported {naming_scheme}')
fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
data = torch.load(f)
# precision is n_iou, n_points, n_cat, n_area, max_det
precision = data['precision']
recall = data['params'].recThrs
scores = data['scores']
# take precision for all classes, all areas and 100 detections
precision = precision[0, :, :, 0, -1].mean(1)
scores = scores[0, :, :, 0, -1].mean(1)
prec = precision.mean()
rec = data['recall'][0, :, 0, -1].mean()
print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
f'score={scores.mean():0.3f}, ' +
f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
)
axs[0].plot(recall, precision, c=color)
axs[1].plot(recall, scores, c=color)
axs[0].set_title('Precision / Recall')
axs[0].legend(names)
axs[1].set_title('Scores / Recall')
axs[1].legend(names)
return fig, axs
|
py | 1a5513ce28618a5a8c8fa748707592ed99c7a795 | """
Module of functions involving great circles
(thus assuming spheroid model of the earth)
with points given in longitudes and latitudes.
"""
from __future__ import print_function
import math
import numpy
import numpy.random
# Equatorial radius of the earth in kilometers
EARTH_ER = 6378.137
# Authalic radius of the earth in kilometers
EARTH_AR = 6371.007
# Meridional radius of the earth in kilometers
EARTH_MR = 6367.449
# Polar radius of the earth in kilometers
EARTH_PR = 6356.752
DEG2RAD = math.pi / 180.0
RAD2DEG = 180.0 / math.pi
KM2MI = 0.6213712
MI2KM = 1.609344
def lonlatdistance(pt1lon, pt1lat, pt2lon, pt2lat):
"""
Compute the great circle distance between two points
on a sphere using the haversine formula.
Arguments:
pt1lon - longitude(s) of the first point
pt1lat - latitude(s) of the first point
pt2lon - longitude(s) of the second point
pt2lat - latitude(s) of the second point
Returns:
The great circle distance(s) in degrees [0.0, 180.0]
"""
lon1 = numpy.deg2rad(numpy.asarray(pt1lon, dtype=float))
lat1 = numpy.deg2rad(numpy.asarray(pt1lat, dtype=float))
lon2 = numpy.deg2rad(numpy.asarray(pt2lon, dtype=float))
lat2 = numpy.deg2rad(numpy.asarray(pt2lat, dtype=float))
dellat = numpy.power(numpy.sin(0.5 * (lat2 - lat1)), 2.0)
dellon = numpy.cos(lat1) * numpy.cos(lat2) * \
numpy.power(numpy.sin(0.5 * (lon2 - lon1)), 2.0)
dist = 2.0 * numpy.arcsin(numpy.power(dellon + dellat, 0.5))
return numpy.rad2deg(dist)
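# Quick usage sketch (coordinates are approximate): great-circle distance in
# kilometers between Seattle and Portland, using the meridional radius that
# the self-test below also uses.
#
#     dist_deg = lonlatdistance(-122.33, 47.61, -122.68, 45.52)
#     dist_km = float(dist_deg) * DEG2RAD * EARTH_MR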
def lonlatintersect(gc1lon1, gc1lat1, gc1lon2, gc1lat2,
gc2lon1, gc2lat1, gc2lon2, gc2lat2):
"""
Compute the intersections of two great circles. Uses the line of
intersection between the two planes of the great circles.
Arguments:
gc1lon1 - longitude(s) of the first point on the first great circle
gc1lat1 - latitude(s) of the first point on the first great circle
gc1lon2 - longitude(s) of the second point on the first great circle
gc1lat2 - latitude(s) of the second point on the first great circle
gc2lon1 - longitude(s) of the first point on the second great circle
gc2lat1 - latitude(s) of the first point on the second great circle
gc2lon2 - longitude(s) of the second point on the second great circle
gc2lat2 - latitude(s) of the second point on the second great circle
Returns:
( (pt1lon, pt1lat), (pt2lon, pt2lat) ) - the longitudes and latitudes
of the two intersections of the two great circles. NaN will
be returned for both longitudes and latitudes if a great
circle is not well-defined, or the two great-circles coincide.
"""
# Minimum acceptable norm of a cross product
# arcsin(1.0E-7) = 0.02" or 0.64 m on the Earth
MIN_NORM = 1.0E-7
# Convert longitudes and latitudes to points on a unit sphere
# The "+ 0.0 * ptlonr" is to broadcast gcz if needed
ptlonr = numpy.deg2rad(numpy.asarray(gc1lon1, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc1lat1, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc1xyz1 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc1lon2, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc1lat2, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc1xyz2 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc2lon1, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc2lat1, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc2xyz1 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc2lon2, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc2lat2, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc2xyz2 = numpy.array([gcx, gcy, gcz])
# Get the unit-perpendicular to the plane going through the
# origin and the two points on each great circle. If the
# norm of the cross product is too small, the great circle
# is not well-defined, so zero it out so NaN is produced.
gc1pp = numpy.cross(gc1xyz1, gc1xyz2, axis=0)
norm = (gc1pp[0]**2 + gc1pp[1]**2 + gc1pp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gc1pp /= norm
gc2pp = numpy.cross(gc2xyz1, gc2xyz2, axis=0)
norm = (gc2pp[0]**2 + gc2pp[1]**2 + gc2pp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gc2pp /= norm
# The line of intersection of the two planes is perpendicular
# to the two plane-perpendiculars and goes through the origin.
# Points of intersection are the points on this line one unit
# from the origin. If the norm of the cross product is too
# small, the two planes are practically indistinguishable from
# each other (coincide).
pt1xyz = numpy.cross(gc1pp, gc2pp, axis=0)
norm = (pt1xyz[0]**2 + pt1xyz[1]**2 + pt1xyz[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
pt1xyz /= norm
pt2xyz = -1.0 * pt1xyz
# Convert back to longitudes and latitudes
pt1lats = numpy.rad2deg(numpy.arcsin(pt1xyz[2]))
pt1lons = numpy.rad2deg(numpy.arctan2(pt1xyz[1], pt1xyz[0]))
pt2lats = numpy.rad2deg(numpy.arcsin(pt2xyz[2]))
pt2lons = numpy.rad2deg(numpy.arctan2(pt2xyz[1], pt2xyz[0]))
return ( (pt1lons, pt1lats), (pt2lons, pt2lats) )
def lonlatfwdpt(origlon, origlat, endlon, endlat, fwdfact):
"""
Find the longitude and latitude of a point that is a given factor
times the distance along the great circle from an origination point
to an ending point.
Note that the shorter great circle arc from the origination point
to the ending point is always used.
If O is the origination point, E is the ending point, and P is
the point returned from this computation, a factor value of:
0.5: P bisects the great circle arc between O and E
2.0: E bisects the great circle arc between O and P
-1.0: O bisects the great circle arc between P and E
Arguments:
origlon - longitude(s) of the origination point
origlat - latitude(s) of the origination point
endlon - longitude(s) of the ending point
endlat - latitude(s) of the ending point
fwdfact - forward distance factor(s)
Returns:
(ptlon, ptlat) - longitude and latitude of the computed point(s).
NaN will be returned for both the longitude and
latitude if the great circle is not well-defined.
"""
# Minimum acceptable norm of a cross product
# arcsin(1.0E-7) = 0.02" or 0.64 m on the Earth
MIN_NORM = 1.0E-7
# Convert longitudes and latitudes to points on a unit sphere
# The "+ 0.0 * ptlonr" is to broadcast gcz if needed
ptlonr = numpy.deg2rad(numpy.asarray(origlon, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(origlat, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
origxyz = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(endlon, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(endlat, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
endxyz = numpy.array([gcx, gcy, gcz])
# Determine the rotation matrix about the origin that takes
# origxyz to (1,0,0) (equator and prime meridian) and endxyz
# to (x,y,0) with y > 0 (equator in eastern hemisphere).
#
# The first row of the matrix is origxyz.
#
# The third row of the matrix is the normalized cross product
# of origxyz and endxyz. (The great circle plane perpendicular.)
# If the norm of this cross product is too small, the great
# circle is not well-defined, so zero it out so NaN is produced.
gcpp = numpy.cross(origxyz, endxyz, axis=0)
norm = (gcpp[0]**2 + gcpp[1]**2 + gcpp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gcpp /= norm
# The second row of the matrix is the cross product of the
# third row (gcpp) and the first row (origxyz). This will
# have norm 1.0 since gcpp and origxyz are perpendicular
# unit vectors.
fwdax = numpy.cross(gcpp, origxyz, axis=0)
# Get the coordinates of the rotated end point.
endtrx = origxyz[0] * endxyz[0] + origxyz[1] * endxyz[1] + origxyz[2] * endxyz[2]
endtry = fwdax[0] * endxyz[0] + fwdax[1] * endxyz[1] + fwdax[2] * endxyz[2]
# Get the angle along the equator of the rotated end point, multiply
# by the given factor, and convert this new angle back to coordinates.
fwdang = numpy.arctan2(endtry, endtrx)
fwdang *= numpy.asarray(fwdfact, dtype=float)
fwdtrx = numpy.cos(fwdang)
fwdtry = numpy.sin(fwdang)
# Rotate the new point back to the original coordinate system
# The inverse rotation matrix is the transpose of that matrix.
fwdx = origxyz[0] * fwdtrx + fwdax[0] * fwdtry
fwdy = origxyz[1] * fwdtrx + fwdax[1] * fwdtry
fwdz = origxyz[2] * fwdtrx + fwdax[2] * fwdtry
# Convert the point coordinates into longitudes and latitudes
ptlat = numpy.rad2deg(numpy.arcsin(fwdz))
ptlon = numpy.rad2deg(numpy.arctan2(fwdy, fwdx))
return (ptlon, ptlat)
def equidistscatter(min_lon, min_lat, max_lon, max_lat, min_gcdist, dfactor=5.0):
"""
Create a roughly equidistant set of points in a specified region.
This is done by creating a dense "grid" of points, then repeatedly
randomly selecting a point from that collection and eliminating
points too close to that selected point. For the special cases
where min_lon and max_lon, or min_lat and max_lat, are very close
relative to min_gcdist, the maximum number of evenly spaced points
that can be put on the line described is computed and assigned.
Arguments:
min_lon - minimum longitude of the region
min_lat - minimum latitude of the region
max_lon - maximum longitude of the region
max_lat - maximum latitude of the region
min_gcdist - minimum distance, in great circle degrees,
between returned points
dfactor - the number of axis points in the dense "grid"
compared to the desired "grid". Larger value will
generally increase the uniformity of the returned
points but will also increase the time required
for the calculation.
Returns:
(pt_lons, pt_lats) - ptlons is an array of longitudes and ptlats
is an array of latitudes of (somewhat random) points in
the specified region that are roughly equidistant from
each other but not closer than min_gcdist to each other.
"""
lonmin = float(min_lon)
lonmax = float(max_lon)
if math.fabs(lonmax - lonmin) > 180.0:
raise ValueError("Difference between max_lon and min_lon is more than 180.0")
latmin = float(min_lat)
if math.fabs(latmin) > 90.0:
raise ValueError("min_lat is not in [-90.0,90.0]")
latmax = float(max_lat)
if math.fabs(latmax) > 90.0:
raise ValueError("max_lat is not in [-90.0,90.0]")
mindeg = float(min_gcdist)
if (mindeg <= 0.0) or (mindeg >= 90.0):
raise ValueError("min_gcdist is not in (0.0,90.0)")
dfact = float(dfactor)
if dfact < 1.0:
raise ValueError("dfactor is less than one");
# If lonmin is relatively close to lonmax, directly
# compute the points. Distance on a meridian is the
# difference in latitudes.
if math.fabs(lonmax - lonmin) < (0.05 * mindeg):
lon = 0.5 * (lonmax + lonmin)
dellat = mindeg
numlats = int( (math.fabs(latmax - latmin) + dellat) / dellat )
if latmax < latmin:
dellat *= -1.0
hdiff = 0.5 * ( (latmax - latmin) - (numlats - 1) * dellat )
latvals = numpy.linspace(latmin + hdiff, latmax - hdiff, numlats)
lonvals = numpy.ones((numlats,), dtype=float) * lon
return (lonvals, latvals)
# If latmin is relatively close to latmax, directly
# compute the points. Distance depends on the latitude
# as well as the difference in longitudes.
if math.fabs(latmax - latmin) < (0.05 * mindeg):
lat = 0.5 * (latmax + latmin)
numer = math.sin(0.5 * DEG2RAD * mindeg)
denom = math.cos(lat * DEG2RAD)
if numer < denom:
dellon = math.asin(numer / denom) * 2.0 * RAD2DEG
numlons = int( (math.fabs(lonmax - lonmin) + dellon) / dellon )
else:
# everything too close to a pole - just select one point
dellon = 180.0
numlons = 1
if lonmax < lonmin:
dellon *= -1.0
hdiff = 0.5 * ( (lonmax - lonmin) - (numlons - 1) * dellon )
lonvals = numpy.linspace(lonmin + hdiff, lonmax - hdiff, numlons)
latvals = numpy.ones((numlons,), dtype=float) * lat
return (lonvals, latvals)
# Get the number of latitudes for the dense grid
# Always use latmin and latmax, even if they are too close
dellat = mindeg / dfact
numlats = int( (math.fabs(latmax - latmin) + dellat) / dellat )
if numlats < 2:
numlats = 2
latvals = numpy.linspace(latmin, latmax, numlats)
# Create the dense grid of longitudes and latitudes
denslons = [ ]
denslats = [ ]
numer = math.sin(0.5 * DEG2RAD * mindeg / dfact)
for lat in latvals:
# Get the number of longitudes for the dense grid
# Always use lonmin and lonmax, even if they are too close
denom = math.cos(lat * DEG2RAD)
if numer < denom:
dellon = math.asin(numer / denom) * 2.0 * RAD2DEG
numlons = int( (math.fabs(lonmax - lonmin) + dellon) / dellon )
if numlons < 2:
numlons = 2
else:
# too close to a pole
numlons = 2
lonvals = numpy.linspace(lonmin, lonmax, numlons)
# Add each lon,lat pair to the dense grid
for lon in lonvals:
denslons.append(lon)
denslats.append(lat)
denslons = numpy.asarray(denslons)
denslats = numpy.asarray(denslats)
# create a random permutation of the indices to use for the selection order
availinds = numpy.random.permutation(len(denslats))
selectinds = [ ]
while len(availinds) > 0:
# Get the index of the next available point
ind = availinds[0]
selectinds.append(ind)
# Compute the distance of the available points to the selected point
gcdists = lonlatdistance(denslons[ind], denslats[ind],
denslons[availinds], denslats[availinds])
# Remove indices of any available points too close to this point
availinds = availinds[ gcdists >= mindeg ]
# sort the selected indices so the longitudes and latitudes have some order
selectinds = numpy.sort(selectinds)
# get the selected longitudes and latitudes
selectlons = denslons[selectinds]
selectlats = denslats[selectinds]
# return the selected longitudes and latitudes arrays
return (selectlons, selectlats)
#
# The following is just for testing "by-hand" and to serve as examples.
#
if __name__ == "__main__":
# Test lonlatdistance
tenten = numpy.linspace(0.0,90.0,10)
# On the equator, distance = delta longitude
dists = lonlatdistance(0.0, 0.0, tenten, 0.0)
if not numpy.allclose(dists, tenten):
raise ValueError("Equatorial distances FAIL; expect: %s; found: %s" % (str(tenten), str(dists)))
print("Equatorial distance: PASS")
print()
# On any meridian, distance = delta latitude
dists = lonlatdistance(20.0, 0.0, 20.0, tenten)
if not numpy.allclose(dists, tenten):
raise ValueError("Meridional distances FAIL; expect: %s; found: %s" % (str(tenten), str(dists)))
print("Meridional distance: PASS")
print()
# Play with some distances between cities (deg W, deg N)
seattle = (122.0 + (20.0 / 60.0), 47.0 + (37.0 / 60.0))
portland = (122.0 + (41.0 / 60.0), 45.0 + (31.0 / 60.0))
spokane = (117.0 + (26.0 / 60.0), 47.0 + (40.0 / 60.0))
austin = ( 97.0 + (45.0 / 60.0), 30.0 + (15.0 / 60.0))
houston = ( 95.0 + (23.0 / 60.0), 29.0 + (46.0 / 60.0))
dallas = ( 96.0 + (48.0 / 60.0), 32.0 + (47.0 / 60.0))
lons = ( seattle[0], portland[0], spokane[0] )
lons1, lons2 = numpy.meshgrid(lons, lons)
lats = ( seattle[1], portland[1], spokane[1] )
lats1, lats2 = numpy.meshgrid(lats, lats)
dists = lonlatdistance(lons1, lats1, lons2, lats2)
dists *= DEG2RAD * EARTH_MR * KM2MI
expected = [ [ 0, 146, 228 ],
[ 146, 0, 290 ],
[ 228, 290, 0 ] ]
if not numpy.allclose(dists, expected, rtol=0.01):
raise ValueError("Seattle, Portland, Spokane distance matrix in miles\n" \
" expect: %s\n"
" found: %s" % (str(expected), str(dists)))
print("Seattle, Portland, Spokane distance matrix: PASS")
print()
lons = ( austin[0], houston[0], dallas[0] )
lons1, lons2 = numpy.meshgrid(lons, lons)
lats = ( austin[1], houston[1], dallas[1] )
lats1, lats2 = numpy.meshgrid(lats, lats)
dists = lonlatdistance(lons1, lats1, lons2, lats2)
dists *= DEG2RAD * EARTH_MR * KM2MI
expected = [ [ 0, 145, 184 ],
[ 145, 0, 224 ],
[ 184, 224, 0 ] ]
if not numpy.allclose(dists, expected, rtol=0.01):
raise ValueError("Austin, Houston, Dallas distance matrix in miles\n" \
" expect: %s\n"
" found: %s" % (str(expected), str(dists)))
print("Austin, Houston, Dallas distance matrix: PASS")
print()
# Test lonlatintersect
# Intersections of the equator with meridians
((pt1lons, pt1lats), (pt2lons, pt2lats)) = \
lonlatintersect(0.0, 0.0, tenten, 0.0, \
0.0, -90.0, tenten, tenten)
# First of the first great circle and last of the second great circle are not well-defined
expvalid = numpy.array([ True ] + ([ False ]*8) + [ True ])
validity = numpy.isnan(pt1lons)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of pt1lons: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(pt1lats)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of pt1lats: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(pt2lons)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of pt2lons: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(pt2lats)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of pt2lats: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
if not numpy.allclose(pt1lons[1:-1], tenten[1:-1]):
raise ValueError("Valid pt1lons: expect: %s, found: %s" %\
(str(tenten[1:-1]), str(pt1lons[1:-1])))
if not numpy.allclose(pt1lats[1:-1], 0.0):
raise ValueError("Valid pt1lats: expect: all zeros, found: %s" %\
str(pt1lats[1:-1]))
if not numpy.allclose(pt2lons[1:-1], tenten[1:-1]-180.0):
raise ValueError("Valid pt2lons: expect: %s, found %s" %\
(str(tenten[1:-1]-180.0), str(pt2lons[1:-1])))
if not numpy.allclose(pt2lats[1:-1], 0.0):
raise ValueError("Valid pt2lats: expect: all zeros, found %s" %\
str(pt2lats[1:-1]))
print("Equator/meridian intersections: PASS")
print()
((pt1lons, pt1lats), (pt2lons, pt2lats)) = \
lonlatintersect( 0.0, 89.99, 180.0, 89.99,
90.0, 89.99, -90.0, 89.99)
# longitudes could actually be anything, but this algorithm gives 45.0 and -135.0
if (abs(pt1lons - 45.0) > 1.0E-8) or (abs(pt1lats - 90.0) > 1.0E-8) or \
(abs(pt2lons + 135.0) > 1.0E-8) or (abs(pt2lats + 90.0) > 1.0E-8):
raise ValueError("Mini north pole cross intersections: expect: %s, found %s" % \
(str([45.0, 90.0, 135.0, -90.0]),
str([float(pt1lons), float(pt1lats),
float(pt2lons), float(pt2lats)])))
print("Mini north pole cross intersections: PASS")
print()
# Test lonlatfwdpt
lons, lats = lonlatfwdpt(portland[0], portland[1], spokane[0], spokane[1], 0.0)
if not ( numpy.allclose(lons, portland[0]) and numpy.allclose(lats, portland[1]) ):
raise ValueError("Zero forward from portland to spokane: expect %s, found %s" % \
(str(portland), str((lons, lats))))
print("Zero forward: PASS")
print()
lons, lats = lonlatfwdpt(portland[0], portland[1], spokane[0], spokane[1], 1.0)
if not ( numpy.allclose(lons, spokane[0]) and numpy.allclose(lats, spokane[1]) ):
raise ValueError("One forward from portland to spokane: expect %s, found %s" % \
(str(spokane), str((lons, lats))))
print("One forward: PASS")
print()
lons, lats = lonlatfwdpt(0.0, 0.0, tenten, 0.0, 3.0)
expectlons = 3.0 * tenten
expectlons[ expectlons > 180.0 ] -= 360.0
# The first great circle is not well-defined
expvalid = numpy.array([ True ] + ([ False ]*9))
validity = numpy.isnan(lons)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of fwd equator lons: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(lats)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of fwd equator lats: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
if not numpy.allclose(lons[1:], expectlons[1:]):
raise ValueError("Valid fwd equator lons: expect: %s, found: %s" %\
(str(expectlons[1:]), str(lons[1:])))
if not numpy.allclose(lats[1:], 0.0):
raise ValueError("Valid fwd equator lats: expect: all zeros, found: %s" %\
str(lats[1:]))
print("Fwd equator: PASS")
print()
lons, lats = lonlatfwdpt(0.0, -90.0, 0.0, tenten, 2.0)
# First longitude could be anything, but this algorithm gives 0.0
expectlats = 90.0 - 2.0 * tenten
# The last great circle is not well-defined
expvalid = numpy.array(([ False ]*9) + [ True ])
validity = numpy.isnan(lons)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of fwd prime meridian lons: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(lats)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of fwd prime meridian lats: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
# First longitude could be anything so ignore it
# Others should be either 180 == -180
poslons = lons[1:-1]
poslons[(poslons < 0.0)] += 360.0
if not numpy.allclose(poslons, 180.0):
raise ValueError("Valid fwd prime meridian lons: expect: all 180.0 or -180.0, found: %s" %\
str(poslons))
if not numpy.allclose(lats[:-1], expectlats[:-1]):
raise ValueError("Valid fwd prime meridian lats: expect: %s, found: %s" %\
(str(expectlats[:-1]), str(lats[:-1])))
print("Fwd prime meridian: PASS")
print()
lons, lats = lonlatfwdpt(0.0, 0.0, 45.0, 45.0, (2.0, 3.0, 4.0, 5.0))
expectlons = [ 135.0, 180.0, -135.0, -45.0 ]
expectlats = [ 45.0, 0.0, -45.0, -45.0 ]
if not numpy.allclose(lons, expectlons):
raise ValueError("Fwd diagonal lons: expect: %s, found: %s" %\
(str(expectlons), str(lons)))
if not numpy.allclose(lats, expectlats):
raise ValueError("Fwd diagonal lats: expect: %s, found: %s" %\
(str(expectlats), str(lats)))
print("Fwd diagonal: PASS")
print()
# Test equdistscatter
lons, lats = equidistscatter(0.0, 0.0, 0.0, 0.0, 1.0)
if (lons.shape != (1,)) or (lons[0] != 0.0) or \
(lats.shape != (1,)) or (lats[0] != 0.0):
raise ValueError("Equidistscatter single-point FAIL; \n" \
" expect: ([0.0],[0.0]), \n" \
" found (%s,%s)" % (str(lons), str(lats)))
print("Equidistscatter single-point PASS")
print()
lons, lats = equidistscatter(0.0, 90.0, 90.0, 90.0, 1.0)
if (lons.shape != (1,)) or (lons[0] != 45.0) or \
(lats.shape != (1,)) or (lats[0] != 90.0):
raise ValueError("Equidistscatter pole-point FAIL; \n" \
" expect: ([45.0],[90.0]), \n" \
" found (%s,%s)" % (str(lons), str(lats)))
print("Equidistscatter pole-point PASS")
print()
lons, lats = equidistscatter(0.0, 0.0, 90.0, 0.0, 1.0)
if not numpy.all( lats == 0.0 ):
raise ValueError("Equidistscatter equatorial FAIL; \n" \
" expect: all zero latitudes, \n" \
" found %s" % str(lats))
deltas = lons[1:] - lons[:-1]
if not numpy.all( deltas >= 1.0 ):
raise ValueError("Equidistscatter equatorial FAIL; \n" \
" expect: longitudes monotonic increasing by at least 1.0 degrees, \n" \
" found %s" % str(lons))
if not numpy.all( deltas < 1.0001 ):
raise ValueError("Equidistscatter equatorial FAIL; \n" \
" expect: longitudes monotonic increasing by less than 1.0001 degrees, \n" \
" found %s" % str(lons))
print("Equidistscatter equatorial PASS")
print()
lons, lats = equidistscatter(0.0, 0.0, 0.0, 90.0, 1.0)
if not numpy.all( lons == 0.0 ):
raise ValueError("Equidistscatter meridional FAIL; \n" \
" expect: all zero longitudes, \n" \
" found %s" % str(lons))
deltas = lats[1:] - lats[:-1]
if not numpy.all( deltas >= 1.0 ):
raise ValueError("Equidistscatter meridional FAIL; \n" \
" expect: latitudes monotonic increasing by at least 1.0 degrees, \n" \
" found %s" % str(lats))
if not numpy.all( deltas < 1.0001 ):
raise ValueError("Equidistscatter meridional FAIL; \n" \
" expect: latitudes monotonic increasing by less than 1.0001 degrees, \n" \
" found %s" % str(lats))
print("Equidistscatter meridional PASS")
print()
lons, lats = equidistscatter(0.0, 0.0, 90.0, 90.0, 5.0, 15.0)
nndists = [ ]
for j in range(len(lons)):
gcdists = lonlatdistance(lons[j], lats[j], lons, lats)
gcdists[j] = 180.0
if not numpy.all( gcdists >= 5.0 ):
            raise ValueError("Equidistscatter region FAIL; \n" \
                             " expect distances[%d] >= 5.0, \n" \
                             " found %s" % (j, str(gcdists)))
nndists.append(gcdists.min())
nndists = numpy.array(nndists)
if not numpy.all( nndists < 10.0 ):
raise ValueError("Equidistscatter region FAIL; \n" \
" expect nearest neighbor distances < 10.0, \n" \
" found %s" % str(nndists))
print("Nearest neighbor distances: \n" \
" min = %f, max = %f, mean = %f, stdev = %f" % \
(nndists.min(), nndists.max(), nndists.mean(), nndists.std()))
print("Equidistscatter region PASS")
print()
|
py | 1a5513e55ef9956fc9ad265a47e88fa2fef333cc | from django.conf import settings
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from proso.django.models import disable_for_loaddata
from proso_flashcards.models import Term, Context
class ExtendedTerm(Term):
extra_info = models.TextField()
def to_json(self, nested=False):
json = Term.to_json(self, nested)
json["extra-info"] = self.extra_info
return json
@staticmethod
def load_data(data, term):
if 'extra-info' in data:
term.extra_info = data["extra-info"]
def dump_data(self, term):
if self.extra_info:
term["extra-info"] = self.extra_info
class ExtendedContext(Context):
extra_info = models.TextField()
def to_json(self, **kwargs):
json = Context.to_json(self, **kwargs)
return json
@staticmethod
def load_data(data, context):
if 'extra-info' in data:
context.extra_info = data["extra-info"]
def dump_data(self, context):
if self.extra_info:
context["extra-info"] = self.extra_info
settings.PROSO_FLASHCARDS["term_extension"] = ExtendedTerm
settings.PROSO_FLASHCARDS["context_extension"] = ExtendedContext
@receiver(pre_save, sender=ExtendedTerm)
@receiver(pre_save, sender=ExtendedContext)
@disable_for_loaddata
def create_items(sender, instance, **kwargs):
pre_save.send(sender=sender.__bases__[0], instance=instance)
|
py | 1a5514297f86ccd1c40d4bcd754d4546c025b299 | import pymysql
import sqlalchemy
from sqlalchemy import Column, DateTime, Integer, create_engine, func
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import sessionmaker
from settings import DB_LINK
pymysql.install_as_MySQLdb()
class BaseModelClass:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower() + "s"
__mapper_args__ = {"always_refresh": True}
id = Column(Integer, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), server_onupdate=func.now())
engine: sqlalchemy.engine.base.Engine = create_engine(
DB_LINK, pool_recycle=3600,
)
session: sqlalchemy.orm.session.sessionmaker = sessionmaker(
autocommit=False, autoflush=False, bind=engine,
expire_on_commit=False
)
base = declarative_base(cls=BaseModelClass)
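# Hypothetical example of a model built on this declarative base (the table
# name would become "widgets"; String would additionally need to be imported
# from sqlalchemy):
#
#     class Widget(base):
#         name = Column(String(64))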
|
py | 1a55142cc4794f2ebdf850ea5f21a3f92ae9bede | # https://github.com/wolny/pytorch-3dunet/tree/master/pytorch3dunet/unet3d
import argparse
import importlib
import os
import random
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import torchvision.utils as vutils
from torch.autograd import Variable
from torchvision import datasets, transforms
from model.unet3d.buildingblocks import Encoder, Decoder, DoubleConv, ExtResNetBlock
from model.unet3d.utils import number_of_features_per_level
class Abstract3DUNet(nn.Module):
"""
Base class for standard and residual UNet.
Args:
in_channels (int): number of input channels
out_channels (int): number of output segmentation masks;
Note that that the of out_channels might correspond to either
different semantic classes or to different binary segmentation mask.
It's up to the user of the class to interpret the out_channels and
use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class)
or BCEWithLogitsLoss (two-class) respectively)
        f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number
            of feature maps at level k is given by the geometric progression f_maps * 2^k, k=0,1,...,num_levels-1
final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the
final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used
to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model.
basic_module: basic model for the encoder/decoder (DoubleConv, ExtResNetBlock, ....)
layer_order (string): determines the order of layers
in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d.
See `SingleConv` for more info
f_maps (int, tuple): if int: number of feature maps in the first conv layer of the encoder (default: 64);
if tuple: number of feature maps at each level
num_groups (int): number of groups for the GroupNorm
num_levels (int): number of levels in the encoder/decoder path (applied only if f_maps is an int)
is_segmentation (bool): if True (semantic segmentation problem) Sigmoid/Softmax normalization is applied
after the final convolution; if False (regression problem) the normalization layer is skipped at the end
testing (bool): if True (testing mode) the `final_activation` (if present, i.e. `is_segmentation=true`)
will be applied as the last operation during the forward pass; if False the model is in training mode
and the `final_activation` (even if present) won't be applied; default: False
conv_kernel_size (int or tuple): size of the convolving kernel in the basic_module
pool_kernel_size (int or tuple): the size of the window
conv_padding (int or tuple): add zero-padding added to all three sides of the input
"""
def __init__(self, in_channels, out_channels, final_sigmoid, basic_module, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=4, is_segmentation=True, testing=False,
conv_kernel_size=3, pool_kernel_size=2, conv_padding=1, **kwargs):
super(Abstract3DUNet, self).__init__()
self.testing = testing
if isinstance(f_maps, int):
f_maps = number_of_features_per_level(f_maps, num_levels=num_levels)
# create encoder path consisting of Encoder modules. Depth of the encoder is equal to `len(f_maps)`
encoders = []
for i, out_feature_num in enumerate(f_maps):
if i == 0:
encoder = Encoder(in_channels, out_feature_num,
apply_pooling=False, # skip pooling in the firs encoder
basic_module=basic_module,
conv_layer_order=layer_order,
conv_kernel_size=conv_kernel_size,
num_groups=num_groups,
padding=conv_padding)
else:
# TODO: adapt for anisotropy in the data, i.e. use proper pooling kernel to make the data isotropic after 1-2 pooling operations
encoder = Encoder(f_maps[i - 1], out_feature_num,
basic_module=basic_module,
conv_layer_order=layer_order,
conv_kernel_size=conv_kernel_size,
num_groups=num_groups,
pool_kernel_size=pool_kernel_size,
padding=conv_padding)
encoders.append(encoder)
self.encoders = nn.ModuleList(encoders)
# create decoder path consisting of the Decoder modules. The length of the decoder is equal to `len(f_maps) - 1`
decoders = []
reversed_f_maps = list(reversed(f_maps))
for i in range(len(reversed_f_maps) - 1):
if basic_module == DoubleConv:
in_feature_num = reversed_f_maps[i] + reversed_f_maps[i + 1]
else:
in_feature_num = reversed_f_maps[i]
out_feature_num = reversed_f_maps[i + 1]
# TODO: if non-standard pooling was used, make sure to use correct striding for transpose conv
# currently strides with a constant stride: (2, 2, 2)
decoder = Decoder(in_feature_num, out_feature_num,
basic_module=basic_module,
conv_layer_order=layer_order,
conv_kernel_size=conv_kernel_size,
num_groups=num_groups,
padding=conv_padding)
decoders.append(decoder)
self.decoders = nn.ModuleList(decoders)
# in the last layer a 1×1 convolution reduces the number of output
# channels to the number of labels
self.final_conv = nn.Conv3d(f_maps[0], out_channels, 1)
if is_segmentation:
# semantic segmentation problem
if final_sigmoid:
self.final_activation = nn.Sigmoid()
else:
self.final_activation = nn.Softmax(dim=1)
else:
# regression problem
self.final_activation = None
def forward(self, x):
# encoder part
encoders_features = []
for encoder in self.encoders:
x = encoder(x)
# reverse the encoder outputs to be aligned with the decoder
encoders_features.insert(0, x)
# remove the last encoder's output from the list
# !!remember: it's the 1st in the list
encoders_features = encoders_features[1:]
# decoder part
for decoder, encoder_features in zip(self.decoders, encoders_features):
# pass the output from the corresponding encoder and the output
# of the previous decoder
x = decoder(encoder_features, x)
x = self.final_conv(x)
# apply final_activation (i.e. Sigmoid or Softmax) only during prediction. During training the network outputs
# logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric
if self.testing and self.final_activation is not None:
x = self.final_activation(x)
return x
class UNet3D(Abstract3DUNet):
"""
3DUnet model from
`"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation"
<https://arxiv.org/pdf/1606.06650.pdf>`.
Uses `DoubleConv` as a basic_module and nearest neighbor upsampling in the decoder
"""
def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=4, is_segmentation=True, conv_padding=1, **kwargs):
super(UNet3D, self).__init__(in_channels=in_channels, out_channels=out_channels, final_sigmoid=final_sigmoid,
basic_module=DoubleConv, f_maps=f_maps, layer_order=layer_order,
num_groups=num_groups, num_levels=num_levels, is_segmentation=is_segmentation,
conv_padding=conv_padding, **kwargs)
class ResidualUNet3D(Abstract3DUNet):
"""
Residual 3DUnet model implementation based on https://arxiv.org/pdf/1706.00120.pdf.
Uses ExtResNetBlock as a basic building block, summation joining instead
of concatenation joining and transposed convolutions for upsampling (watch out for block artifacts).
Since the model effectively becomes a residual net, in theory it allows for deeper UNet.
"""
def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=5, is_segmentation=True, conv_padding=1, **kwargs):
super(ResidualUNet3D, self).__init__(in_channels=in_channels, out_channels=out_channels,
final_sigmoid=final_sigmoid,
basic_module=ExtResNetBlock, f_maps=f_maps, layer_order=layer_order,
num_groups=num_groups, num_levels=num_levels,
is_segmentation=is_segmentation, conv_padding=conv_padding,
**kwargs)
class UNet2D(Abstract3DUNet):
"""
Just a standard 2D Unet. Arises naturally by specifying conv_kernel_size=(1, 3, 3), pool_kernel_size=(1, 2, 2).
"""
def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=4, is_segmentation=True, conv_padding=1, **kwargs):
if conv_padding == 1:
conv_padding = (0, 1, 1)
super(UNet2D, self).__init__(in_channels=in_channels,
out_channels=out_channels,
final_sigmoid=final_sigmoid,
basic_module=DoubleConv,
f_maps=f_maps,
layer_order=layer_order,
num_groups=num_groups,
num_levels=num_levels,
is_segmentation=is_segmentation,
conv_kernel_size=(1, 3, 3),
pool_kernel_size=(1, 2, 2),
conv_padding=conv_padding,
**kwargs)
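# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# Shows how the UNet variants above might be instantiated on a dummy volume,
# mirroring the RecGAN demo at the bottom of this file. The input shape and
# channel counts are illustrative assumptions; it also assumes `torch` and the
# DoubleConv/Encoder/Decoder helpers are imported/defined earlier in this module.
def _example_unet3d_usage():
    x = torch.rand(1, 1, 64, 64, 64)  # (batch, channels, depth, height, width)
    model = UNet3D(in_channels=1, out_channels=1, final_sigmoid=True)
    with torch.no_grad():
        y = model(x)
    return y.shape  # expected: torch.Size([1, 1, 64, 64, 64])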
class First3D(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, dropout=False):
super(First3D, self).__init__()
layers = [
nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm3d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv3d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm3d(out_channels),
nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout3d(p=dropout))
self.first = nn.Sequential(*layers)
def forward(self, x):
return self.first(x)
class Encoder3D(nn.Module):
def __init__(
self, in_channels, middle_channels, out_channels,
dropout=False, downsample_kernel=2
):
super(Encoder3D, self).__init__()
layers = [
nn.MaxPool3d(kernel_size=downsample_kernel),
nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm3d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv3d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm3d(out_channels)
# allow output neg
# nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout3d(p=dropout))
self.encoder = nn.Sequential(*layers)
def forward(self, x):
return self.encoder(x)
class Discriminator(nn.Module):
    def __init__(self, in_channels, conv_depths=(4, 8, 16, 32, 64, 128, 1)):
assert len(conv_depths) > 2, 'conv_depths must have at least 3 members'
super(Discriminator, self).__init__()
# defining encoder layers
encoder_layers = []
encoder_layers.append(First3D(in_channels, conv_depths[0], conv_depths[0]))
encoder_layers.extend([Encoder3D(conv_depths[i], conv_depths[i + 1], conv_depths[i + 1])
for i in range(len(conv_depths)-1)])
# encoder, center and decoder layers
self.encoder_layers = nn.Sequential(*encoder_layers)
def forward(self, x, return_all=False):
x_enc = [x]
for enc_layer in self.encoder_layers:
x_enc.append(enc_layer(x_enc[-1]))
        return torch.sigmoid(x_enc[-1])
class RecGAN(nn.Module):
    def __init__(self):
        super(RecGAN, self).__init__()
# create AE (3D-Unet)
# input 64*64*64*1 output 64*64*64*1
self.unet = ResidualUNet3D(1,1,final_sigmoid=True, f_maps=64, layer_order='gcr',num_groups=8, num_levels=5, is_segmentation=False, conv_padding=1)
# create discriminator (like the encoder)
self.discriminator = Discriminator(1)
def forward(self,X):
Y_rec = self.unet(X)
dis = self.discriminator(Y_rec)
        return torch.sigmoid(Y_rec), dis
if __name__ == '__main__':
input_data = torch.rand([1,1,64,64,64])
recgan = RecGAN()
output,dis = recgan(input_data)
print(output.shape)
print(dis.shape) |
py | 1a5514d618dfae6cb7fa7f6996956a89775509c8 | #!/usr/bin/env python3
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
"""Runtime module. Contains runtime base class and language specific runtime classes."""
import signal
import subprocess
import sys
import time
import os
import serializerfactory
import subprocessfactory
import tracer
from runbook import *
from workerexception import *
json = serializerfactory.get_serializer(sys.version_info)
def find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
_, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if os.path.isfile(executable):
return executable
if path is None:
path = os.environ.get('PATH', None)
if path is None:
try:
path = os.confstr("CS_PATH")
except (AttributeError, ValueError):
# os.confstr() or CS_PATH is not available
path = os.defpath
# bpo-35755: Don't use os.defpath if the PATH environment variable is
# set to an empty string
# PATH='' doesn't match, whereas PATH=':' looks in the current directory
if not path:
return None
paths = path.split(os.pathsep)
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
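# Hedged example (added for illustration, not part of the original module):
# find_executable() can be used on its own to probe PATH for an interpreter.
# The "python3" alias below is an arbitrary illustrative choice.
def _example_find_executable():
    path_to_python = find_executable("python3")
    return path_to_python is not None  # True if a python3 executable was found on PATH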
class Runtime(object):
"""Runtime base class."""
def __init__(self, job_data, runbook):
"""
:type job_data : jrdsclient.JobData"
:type runbook : Runbook
"""
# should be overwritten by language runtime
self.execution_alias = None
self.base_cmd = None
# used for actual runtime
self.runbook = runbook
self.runbook_subprocess = None
self.job_data = job_data
def initialize(self):
self.runbook.write_to_disk()
def start_runbook_subprocess(self):
"""Creates the runbook subprocess based on the script language and using properties set by the derived class.
Requires self.base_cmd & self.runbook_file_path to be set by derived class.
"""
cmd = self.base_cmd + [self.runbook.runbook_file_path]
job_parameters = self.job_data.parameters
if job_parameters is not None and len(job_parameters) > 0:
for parameter in job_parameters:
tracer.log_debug_trace("Parameter is: \n" + str(parameter))
if self.runbook.definition_kind_str == "PowerShell" and parameter["Name"]:
# Handle named parameters for PowerShell arriving out of order
cmd += ["-%s" % parameter["Name"]]
cmd += [str(json.loads(parameter["Value"]))]
        # Start from a copy of the current process environment and add the job-specific variables below
env = os.environ.copy()
env.update({"AUTOMATION_JOB_ID": str(self.job_data.job_id),
"AUTOMATION_ACTIVITY_ID": str(tracer.u_activity_id),
"PYTHONPATH": str(configuration.get_source_directory_path()),
"HOME": str(os.getcwd())}) # windows env have to be str (not unicode)
self.runbook_subprocess = subprocessfactory.create_subprocess(cmd=cmd,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def kill_runbook_subprocess(self):
"""Attempts to kill the runbook subprocess.
This method will attempt to kill the runbook subprocess [max_attempt_count] and will return if successful.
Throws:
SandboxRuntimeException : If runbook subprocess is still alive after [max_attempt_count].
"""
attempt_count = 0
max_attempt_count = 3
while attempt_count < max_attempt_count:
if self.runbook_subprocess is not None and self.runbook_subprocess.poll() is None:
os.kill(self.runbook_subprocess.pid, signal.SIGTERM)
runbook_proc_is_alive = self.is_process_alive(self.runbook_subprocess)
if runbook_proc_is_alive is False:
return
attempt_count += 1
time.sleep(attempt_count)
else:
return
raise SandboxRuntimeException()
@staticmethod
def is_process_alive(process):
"""Checks if the given process is still alive.
Returns:
boolean : True if the process [pid] is alive, False otherwise.
"""
if process.poll() is None:
return True
else:
return False
def is_runtime_supported(self):
"""Validates that the OS supports the language runtime by testing the executable file path.
Returns:
True : If executable exist.
False : Otherwise.
"""
if find_executable(self.execution_alias) is None:
return False
else:
return True
class PowerShellRuntime(Runtime):
"""PowerShell runtime derived class."""
def __init__(self, job_data, runbook):
Runtime.__init__(self, job_data, runbook)
self.execution_alias = "pwsh"
if linuxutil.is_posix_host() is False:
self.execution_alias = "powershell"
self.base_cmd = [self.execution_alias, "-File"]
class Python2Runtime(Runtime):
"""Python 2 runtime derived class."""
def __init__(self, job_data, runbook):
Runtime.__init__(self, job_data, runbook)
self.execution_alias = "python2"
if get_default_python_interpreter_major_version() == 2:
self.execution_alias = "python"
self.base_cmd = [self.execution_alias]
class Python3Runtime(Runtime):
"""Python 3 runtime derived class."""
def __init__(self, job_data, runbook):
Runtime.__init__(self, job_data, runbook)
self.execution_alias = "python3"
if get_default_python_interpreter_major_version() == 3:
self.execution_alias = "python3"
self.base_cmd = [self.execution_alias]
class BashRuntime(Runtime):
"""Bash runtime derived class."""
def __init__(self, job_data, runbook):
Runtime.__init__(self, job_data, runbook)
self.execution_alias = "bash"
self.base_cmd = [self.execution_alias]
def get_default_python_interpreter_major_version():
"""Return the default "python" alias interpreter version.
Returns:
int, the interpreter major version
None, if the default interpreter version cannot be detected
"""
cmd = ["python3", "-c", "import sys;print(sys.version[0])"] # need to use print() for python3 compatibility
p = subprocessfactory.create_subprocess(cmd=cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
default_interpreter_version, error = p.communicate()
if p.returncode == 0:
return int(default_interpreter_version.decode().strip())
else:
return None
|
py | 1a55160e8aaaef2336e5befabb657090dfa70261 | # Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import torch
from base.learners.skill_discovery.base import BaseSkillDiscoveryLearner
class BaseSMMLearner(BaseSkillDiscoveryLearner):
AGENT_TYPE = 'SMM'
def __init__(self, skill_n, **kwargs):
self.skill_n = int(skill_n)
# At least trigger the default usage for im and density modules
if 'im_params' not in kwargs:
kwargs['im_params'] = {}
if 'density_params' not in kwargs:
kwargs['density_params'] = {}
super().__init__(**kwargs)
self.im_type = 'reverse_mi'
self.density_type = 'vae'
def relabel_episode(self):
super().relabel_episode()
# Add density model reward
self._add_density_reward()
def relabel_batch(self, batch):
batch = super().relabel_batch(batch)
# Compute reward from density model
with torch.no_grad():
new_density_rew = self.density.novelty(batch)
# Make sure that weights for density rewards are not None
density_nu = self.density_nu if self.density_nu is not None else 0.
# Detach density rewards from computation graph
new_density_rew = new_density_rew.detach()
batch['reward'] = batch['reward'] + density_nu * new_density_rew
batch['density_model_reward'] = new_density_rew
return batch
def _compute_novelty(self, batched_episode):
return self.density.novelty(batched_episode)
def _add_density_reward(self):
if self.density is not None:
for ep in self._compress_me:
batched_episode = {key: torch.stack([e[key] for e in ep]) for key in ep[0].keys()}
novelty = self._compute_novelty(batched_episode)
if self.density_scale:
self.train()
_ = self._density_bn(novelty.view(-1, 1))
self.eval()
novelty = novelty / torch.sqrt(self._density_bn.running_var[0])
for e, s in zip(ep, novelty):
e['reward'] += (self.density_nu * s.detach())
e['density_model_reward'] = s.detach()
def get_density_loss(self, batch):
return self.density(batch)
|
py | 1a55165b19018222a05acf697ed7e943d993523e | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 11:01:42 2020
@author: amarmore
"""
# Everything related to the segmentation of the autosimilarity.
import numpy as np
import math
from scipy.sparse import diags
import musicae.model.errors as err
import warnings
def get_autosimilarity(an_array, transpose = False, normalize = False):
"""
Encapsulates the autosimilarity generation of a matrix.
Parameters
----------
an_array : numpy array
        The array/matrix whose autosimilarity is to be computed.
transpose : boolean, optional
        Whether the array has to be transposed before computing the autosimilarity.
The default is False.
normalize : boolean, optional
Whether to normalize the autosimilarity.
The default is False.
Returns
-------
numpy array
The autosimilarity of this array.
"""
if type(an_array) is list:
this_array = np.array(an_array)
else:
this_array = an_array
if transpose:
this_array = this_array.T
if normalize:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide") # Avoiding to show the warning, as it's handled, not te confuse the user.
this_array = np.array([list(i/np.linalg.norm(i)) for i in this_array.T]).T
        this_array = np.where(np.isnan(this_array), 1e-10, this_array) # Replace null columns (NaN after normalization), avoiding a failure in best-path retrieval
return this_array.T@this_array
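# Hedged example (added for illustration): autosimilarity of a tiny feature
# matrix whose columns are assumed to be bar-wise feature vectors (the 2x4 toy
# matrix and the orientation convention are illustrative assumptions).
def _example_get_autosimilarity():
    bars = np.array([[1.0, 1.0, 0.0, 0.0],
                     [0.0, 0.0, 1.0, 1.0]])  # 2 features x 4 bars
    autosim = get_autosimilarity(bars, transpose=False, normalize=True)
    return autosim  # 4x4 matrix: 1 where bars are identical, 0 where they are orthogonal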
def compute_all_kernels(max_size, convolution_type = "full"):
"""
    Precomputes all kernels of size 0 ([0]) to max_size, and feeds them to the Dynamic Programming algorithm.
Parameters
----------
max_size : integer
The maximal size (included) for kernels.
convolution_type: string
        The type of convolution kernel to use.
Possibilities are :
- "full" : squared matrix entirely composed of one, except on the diagonal where it's zero.
The associated convolution cost for a segment (b_1, b_2) will be
.. math::
c_{b_1,b_2} = \\frac{1}{b_2 - b_1 + 1}\\sum_{i,j = 0, i \\ne j}^{n - 1} a_{i + b_1, j + b_1}
- "eight_bands" : squared matrix where the only nonzero values are ones on the
8 subdiagonals surrounding the main diagonal.
The associated convolution cost for a segment (b_1, b_2) will be
.. math::
c_{b_1,b_2} = \\frac{1}{b_2 - b_1 + 1}\\sum_{i,j = 0, 1 \\leq |i - j| \\leq 4}^{n - 1} a_{i + b_1, j + b_1}
- "mixed" : sum of both previous kernels, i.e. values are zero on the diagonal,
2 on the 8 subdiagonals surrounding the main diagonal, and 1 elsewhere.
The associated convolution cost for a segment (b_1, b_2) will be
.. math::
            c_{b_1,b_2} = \\frac{1}{b_2 - b_1 + 1}(2\\sum_{i,j = 0, 1 \\leq |i - j| \\leq 4}^{n - 1} a_{i + b_1, j + b_1} + \\sum_{i,j = 0, |i - j| > 4}^{n - 1} a_{i + b_1, j + b_1})
Returns
-------
kernels : array of arrays (which are kernels)
All the kernels, of size 0 ([0]) to max_size.
"""
kernels = [[0]]
for p in range(1,max_size + 1):
if p < 4:
kern = np.ones((p,p)) - np.identity(p)
else:
if convolution_type == "full":
# Full kernel (except for the diagonal)
kern = np.ones((p,p)) - np.identity(p)
elif convolution_type == "eight_bands":
# Diagonal where only the eight subdiagonals surrounding the main diagonal is one
k = np.array([np.ones(p-4),np.ones(p-3),np.ones(p-2),np.ones(p-1),np.zeros(p),np.ones(p-1),np.ones(p-2),np.ones(p-3),np.ones(p-4)])
offset = [-4,-3,-2,-1,0,1,2,3,4]
kern = diags(k,offset).toarray()
elif convolution_type == "mixed":
# Sum of both previous kernels
k = np.array([np.ones(p-4),np.ones(p-3),np.ones(p-2),np.ones(p-1),np.zeros(p),np.ones(p-1),np.ones(p-2),np.ones(p-3),np.ones(p-4)])
offset = [-4,-3,-2,-1,0,1,2,3,4]
kern = np.ones((p,p)) - np.identity(p) + diags(k,offset).toarray()
else:
raise err.InvalidArgumentValueException(f"Convolution type not understood: {convolution_type}.")
kernels.append(kern)
return kernels
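# Hedged example (added for illustration): precomputing kernels up to size 8
# and inspecting the "eight_bands" kernel of size 6, which is zero on the main
# diagonal and one on the four sub/super-diagonals on each side of it.
def _example_compute_all_kernels():
    kernels = compute_all_kernels(8, convolution_type="eight_bands")
    return len(kernels), kernels[6].shape  # (9, (6, 6)); kernels[0] is the dummy [0]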
def convolutionnal_cost(cropped_autosimilarity, kernels):
"""
The convolution measure on this part of the autosimilarity matrix.
Parameters
----------
cropped_autosimilarity : list of list of floats or numpy array (matrix representation)
The part of the autosimilarity which convolution measure is to compute.
kernels : list of arrays
Acceptable kernels.
Returns
-------
float
The convolution measure.
"""
p = len(cropped_autosimilarity)
kern = kernels[p]
#return np.mean(np.multiply(kern,cropped_autosimilarity))
return np.sum(np.multiply(kern,cropped_autosimilarity)) / p**2
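# Hedged example (added for illustration): convolution cost of a 4x4 all-ones
# block with the "full" kernel, i.e. the sum of off-diagonal entries divided by
# 4**2, hence (16 - 4) / 16 = 0.75.
def _example_convolutionnal_cost():
    kernels = compute_all_kernels(8, convolution_type="full")
    block = np.ones((4, 4))
    return convolutionnal_cost(block, kernels)  # expected: 0.75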
def convolution_entire_matrix_computation(autosimilarity_array, kernels, kernel_size = 8):
"""
Computes the convolution measure on the entire autosimilarity matrix, with a defined and fixed kernel size.
Parameters
----------
autosimilarity_array : list of list of floats or numpy array (matrix representation)
The autosimilarity matrix.
kernels : list of arrays
All acceptable kernels.
kernel_size : integer
The size of the kernel for this measure.
Returns
-------
cost : list of float
List of convolution measures, at each bar of the autosimilarity.
"""
cost = np.zeros(len(autosimilarity_array))
for i in range(kernel_size, len(autosimilarity_array)):
cost[i] = convolutionnal_cost(autosimilarity_array[i - kernel_size:i,i - kernel_size:i], kernels)
return cost
def dynamic_convolution_computation(autosimilarity, min_size = 1, max_size = 36, penalty_weight = 1, penalty_func = "modulo8", convolution_type = "mixed"):
"""
Dynamic programming algorithm, computing a maximization of a cost, sum of segments' costs on the autosimilarity.
This cost is a combination of
- the convolutionnal cost on the segment, with a dynamic size,
- a penalty cost, function of the size of the segment, to enforce specific sizes (with prior knowledge),
The penalty cost is computed in the function "penalty_cost_from_arg()".
See this function for further details.
It returns the optimal segmentation according to this cost.
    This algorithm is also described in [1].
Parameters
----------
autosimilarity : list of list of float (list of columns)
The autosimilarity to segment.
min_size : integer, optional
The minimal length of segments.
The default is 1.
max_size : integer, optional
The maximal length of segments.
The default is 36.
penalty_weight : float, optional
The ponderation parameter for the penalty function
penalty_func : string
The type of penalty function to use.
See "penalty_cost_from_arg()" for further details.
convolution_type : string
The type of convolution we want to use in this computation.
See "compute_all_kernels()" for a detailed list of possibilities.
Raises
------
ToDebugException
If the program fails, generally meaning that the autosimilarity is incorrect.
Returns
-------
list of tuples
The segments, as a list of tuples (start, end).
integer
Global cost (the minimal among all).
References
----------
[1] Marmoret, A., Cohen, J., Bertin, N., & Bimbot, F. (2020, October).
Uncovering Audio Patterns in Music with Nonnegative Tucker Decomposition for Structural Segmentation.
In ISMIR 2020-21st International Society for Music Information Retrieval.
"""
costs = [-math.inf for i in range(len(autosimilarity))]
segments_best_starts = [None for i in range(len(autosimilarity))]
segments_best_starts[0] = 0
costs[0] = 0
kernels = compute_all_kernels(max_size, convolution_type = convolution_type)
conv_eight = convolution_entire_matrix_computation(autosimilarity, kernels)
for current_idx in range(1, len(autosimilarity)): # Parse all indexes of the autosimilarity
for possible_start_idx in possible_segment_start(current_idx, min_size = min_size, max_size = max_size):
if possible_start_idx < 0:
raise err.ToDebugException("Invalid value of start index.")
# Convolutionnal cost between the possible start of the segment and the current index (entire segment)
conv_cost = convolutionnal_cost(autosimilarity[possible_start_idx:current_idx,possible_start_idx:current_idx], kernels)
segment_length = current_idx - possible_start_idx
penalty_cost = penalty_cost_from_arg(penalty_func, segment_length)
this_segment_cost = conv_cost * segment_length - penalty_cost * penalty_weight * np.max(conv_eight)
            # Note: conv_eight is not normalized by its size (not a problem in itself as the size is constant, but generally not specified in formulas).
# Avoiding errors, as segment_cost are initially set to -inf.
if possible_start_idx == 0:
if this_segment_cost > costs[current_idx]:
costs[current_idx] = this_segment_cost
segments_best_starts[current_idx] = 0
else:
if costs[possible_start_idx] + this_segment_cost > costs[current_idx]:
costs[current_idx] = costs[possible_start_idx] + this_segment_cost
segments_best_starts[current_idx] = possible_start_idx
segments = [(segments_best_starts[len(autosimilarity) - 1], len(autosimilarity) - 1)]
precedent_frontier = segments_best_starts[len(autosimilarity) - 1] # Because a segment's start is the previous one's end.
while precedent_frontier > 0:
segments.append((segments_best_starts[precedent_frontier], precedent_frontier))
precedent_frontier = segments_best_starts[precedent_frontier]
        if precedent_frontier is None:
raise err.ToDebugException("Well... The dynamic programming algorithm took an impossible path, so it failed. Understand why.") from None
return segments[::-1], costs[-1]
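# Hedged end-to-end example (added for illustration): segmenting a toy
# block-diagonal autosimilarity with the dynamic programming routine above.
# The block sizes and parameter values are arbitrary assumptions, not
# recommended settings.
def _example_dynamic_convolution_computation():
    autosim = np.zeros((16, 16))
    autosim[:8, :8] = 1.0
    autosim[8:, 8:] = 1.0
    segments, cost = dynamic_convolution_computation(autosim, min_size=2, max_size=12,
                                                     penalty_weight=1, penalty_func="modulo8",
                                                     convolution_type="full")
    return segments  # ideally close to [(0, 8), (8, 15)] for this toy matrix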
def penalty_cost_from_arg(penalty_func, segment_length):
"""
Returns a penalty cost, function of the size of the segment.
The penalty function has to be specified, and is bound to evolve in the near future,
so this docstring won't explain it.
Instead, you'll have to read the code, sorry! It is pretty straightforward though.
The ``modulo'' functions are based on empirical prior knowledge,
following the fact that pop music is generally composed of segments of 4 or 8 bars.
Parameters
----------
penalty_func : string
Identifier of the penalty function.
segment_length : integer
Size of the segment.
Returns
-------
float
The penalty cost.
"""
if penalty_func == "modulo4":
if segment_length %4 == 0:
return 0
elif segment_length %2 == 0:
return 1/2
else:
return 1
if penalty_func == "modulo8":
if segment_length == 8:
return 0
elif segment_length %4 == 0:
return 1/4
elif segment_length %2 == 0:
return 1/2
else:
return 1
if penalty_func == "moduloSmall8and4":
if segment_length > 12:
return 100
elif segment_length == 8:
return 0
elif segment_length == 4:
return 1/4
elif segment_length %2 == 0:
return 1/2
else:
return 1
if penalty_func == "sargentdemi":
return abs(segment_length - 8) ** (1/2)
if penalty_func == "sargentun":
return abs(segment_length - 8)
if penalty_func == "sargentdeux":
return abs(segment_length - 8) ** 2
else:
raise err.InvalidArgumentValueException(f"Penalty function not understood {penalty_func}.")
def possible_segment_start(idx, min_size = 1, max_size = None):
"""
Generates the list of all possible starts of segments given the index of its end.
Parameters
----------
idx: integer
The end of a segment.
min_size: integer
Minimal length of a segment.
max_size: integer
Maximal length of a segment.
Returns
-------
list of integers
All potentials starts of structural segments.
"""
if min_size < 1: # No segment should be allowed to be 0 size
min_size = 1
if max_size == None:
return range(0, idx - min_size + 1)
else:
if idx >= max_size:
return range(idx - max_size, idx - min_size + 1)
elif idx >= min_size:
return range(0, idx - min_size + 1)
else:
return []
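# Hedged example (added for illustration): candidate segment starts for a
# segment ending at bar 10, with lengths constrained to lie in [2, 8] bars.
def _example_possible_segment_start():
    return list(possible_segment_start(10, min_size=2, max_size=8))
    # expected: [2, 3, 4, 5, 6, 7, 8], i.e. lengths 8 down to 2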
# %% Novelty cost, deprecated, but could be used in comparison tests.
def novelty_cost(cropped_autosimilarity):
"""
Novelty measure on this part of the autosimilarity matrix.
The size of the kernel will be the size of the parameter matrix.
Parameters
----------
cropped_autosimilarity : list of list of floats or numpy array (matrix representation)
The part of the autosimilarity which novelty measure is to compute.
Raises
------
NotImplementedError
        If the size of the autosimilarity is odd (the novelty kernel can't fit this matrix).
Returns
-------
float
The novelty measure.
"""
# Kernel is of the size of cropped_autosimilarity
if len(cropped_autosimilarity) == 0:
return 0
if len(cropped_autosimilarity) % 2 == 1:
raise NotImplementedError("Error")
#return (novelty_cost(cropped_autosimilarity[:-1, :-1]) + novelty_cost(cropped_autosimilarity[1:, 1:])) / 2
kernel_size = int(len(cropped_autosimilarity) / 2)
kernel = np.kron(np.array([[1,-1], [-1, 1]]), np.ones((kernel_size, kernel_size)))
return np.mean(kernel*cropped_autosimilarity)
def novelty_computation(autosimilarity_array, kernel_size):
"""
    Computes the novelty measure over the entire autosimilarity matrix, with a defined and fixed kernel size.
Parameters
----------
autosimilarity_array : list of list of floats or numpy array (matrix representation)
The autosimilarity matrix.
kernel_size : integer
The size of the kernel.
Raises
------
NotImplementedError
If the kernel size is odd, can't compute the novelty measure.
Returns
-------
cost : list of float
List of novelty measures, at each bar of the autosimilarity.
"""
if kernel_size % 2 == 1:
raise NotImplementedError("The kernel should be even.") from None
cost = np.zeros(len(autosimilarity_array))
half_kernel = int(kernel_size / 2)
for i in range(half_kernel, len(autosimilarity_array) - half_kernel):
cost[i] = novelty_cost(autosimilarity_array[i - half_kernel:i + half_kernel,i - half_kernel:i + half_kernel])
return cost
# %% Related to the novelty computation, so deprecated.
def peak_picking(tab, window_size = 1):
"""
Returns the indexes of peaks of values in the given list of values.
A value is considered "peak" if it's a local maximum,
and if all values in the window (defined by 'window_size) before and after
are strictly monotonous.
Used for peak picking in the novelty measure.
Parameters
----------
tab : list of float
The list of values to study.
window_size : boolean, optional
        Size of the window around a possible peak for it to be considered a "peak",
        i.e. the number of consecutive values that should increase (before) and decrease (after).
The default is 1.
Returns
-------
to_return : list of integers
The indexes where values are peaking.
"""
to_return = []
for current_idx in range(window_size, len(tab) - window_size):
if is_increasing(tab[current_idx - window_size:current_idx + 1]) and is_increasing(tab[current_idx:current_idx + window_size + 1][::-1]):
to_return.append(current_idx)
return to_return
def valley_picking(tab, window_size = 1):
"""
Returns the indexes of valleys of values in the desired list of values.
A value is considered "valley" if it's a local minimum,
and if all values in the window (defined by 'window_size) before and after
are strictly monotonous.
Used for peak picking in the novelty measure.
Parameters
----------
tab : list of float
The list of values to study.
window_size : boolean, optional
        Size of the window around a possible valley for it to be considered a "valley",
        i.e. the number of consecutive values that should decrease (before) and increase (after).
The default is 1.
Returns
-------
to_return : list of integers
The indexes where values are valleys.
"""
to_return = []
for current_idx in range(window_size, len(tab) - window_size):
if is_increasing(tab[current_idx - window_size:current_idx + 1][::-1]) and is_increasing(tab[current_idx:current_idx + window_size + 1]):
to_return.append(current_idx)
return to_return
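# Hedged example (added for illustration): peak and valley indexes of a small
# novelty-like curve (the curve values are arbitrary).
def _example_peak_valley_picking():
    curve = [0.0, 1.0, 0.2, 0.8, 0.1, 0.9, 0.0]
    return peak_picking(curve), valley_picking(curve)
    # expected: ([1, 3, 5], [2, 4])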
def is_increasing(tab):
"""
Tests if the tab values are increasing.
Used for peak picking in the novelty measure.
Parameters
----------
tab : list of float
The values.
Returns
-------
boolean
Whether the values are increasing or not.
"""
if len(tab) <= 1 or len(np.unique(tab)) == 1:
return False
for idx in range(len(tab) - 1):
if tab[idx] > tab[idx+1]:
return False
return True
def decreasing_peaks(data):
"""
Returns the peaks indexes of a list of values in their decreasing order of values.
Used for peak picking in the novelty measure.
Parameters
----------
data : list of float
The values.
Returns
-------
list of integers
The indexes of the peaks, sorted in their decreasing order of values.
"""
peaks_and_value = []
for idx in peak_picking(data, window_size = 1):
peaks_and_value.append((idx, data[idx]))
return sorted(peaks_and_value, key=lambda x:x[1], reverse = True)
def select_highest_peaks_thresholded_indexes(data, percentage = 0.33):
"""
Returns the peaks higher than a percentage of the maximal peak from a list of values.
Used for peak picking in the novelty measure.
Parameters
----------
data : list of floats
The values.
percentage : float, optional
The percentage of the maximal value for a peak to be valid.
The default is 0.33.
Returns
-------
list of integers
Indexes of the valid peaks.
"""
peaks = np.array(decreasing_peaks(data))
max_peak = peaks[0,1]
for idx, peak in enumerate(peaks):
if peak[1] < percentage * max_peak:
return [int(i) for i in sorted(peaks[:idx, 0])]
return [int(i) for i in sorted(peaks[:,0])]
def mean(val_a, val_b):
"""
A function returning the mean of both values.
    This function is re-implemented here so that it can be passed as choice_func to the function "values_as_slop()" (see below) from external projects.
Parameters
----------
val_a : float
First value.
val_b : float
Second value.
Returns
-------
float: mean of both values.
"""
return (val_a + val_b) / 2
def values_as_slop(value, choice_func = max):
"""
Compute peaks of a value (typically novelty measure)
as the difference between absolute peaks and absolute valleys.
Function choice_func determines the way of computing this gap.
    Typically, max will compute peaks as the maximum gap between a peak and its two closest valleys,
whereas min will select the minimal gap.
    This returns an array containing zeroes where there is no peak in absolute value,
    and the computed gap where there were peaks before.
Parameters
----------
value : array of float
The absolute value of the measure.
choice_func : function name, optional
Type of the function selecting the difference between peaks and valleys.
Classical values are "max" for selecting the maximum gap between the peak and both its closest valleys,
"min" for the minimum of both gaps, and "mean" (called autosimilarity_segmentation.mean) for the mean of both gaps.
The default is max.
Returns
-------
peak_valley_slop : array of floats
The new values of peaks as gaps, and 0 everywhere else.
"""
peaks = peak_picking(value, window_size = 1)
valleys = valley_picking(value, window_size = 1)
peak_valley_slop = np.zeros(len(value))
for peak in peaks:
i = 0
while i < len(valleys) and valleys[i] < peak:
i+=1
if i == 0:
left_valley = 0
right_valley = valleys[i]
elif i == len(valleys):
left_valley = valleys[i - 1]
right_valley = 0
else:
left_valley = valleys[i - 1]
right_valley = valleys[i]
chosen_valley_value = choice_func(value[left_valley], value[right_valley])
peak_valley_slop[peak] = value[peak] - chosen_valley_value
return peak_valley_slop
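# Hedged example (added for illustration): converting absolute novelty values
# into peak-to-valley gaps with the function above (arbitrary toy curve).
def _example_values_as_slop():
    curve = np.array([0.0, 1.0, 0.2, 0.8, 0.1, 0.9, 0.0])
    return values_as_slop(curve, choice_func=max)
    # nonzero only at the peak indexes 1, 3 and 5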
|
py | 1a5517247f6d533d8fa143fdadd02ca3f6d0b299 | from __future__ import print_function
from colorama import *
import webbrowser
import sys
import time
# Initialize colored output and set colors
init()
# Get settings from file
file = open('settings.txt', 'r')
settings = file.readlines()
file.close()
# Set timer in seconds
pomodoro = int(settings[1])*60
# Set URL to open
url = settings[4].strip()
# Header
print(" ----------------- ")
print(Fore.GREEN + " MyPymodoro v1.0 " + Style.RESET_ALL)
print(" ----------------- ")
print(Fore.YELLOW + " http://dvt32.blogspot.com/\n" + Style.RESET_ALL)
# Time left information
print (" Timer started! Break coming up in " + Back.RED + str(pomodoro / 60) + " minutes" + Style.RESET_ALL + "!\n")
# Print time elapsed
for second in range(pomodoro):
print (" Time left until break: " +
Fore.YELLOW +
        str(pomodoro // 60) + " minute(s), " +
str(pomodoro % 60) + " seconds" + " " +
Style.RESET_ALL,
end="\r")
sys.stdout.flush()
pomodoro -= 1
time.sleep(1)
# Load video after time is up
webbrowser.open(url)
|
py | 1a551826d2783bbf6c89afb94f1645a841baa7f9 | """
source: https://stackoverflow.com/questions/37117878/generating-a-filled-polygon-inside-a-numpy-array
"""
import numpy as np
import imageio
def polygon(a, vertices):
    fill = np.ones(a.shape, dtype=bool)
idx = np.indices(a.shape)
# loop over pairs of corner points
for k in range(vertices.shape[0]):
p1, p2 = vertices[k-1], vertices[k]
dist = p2 - p1
max_idx = (idx[0] - p1[0]) / dist[0] * dist[1] + p1[1]
sign = np.sign(dist[0])
check = idx[1] * sign <= max_idx * sign
fill = np.all([fill, check], axis=0)
a[fill] = 127
# clockwise!
vertices = np.array([[50,120], [80,380], [230,240]])
a = np.zeros((400, 400), dtype=np.uint8)
polygon(a, vertices)
imageio.imsave('triangle.png', a)
|
py | 1a55183813ef955fd81de3330237e857a7d932de | #!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""ADC library
"""
import laygo
import numpy as np
from math import log
import yaml
import os
import laygo.GridLayoutGeneratorHelper as laygenhelper #utility functions
#import logging;logging.basicConfig(level=logging.DEBUG)
def create_power_pin_from_inst(laygen, layer, gridname, inst_left, inst_right):
"""create power pin"""
rvdd0_pin_xy = laygen.get_inst_pin_xy(inst_left.name, 'VDD', gridname, sort=True)
rvdd1_pin_xy = laygen.get_inst_pin_xy(inst_right.name, 'VDD', gridname, sort=True)
rvss0_pin_xy = laygen.get_inst_pin_xy(inst_left.name, 'VSS', gridname, sort=True)
rvss1_pin_xy = laygen.get_inst_pin_xy(inst_right.name, 'VSS', gridname, sort=True)
laygen.pin(name='VDD', layer=layer, xy=np.vstack((rvdd0_pin_xy[0], rvdd1_pin_xy[1])), gridname=gridname)
laygen.pin(name='VSS', layer=layer, xy=np.vstack((rvss0_pin_xy[0], rvss1_pin_xy[1])), gridname=gridname)
def generate_r2r_dac_bcap_array(laygen, objectname_pfix, templib_logic, placement_grid, routing_grid_m4m5, routing_grid_m5m6,
rg_m3m4_basic_thick, rg_m5m6_thick, m, num_bits, num_hori, num_vert,
origin=np.array([0, 0])):
"""generate r2rdac """
r2r_name = 'r2r_dac_bcap'
sar_name = 'sar_wsamp'
tisar_name='tisaradc_body'
ret_name = 'adc_retimer'
tgate_name = 'tgate_' + str(m) + 'x'
pg = placement_grid
rg_m4m5 = routing_grid_m4m5
rg_m5m6 = routing_grid_m5m6
# rg_m4m5 = routing_grid_m4m5
# rg_m4m5_basic_thick = routing_grid_m4m5_basic_thick
# rg_m4m5_thick = routing_grid_m4m5_thick
# rg_m5m6 = routing_grid_m5m6
# rg_m5m6_thick = routing_grid_m5m6_thick
# rg_m5m6_thick_basic = routing_grid_m5m6_thick_basic
# rg_m6m7_thick = routing_grid_m6m7_thick
# Calculate reference coordinate
x1_phy = laygen.get_template_xy(name=r2r_name, gridname=None, libname=workinglib)[0] * num_hori
pin_origin_x = laygen.grids.get_absgrid_x(rg_m5m6_thick_basic, x1_phy)
if use_sf == True and vref_sf == True:
y1_phy = origin[1] + laygen.get_template_xy(name=sar_name, gridname=None, libname=workinglib)[1] \
+ laygen.get_template_xy(name=ret_name, gridname=None, libname=workinglib)[1]
pin_origin_y = laygen.grids.get_absgrid_y(rg_m5m6, y1_phy)
elif use_offset == True:
pin_origin_y = origin[1] + laygen.get_template_pin_xy(tisar_name, 'OSM'+str(num_slices-1), gridname=rg_m5m6, libname=workinglib)[0][1] - num_slices*2 - 6
else:
y1_phy = origin[1] + laygen.get_template_xy(name=sar_name, gridname=None, libname=workinglib)[1] \
+ laygen.get_template_xy(name=ret_name, gridname=None, libname=workinglib)[1]
pin_origin_y = laygen.grids.get_absgrid_y(rg_m5m6, y1_phy)
if use_sf == True:
pin_origin_y1_thick = origin[1] + \
laygen.get_template_pin_xy(sar_name, 'SF_BIAS', rg_m5m6_thick, libname=workinglib)[0][1] \
+ laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
else:
pin_origin_y1_thick = origin[1] + 0 \
+ laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
if vref_sf == True:
pin_origin_y0_thick = origin[1] + \
laygen.get_template_pin_xy(sar_name, 'VREF_SF_BIAS', rg_m5m6_thick, libname=workinglib)[0][1] \
+ laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
else:
pin_origin_y0_thick = origin[1] + 2 \
+ laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
# pin_origin_y0_thick = laygen.grids.get_absgrid_y(rg_m5m6_thick, y0_phy)
# placement
irdac = []
for i in range(num_vert):
if i == 0:
irdac.append(laygen.relplace(name="I" + objectname_pfix + 'IBCAP' + str(i), templatename=r2r_name,
gridname=pg, refinstname=None, xy=origin, shape=[num_hori, 1],
template_libname=workinglib))
else:
irdac.append(laygen.relplace(name="I" + objectname_pfix + 'IBCAP' + str(i), templatename=r2r_name,
gridname=pg, refinstname=irdac[-1].name, shape=[num_hori, 1],
template_libname=workinglib, direction='top'))
# output routing
for i in range(num_hori):
for j in range(num_vert):
if i == num_hori - 1 and j == num_vert - 1: # VREF_SF_BIAS routing with thick wire
rv1, rh1 = laygen.route_vh(laygen.layers['metal'][5], laygen.layers['metal'][6],
xy0=laygen.get_inst_pin_xy(irdac[j].name, 'I', rg_m5m6_thick,
index=np.array([i, 0]))[0] + np.array([j, 0]),
xy1=np.array([pin_origin_x, pin_origin_y0_thick]),
gridname0=rg_m5m6_thick)
laygen.via(None, xy=laygen.get_inst_pin_xy(irdac[j].name, 'I', rg_m4m5, index=np.array([i, 0]))[
0] + np.array([j, 0]), gridname=rg_m4m5)
laygen.boundary_pin_from_rect(rh1, rg_m5m6_thick, 'out<' + str(num_hori * j + i) + '>',
laygen.layers['pin'][6], size=4, direction='right')
elif num_hori * j + i == num_slices * 2: # SF_BIAS routing with thick wire
rv1, rh1 = laygen.route_vh(laygen.layers['metal'][5], laygen.layers['metal'][6],
xy0=laygen.get_inst_pin_xy(irdac[j].name, 'I', rg_m5m6_thick,
index=np.array([i, 0]))[0] + np.array([j, 0]),
xy1=np.array([pin_origin_x, pin_origin_y1_thick]),
gridname0=rg_m5m6_thick)
laygen.via(None, xy=laygen.get_inst_pin_xy(irdac[j].name, 'I', rg_m4m5, index=np.array([i, 0]))[
0] + np.array([j, 0]), gridname=rg_m4m5)
laygen.boundary_pin_from_rect(rh1, rg_m5m6_thick, 'out<' + str(num_hori * j + i) + '>',
laygen.layers['pin'][6], size=4, direction='right')
else:
rv0, rh0 = laygen.route_vh(laygen.layers['metal'][5], laygen.layers['metal'][6],
xy0=laygen.get_inst_pin_xy(irdac[j].name, 'I', rg_m5m6_thick_basic,
index=np.array([i, 0]))[0] + np.array([j, 0]),
xy1=np.array([pin_origin_x, pin_origin_y + 4 + num_hori * j + i]),
gridname0=rg_m5m6_thick_basic)
laygen.via(None, xy=laygen.get_inst_pin_xy(irdac[j].name, 'I', rg_m4m5_thick, index=np.array([i, 0]))[
0] + np.array([j, 0]), gridname=rg_m4m5_thick)
laygen.boundary_pin_from_rect(rh0, rg_m5m6_thick_basic, 'out<' + str(num_hori * j + i) + '>',
laygen.layers['pin'][6], size=4, direction='right')
pin_origin_y2_thick = laygen.grids.get_absgrid_y(rg_m5m6_thick, laygen.grids.get_phygrid_y(rg_m5m6,
pin_origin_y + 4 + num_hori * num_vert))
# m5 supply
pdict_m5m6_thick = laygen.get_inst_pin_xy(None, None, rg_m5m6_thick)
rvdd_m5 = []
rvss_m5 = []
for i in range(num_hori):
for p in pdict_m5m6_thick[irdac[0].name]:
if p.startswith('VDD'):
r0 = laygen.route(None, laygen.layers['metal'][5],
xy0=laygen.get_inst_pin_xy(irdac[0].name, p, rg_m5m6_thick, index=np.array([i, 0]))[
0],
xy1=laygen.get_inst_pin_xy(irdac[num_vert - 1].name, p, rg_m5m6_thick,
index=np.array([i, 0]))[1],
gridname0=rg_m5m6_thick)
rvdd_m5.append(r0)
for p in pdict_m5m6_thick[irdac[0].name]:
if p.startswith('VSS'):
r0 = laygen.route(None, laygen.layers['metal'][5],
xy0=laygen.get_inst_pin_xy(irdac[0].name, p, rg_m5m6_thick, index=np.array([i, 0]))[
0],
xy1=laygen.get_inst_pin_xy(irdac[num_vert - 1].name, p, rg_m5m6_thick,
index=np.array([i, 0]))[1],
gridname0=rg_m5m6_thick)
rvss_m5.append(r0)
# m6
# print(pin_origin_y0_thick, pin_origin_y1_thick, pin_origin_y2_thick)
# input_rails_rect = [rvdd_m5, rvss_m5]
# rvdd_m6_0, rvss_m6_0 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_0_',
# layer=laygen.layers['pin'][6],
# gridname=rg_m5m6_thick,
# netnames=['VDD', 'VSS'],
# direction='x',
# input_rails_rect=input_rails_rect,
# generate_pin=True,
# overwrite_start_coord=None,
# overwrite_end_coord=None,
# offset_start_index=0,
# overwrite_end_index=pin_origin_y0_thick - 2)
# rvdd_m6_1, rvss_m6_1 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_1_',
# layer=laygen.layers['pin'][6],
# gridname=rg_m5m6_thick,
# netnames=['VDD', 'VSS'],
# direction='x',
# input_rails_rect=input_rails_rect,
# generate_pin=True,
# overwrite_start_coord=None,
# overwrite_end_coord=None,
# overwrite_start_index=pin_origin_y0_thick + 2,
# overwrite_end_index=pin_origin_y1_thick - 2)
# rvdd_m6_2, rvss_m6_2 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_2_',
# layer=laygen.layers['pin'][6],
# gridname=rg_m5m6_thick,
# netnames=['VDD', 'VSS'],
# direction='x',
# input_rails_rect=input_rails_rect,
# generate_pin=True,
# overwrite_start_coord=None,
# overwrite_end_coord=None,
# overwrite_start_index=pin_origin_y2_thick + 2,
# offset_end_index=0)
#m6 (extract VDD/VSS grid from tisar and make power pins)
tisar_name = 'tisaradc_body_core'
tisar_libname = 'adc_sar_generated'
rg_m5m6_thick_temp_tisar='route_M5_M6_thick_temp_tisar_VDD'
# bnd = laygen.get_template(tisar_name, libname=tisar_libname).xy
laygenhelper.generate_grids_from_template(laygen, gridname_input=rg_m5m6_thick, gridname_output=rg_m5m6_thick_temp_tisar,
template_name=tisar_name, template_libname=tisar_libname,
template_pin_prefix=['VDD'], bnd=None, xy_grid_type='ygrid')
input_rails_rect = [rvdd_m5]
rvdd_m6 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_',
layer=laygen.layers['pin'][6], gridname=rg_m5m6_thick_temp_tisar, netnames=['VDD'], direction='x',
input_rails_rect=input_rails_rect, generate_pin=True, overwrite_start_coord=None, overwrite_end_coord=None,
offset_start_index=0, offset_end_index=0)
rg_m5m6_thick_temp_tisar='route_M5_M6_thick_temp_tisar_VSS'
laygenhelper.generate_grids_from_template(laygen, gridname_input=rg_m5m6_thick, gridname_output=rg_m5m6_thick_temp_tisar,
template_name=tisar_name, template_libname=tisar_libname,
template_pin_prefix=['VSS'], xy_grid_type='ygrid')
input_rails_rect = [rvss_m5]
rvss_m6 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_',
layer=laygen.layers['pin'][6], gridname=rg_m5m6_thick_temp_tisar, netnames=['VSS'], direction='x',
input_rails_rect=input_rails_rect, generate_pin=True, overwrite_start_coord=None, overwrite_end_coord=None,
offset_start_index=0, offset_end_index=0)
def generate_r2r_dac_array(laygen, objectname_pfix, templib_logic, placement_grid, routing_grid_m4m5, routing_grid_m5m6,
rg_m3m4_basic_thick, rg_m5m6_thick, m, num_bits, num_hori, num_vert, origin=np.array([0, 0])):
"""generate r2rdac """
r2r_name='r2r_dac'
bcap_name='r2r_dac_bcap_array'
sar_name='sar_wsamp'
tisar_name='tisaradc_body'
ret_name='adc_retimer'
tgate_name = 'tgate_'+str(m)+'x'
pg = placement_grid
rg_m4m5 = routing_grid_m4m5
rg_m5m6 = routing_grid_m5m6
# rg_m4m5 = routing_grid_m4m5
# rg_m4m5_basic_thick = routing_grid_m4m5_basic_thick
# rg_m4m5_thick = routing_grid_m4m5_thick
# rg_m5m6 = routing_grid_m5m6
# rg_m5m6_thick = routing_grid_m5m6_thick
# rg_m5m6_thick_basic = routing_grid_m5m6_thick_basic
# rg_m6m7_thick = routing_grid_m6m7_thick
#boundaries
x0=laygen.templates.get_template('capdac', workinglib).xy[1][0] - \
laygen.templates.get_template('boundary_bottomleft').xy[1][0]*2
m_bnd_float = x0 / laygen.templates.get_template('boundary_bottom').xy[1][0]
m_bnd = int(m_bnd_float)
if not m_bnd_float == m_bnd:
m_bnd += 1
#Calculate reference coordinate
bcap_origin = np.array([laygen.get_template_xy(name=r2r_name, gridname=pg, libname=workinglib)[0]*num_hori, 0])
x1_phy = laygen.get_template_xy(name=r2r_name, gridname=None, libname=workinglib)[0]*num_hori \
+ laygen.get_template_xy(name=bcap_name, gridname=None, libname=workinglib)[0]
pin_origin_x = laygen.grids.get_absgrid_x(rg_m5m6, x1_phy)
pin_origin_x_thick = laygen.grids.get_absgrid_x(rg_m5m6_thick, x1_phy)
if use_sf == True and vref_sf == True:
y1_phy = origin[1] + laygen.get_template_xy(name=sar_name, gridname=None, libname=workinglib)[1] \
+ laygen.get_template_xy(name=ret_name, gridname=None, libname=workinglib)[1]
pin_origin_y = laygen.grids.get_absgrid_y(rg_m5m6, y1_phy)
elif use_offset == True:
pin_origin_y = origin[1] + laygen.get_template_pin_xy(tisar_name, 'OSM'+str(num_slices-1), gridname=rg_m5m6, libname=workinglib)[0][1] - num_slices*2 - 6
else:
y1_phy = origin[1] + laygen.get_template_xy(name=sar_name, gridname=None, libname=workinglib)[1] \
+ laygen.get_template_xy(name=ret_name, gridname=None, libname=workinglib)[1]
pin_origin_y = laygen.grids.get_absgrid_y(rg_m5m6, y1_phy)
# pin_origin_y1_thick = origin[1] + laygen.get_template_pin_xy(sar_name, 'SF_BIAS', rg_m5m6_thick, libname=workinglib)[0][1] \
# + laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
# pin_origin_y0_thick = origin[1] + laygen.get_template_pin_xy(sar_name, 'VREF_SF_BIAS', rg_m5m6_thick, libname=workinglib)[0][1] \
# + laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
if use_sf == True:
pin_origin_y1_thick = origin[1] + \
laygen.get_template_pin_xy(sar_name, 'SF_BIAS', rg_m5m6_thick, libname=workinglib)[0][1] \
+ laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
else:
pin_origin_y1_thick = origin[1] + 0 \
+ laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
if vref_sf == True:
pin_origin_y0_thick = origin[1] + \
laygen.get_template_pin_xy(sar_name, 'VREF_SF_BIAS', rg_m5m6_thick, libname=workinglib)[0][1] \
+ laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
else:
pin_origin_y0_thick = origin[1] + 2 \
+ laygen.get_template_xy(name=ret_name, gridname=rg_m5m6_thick, libname=workinglib)[1]
# pin_origin_y0_thick = laygen.grids.get_absgrid_y(rg_m5m6_thick, y0_phy)
# placement
irdac = []
for i in range(num_vert):
if i == 0:
irdac.append(laygen.relplace(name="I" + objectname_pfix + 'IRDAC'+str(i), templatename=r2r_name,
gridname=pg, refinstname=None, xy=origin, shape=[num_hori, 1], template_libname=workinglib))
else:
irdac.append(laygen.relplace(name="I" + objectname_pfix + 'IRDAC'+str(i), templatename=r2r_name,
gridname=pg, refinstname=irdac[-1].name, shape=[num_hori, 1], template_libname=workinglib, direction='top'))
ibcap = laygen.relplace(name="I" + objectname_pfix + 'IBCAP', templatename=bcap_name,
gridname=pg, refinstname=None, xy=bcap_origin, template_libname=workinglib)
# output routing
for i in range(num_hori):
for j in range(num_vert):
if i == num_hori-1 and j == num_vert-1: # VREF_SF_BIAS routing with thick wire
rv1, rh1 = laygen.route_vh(laygen.layers['metal'][5], laygen.layers['metal'][6],
xy0=laygen.get_inst_pin_xy(irdac[j].name, 'out', rg_m5m6_basic_thick,
index=np.array([i, 0]))[0] - np.array([j, -1]),
xy1=np.array([pin_origin_x, pin_origin_y0_thick]),
gridname0=rg_m5m6_basic_thick)
laygen.via(None, xy=laygen.get_inst_pin_xy(irdac[j].name, 'out', rg_m4m5, index=np.array([i, 0]))[
0] - np.array([j, 0]), gridname=rg_m4m5)
laygen.boundary_pin_from_rect(rh1, rg_m5m6_thick, 'out<' + str(num_hori * j + i) + '>',
laygen.layers['pin'][6], size=4, direction='right')
elif num_hori * j + i == num_slices*2: # SF_BIAS routing with thick wire
rv1, rh1 = laygen.route_vh(laygen.layers['metal'][5], laygen.layers['metal'][6],
xy0=laygen.get_inst_pin_xy(irdac[j].name, 'out', rg_m5m6_basic_thick,
index=np.array([i, 0]))[0] - np.array([j, -1]),
xy1=np.array([pin_origin_x, pin_origin_y1_thick]),
gridname0=rg_m5m6_basic_thick)
laygen.via(None, xy=laygen.get_inst_pin_xy(irdac[j].name, 'out', rg_m4m5, index=np.array([i, 0]))[
0] - np.array([j, 0]), gridname=rg_m4m5)
laygen.boundary_pin_from_rect(rh1, rg_m5m6_thick, 'out<' + str(num_hori * j + i) + '>',
laygen.layers['pin'][6], size=4, direction='right')
else:
rv0, rh0 = laygen.route_vh(laygen.layers['metal'][5], laygen.layers['metal'][6],
xy0=laygen.get_inst_pin_xy(irdac[j].name, 'out', rg_m5m6, index=np.array([i,0]))[0]-np.array([j,0]),
xy1=np.array([pin_origin_x, pin_origin_y+4+num_hori*j+i]), gridname0=rg_m5m6)
laygen.via(None, xy=laygen.get_inst_pin_xy(irdac[j].name, 'out', rg_m4m5, index=np.array([i,0]))[0]-np.array([j,0]), gridname=rg_m4m5)
laygen.boundary_pin_from_rect(rh0, rg_m5m6, 'out<'+str(num_hori*j+i)+'>', laygen.layers['pin'][6], size=4, direction='right')
pin_origin_y2_thick=laygen.grids.get_absgrid_y(rg_m5m6_thick, laygen.grids.get_phygrid_y(rg_m5m6, pin_origin_y+4+num_hori*num_vert))
# pin_origin_y2_thick=laygen.get_rect_xy(rh0.name, rg_m5m6_thick)[1][1]
# input routing
# tgate_x = laygen.get_template_xy(tgate_name, gridname=rg_m4m5, libname=logictemplib)[0]
for i in range(num_hori):
for j in range(num_vert):
x_ref = laygen.get_inst_pin_xy(irdac[j].name, 'SEL<0>', rg_m4m5, index=np.array([i, 0]))[1][0]
for k in range(num_bits):
rh0, rv0 = laygen.route_hv(laygen.layers['metal'][4], laygen.layers['metal'][5],
xy0=laygen.get_inst_pin_xy(irdac[j].name, 'SEL<'+str(k)+'>', rg_m4m5, index=np.array([i, 0]))[0],
xy1=np.array([x_ref + 12 + num_bits * j + k, 0]), gridname0=rg_m4m5)
laygen.boundary_pin_from_rect(rv0, rg_m4m5, 'SEL<'+str((num_hori*j+i)*num_bits+k)+'>', laygen.layers['pin'][5], size=4, direction='bottom')
# m5 supply
pdict_m5m6_thick = laygen.get_inst_pin_xy(None, None, rg_m5m6_thick)
rvdd_m5=[]
rvss_m5=[]
for i in range(num_hori):
for p in pdict_m5m6_thick[irdac[0].name]:
if p.startswith('VDD'):
r0=laygen.route(None, laygen.layers['metal'][5],
xy0=laygen.get_inst_pin_xy(irdac[0].name, p, rg_m5m6_thick, index=np.array([i,0]))[0],
xy1=laygen.get_inst_pin_xy(irdac[num_vert-1].name, p, rg_m5m6_thick, index=np.array([i,0]))[1],
gridname0=rg_m5m6_thick)
rvdd_m5.append(r0)
for p in pdict_m5m6_thick[irdac[0].name]:
if p.startswith('VSS'):
r0=laygen.route(None, laygen.layers['metal'][5],
xy0=laygen.get_inst_pin_xy(irdac[0].name, p, rg_m5m6_thick, index=np.array([i,0]))[0],
xy1=laygen.get_inst_pin_xy(irdac[num_vert-1].name, p, rg_m5m6_thick, index=np.array([i,0]))[1],
gridname0=rg_m5m6_thick)
rvss_m5.append(r0)
# m6
# print(pin_origin_y0_thick, pin_origin_y1_thick, pin_origin_y2_thick)
# input_rails_rect = [rvdd_m5, rvss_m5]
# rvdd_m6_0, rvss_m6_0 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_0_',
# layer=laygen.layers['pin'][6],
# gridname=rg_m5m6_thick,
# netnames=['VDD', 'VSS'],
# direction='x',
# input_rails_rect=input_rails_rect,
# generate_pin=True,
# overwrite_start_coord=None,
# overwrite_end_coord=pin_origin_x_thick-2,
# offset_start_index=0,
# overwrite_end_index=pin_origin_y0_thick-2)
# rvdd_m6_1, rvss_m6_1 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_1_',
# layer=laygen.layers['pin'][6],
# gridname=rg_m5m6_thick,
# netnames=['VDD', 'VSS'],
# direction='x',
# input_rails_rect=input_rails_rect,
# generate_pin=True,
# overwrite_start_coord=None,
# overwrite_end_coord=pin_origin_x_thick-2,
# overwrite_start_index=pin_origin_y0_thick+2,
# overwrite_end_index=pin_origin_y1_thick-2)
# rvdd_m6_2, rvss_m6_2 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_2_',
# layer=laygen.layers['pin'][6],
# gridname=rg_m5m6_thick,
# netnames=['VDD', 'VSS'],
# direction='x',
# input_rails_rect=input_rails_rect,
# generate_pin=True,
# overwrite_start_coord=None,
# overwrite_end_coord=pin_origin_x_thick-2,
# overwrite_start_index=pin_origin_y2_thick+2,
# offset_end_index=0)
# m6 (extract VDD/VSS grid from tisar and make power pins)
tisar_name = 'tisaradc_body_core'
tisar_libname = 'adc_sar_generated'
rg_m5m6_thick_temp_tisar = 'route_M5_M6_thick_temp_tisar_VDD'
# x_end = laygen.get_template_xy(r2r_name, gridname=rg_m5m6_thick_temp_tisar, libname=workinglib)[0] + \
# laygen.get_template_xy(bcap_name, gridname=rg_m5m6_thick_temp_tisar, libname=workinglib)[0]
bnd = laygen.get_template(tisar_name, libname=tisar_libname).xy
laygenhelper.generate_grids_from_template(laygen, gridname_input=rg_m5m6_thick,
gridname_output=rg_m5m6_thick_temp_tisar,
template_name=tisar_name, template_libname=tisar_libname,
template_pin_prefix=['VDDSAR'], bnd=bnd, xy_grid_type='ygrid')
laygen.grids.display(libname=None, gridname=rg_m5m6_thick_temp_tisar)
input_rails_rect = [rvdd_m5]
rvdd_m6 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_',
layer=laygen.layers['pin'][6],
gridname=rg_m5m6_thick_temp_tisar, netnames=['VDD'],
direction='x',
input_rails_rect=input_rails_rect, generate_pin=True,
overwrite_start_coord=None, overwrite_end_coord=pin_origin_x_thick,
offset_start_index=0, offset_end_index=0)
rg_m5m6_thick_temp_tisar = 'route_M5_M6_thick_temp_tisar_VSS'
laygenhelper.generate_grids_from_template(laygen, gridname_input=rg_m5m6_thick,
gridname_output=rg_m5m6_thick_temp_tisar,
template_name=tisar_name, template_libname=tisar_libname,
template_pin_prefix=['VSS'], xy_grid_type='ygrid')
laygen.grids.display(libname=None, gridname=rg_m5m6_thick_temp_tisar)
input_rails_rect = [rvss_m5]
rvss_m6 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='_M6_',
layer=laygen.layers['pin'][6],
gridname=rg_m5m6_thick_temp_tisar, netnames=['VSS'],
direction='x',
input_rails_rect=input_rails_rect, generate_pin=True,
overwrite_start_coord=None, overwrite_end_coord=pin_origin_x_thick,
offset_start_index=0, offset_end_index=0)
if __name__ == '__main__':
laygen = laygo.GridLayoutGenerator(config_file="laygo_config.yaml")
import imp
try:
imp.find_module('bag')
laygen.use_phantom = False
except ImportError:
laygen.use_phantom = True
tech=laygen.tech
utemplib = tech+'_microtemplates_dense'
logictemplib = tech+'_logic_templates'
ret_libname = 'adc_retimer_ec'
clkdist_libname = 'clk_dis_generated'
laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)
laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)
laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib)
# laygen.load_template(filename='adc_retimer.yaml', libname=ret_libname)
#laygen.load_template(filename=ret_libname+'.yaml', libname=ret_libname)
laygen.load_template(filename=clkdist_libname+'.yaml', libname=clkdist_libname)
laygen.templates.sel_library(utemplib)
laygen.grids.sel_library(utemplib)
#library load or generation
workinglib = 'adc_sar_generated'
laygen.add_library(workinglib)
laygen.sel_library(workinglib)
if os.path.exists(workinglib+'.yaml'): #generated layout file exists
laygen.load_template(filename=workinglib+'.yaml', libname=workinglib)
laygen.templates.sel_library(utemplib)
#grid
pg = 'placement_basic' #placement grid
rg_m1m2 = 'route_M1_M2_cmos'
rg_m1m2_thick = 'route_M1_M2_thick'
rg_m2m3 = 'route_M2_M3_cmos'
rg_m3m4 = 'route_M3_M4_basic'
rg_m3m4_thick = 'route_M3_M4_thick'
rg_m3m4_basic_thick = 'route_M3_M4_basic_thick'
rg_m4m5 = 'route_M4_M5_basic'
rg_m4m5_thick = 'route_M4_M5_thick'
rg_m4m5_basic_thick = 'route_M4_M5_basic_thick'
rg_m5m6 = 'route_M5_M6_basic'
rg_m5m6_thick = 'route_M5_M6_thick'
rg_m5m6_thick_basic = 'route_M5_M6_thick_basic'
rg_m5m6_basic_thick = 'route_M5_M6_basic_thick'
rg_m5m6_thick2_thick = 'route_M5_M6_thick2_thick'
rg_m6m7_thick = 'route_M6_M7_thick'
rg_m6m7_thick2_thick = 'route_M6_M7_thick2_thick'
rg_m1m2_pin = 'route_M1_M2_basic'
rg_m2m3_pin = 'route_M2_M3_basic'
mycell_list = []
num_bits=9
num_slices=9
slice_order=[0,2,4,6,1,3,5,7]
#load from preset
load_from_file=True
yamlfile_spec="adc_sar_spec.yaml"
yamlfile_size="adc_sar_size.yaml"
if load_from_file==True:
with open(yamlfile_spec, 'r') as stream:
specdict = yaml.load(stream)
with open(yamlfile_size, 'r') as stream:
sizedict = yaml.load(stream)
num_bits=sizedict['r2rdac']['num_bits']
num_slices=specdict['n_interleave']
slice_order=sizedict['slice_order']
use_sf=specdict['use_sf']
vref_sf=specdict['use_vref_sf']
use_offset=specdict['use_offset']
m=sizedict['r2rdac']['m']
num_series=sizedict['r2rdac']['num_series']
num_hori=sizedict['r2rdac_array']['num_hori']
num_vert=sizedict['r2rdac_array']['num_vert']
# r2r dac bcap
cellname = 'r2r_dac_bcap_array'
print(cellname + " generating")
mycell_list.append(cellname)
laygen.add_cell(cellname)
laygen.sel_cell(cellname)
generate_r2r_dac_bcap_array(laygen, objectname_pfix='R2R_BCAP_ARRAY', templib_logic=logictemplib, placement_grid=pg, routing_grid_m4m5=rg_m4m5_thick,
routing_grid_m5m6=rg_m5m6, rg_m3m4_basic_thick=rg_m3m4_basic_thick, rg_m5m6_thick=rg_m5m6_thick,
m=m, num_bits=num_bits, num_hori=num_hori, num_vert=num_vert, origin=np.array([0, 0]))
laygen.add_template_from_cell()
# r2r dac
cellname = 'r2r_dac_array'
print(cellname + " generating")
mycell_list.append(cellname)
laygen.add_cell(cellname)
laygen.sel_cell(cellname)
generate_r2r_dac_array(laygen, objectname_pfix='R2R_ARRAY', templib_logic=logictemplib, placement_grid=pg, routing_grid_m4m5=rg_m4m5,
routing_grid_m5m6=rg_m5m6, rg_m3m4_basic_thick=rg_m3m4_basic_thick, rg_m5m6_thick=rg_m5m6_thick,
m=m, num_bits=num_bits, num_hori=num_hori, num_vert=num_vert, origin=np.array([0, 0]))
laygen.add_template_from_cell()
laygen.save_template(filename=workinglib+'.yaml', libname=workinglib)
#bag export, if bag does not exist, gds export
import imp
try:
imp.find_module('bag')
import bag
prj = bag.BagProject()
for mycell in mycell_list:
laygen.sel_cell(mycell)
laygen.export_BAG(prj, array_delimiter=['[', ']'])
except ImportError:
laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech+".layermap") # change layermapfile
|
py | 1a55187e200c4f1210e44867ecdcd18df7eeeef2 | """
Misc. general utility functions, not tied to Kubespawner directly
"""
import random
import hashlib
from traitlets import TraitType
def generate_hashed_slug(slug, limit=63, hash_length=6):
"""
Generate a unique name that's within a certain length limit
Most k8s objects have a 63 char name limit. We wanna be able to compress
larger names down to that if required, while still maintaining some
amount of legibility about what the objects really are.
If the length of the slug is shorter than the limit - hash_length, we just
return slug directly. If not, we truncate the slug to (limit - hash_length)
characters, hash the slug and append hash_length characters from the hash
to the end of the truncated slug. This ensures that these names are always
unique no matter what.
"""
if len(slug) < (limit - hash_length):
return slug
slug_hash = hashlib.sha256(slug.encode('utf-8')).hexdigest()
return '{prefix}-{hash}'.format(
prefix=slug[:limit - hash_length - 1],
hash=slug_hash[:hash_length],
).lower()
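# Usage sketch (added for illustration; the hash digits shown are hypothetical):
#
#   generate_hashed_slug('jupyter-' + 'a' * 70)
#   # -> the first 56 characters of the slug, then '-', then the first 6 hex
#   #    digits of its sha256, e.g. '...aaaa-3f2c1a' (63 characters in total)
#
#   generate_hashed_slug('short-name')  # under the limit, returned unchanged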
class Callable(TraitType):
"""A trait which is callable.
Notes
-----
Classes are callable, as are instances
with a __call__() method."""
info_text = 'a callable'
def validate(self, obj, value):
if callable(value):
return value
else:
self.error(obj, value)
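# Usage sketch (added for illustration; the Configurable subclass and trait
# name below are hypothetical, not part of this module):
#
#   from traitlets.config import Configurable
#
#   class MySpawner(Configurable):
#       modify_pod_hook = Callable(None, allow_none=True, config=True,
#                                  help="callable(spawner, pod) -> pod")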
|
py | 1a551a4239632c939b54c758d6b2cdb8e8ab7888 | #________INDEX____________.
# |
# 3 functions |
# (6=3+2+1) |
# |
# -4 auxiliary |
# -1 main) |
# |
# (if __name__==__main__) |
#_________________________|
import sys
import re
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from scipy.stats import dirichlet, beta, bernoulli
from sklearn.metrics import precision_score, recall_score, f1_score
from functools import partial
#-------------------------------------------------------
# First part: Four (4) auxiliary generic functions
#
# (1) "tags_dict" : mapping from numbers
# to composers' names
#
# (2) "four_plots" : compare four classifiers
# for a specific composer
# in a specific axes
# (humans, random, random forests &
# neural network). It's
# a dispatcher for 'plotter'
#
# (3) "plotter" : the actual plotter func
#
# (4) "measure_dispatcher": choose the comparison measure
#
#---------------------------------------------------------
# (1)
def tags_dict():
with open('tags.dat','r') as f:
tags_list = f.readlines()
tags = {}
key_pattern = re.compile('item:[ ]+([a-z]+)',re.I)
item_pattern = re.compile('key:[ ]+([0-9]+)',re.I)
for x in tags_list:
tags[re.search(
key_pattern, x
).group(1)] = re.search(
item_pattern, x
).group(1)
inverted_tags = {}
for k,i in tags.items(): inverted_tags[i] = k
return tags, inverted_tags
# (2)
def four_plots(name: str, axis, data: dict):
plotter('humans',name, axis, data['humans'])
plotter('network',name, axis, data['network'])
plotter('random',name, axis, data['random'])
plotter('forest',name, axis, data['forest'])
# insert here visual specs!
#---------------------------------------------
#ax.legend()
#ax.set_title(f'{name}')
props = dict(boxstyle='round', facecolor='lavender', alpha=0.3)
    axis.text(0.5, 0.95, name, transform=axis.transAxes, fontsize=12,
              verticalalignment='top', bbox=props)
    axis.set_xlim(0,100)
    axis.set_ylim(0,200)
    axis.set_yticklabels([])
    axis.set_yticks([0,100,200])
    axis.set_xticks([0,25,50,75,100])
#---------------------------------------------
return
# (3)
def plotter(being: str, name: str, axis, data: dict):
# Color-blind friendly palette
colors = {'forest':'#d7191c',
'network':'#fdae61',
'random':'#abd9e9',
'humans':'#2c7bb6',}
# Beta Distribution generation from the measured predictive-quality
ypred = beta.rvs(
1+data[f'total {name}'][0] * data[name][0],
1+data[f'total {name}'][0] * (1 - data[name][0]),
size=4000,
)
# plot with Seaborn that binnarizes automatically
sns.distplot(100*ypred, bins=None, hist=True, kde=False,
label = f'{being}', ax=axis, color=colors[being])
return
# (4)
def measure_dispatcher(measure: str,ytrue: np.ndarray,
ypred:np.ndarray, optrand = 0):
"""
possible measures:
'recall' == TP / (TP + FN)
'precision' == TP / (TP + FP)
'f1' == 2 * TP / (2*TP + FP + FN)
'accuracy' = (TP + TN) / (TN + FN + TP + FP)
"""
# Random ytrue,ypred & Tags
if optrand:
ytrue_rand, ypred_rand = np.split(
np.argmax(
dirichlet.rvs(
alpha = [1,1]*4,
size = 2*optrand
),
1,
).reshape( #split 2
2,-1,
),
2,0,
)
ytrue_rand = ytrue_rand.reshape(-1,)
ypred_rand = ypred_rand.reshape(-1,)
tags = {int(x):y for x,y in tags_dict()[1].items()}
# Recall
def recall(ytrue, ypred):
nonlocal tags
precision = recall_score(ytrue, ypred, average=None)
data = {}
for x in tags.keys():
data[tags[x]] = [precision[x]]
#data[f'total {tags[x]}'] = [np.unique(
# ypred,
# return_counts=True,
# )[1][x]]
data[f'total {tags[x]}'] = [500]
return data
# Precision
def precision(ytrue, ypred):
nonlocal tags
precision = precision_score(ytrue, ypred, average=None)
data = {}
for x in tags.keys():
data[tags[x]] = [precision[x]]
data[f'total {tags[x]}'] = [np.unique(
ypred,
return_counts=True,
)[1][x]]
return data
# F1
def f1(ytrue, ypred):
nonlocal tags
precision = f1_score(ytrue, ypred, average=None)
data = {}
for x in tags.keys():
data[tags[x]] = [precision[x]]
data[f'total {tags[x]}'] = [np.unique(
ypred,
return_counts=True,
)[1][x]]
return data
# Accuracy
def accuracy(ytrue, ypred):
nonlocal tags
data = {}
for x in tags.keys():
temp = []
for i,y in enumerate(ytrue):
if ypred[i]==y and y==x: temp.append(1)
elif ypred[i]==x: temp.append(0)
elif y==x: temp.append(0)
elif y!=x and ypred[i]!=x: temp.append(1)
(data[tags[x]],
data[f'total {tags[x]}']) = ([sum(temp)/len(temp)],
[len(temp)])
return data
func = {'accuracy': accuracy, 'f1':f1,
'precision': precision, 'recall':recall,}
if optrand:
return func[measure](ytrue_rand, ypred_rand)
else:
return func[measure](ytrue, ypred)
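# Shape of the dict returned by measure_dispatcher (illustrative; composer
# names come from tags.dat and the numbers below are made up):
#
#   expand = partial(measure_dispatcher, 'precision')
#   expand(ytrue, ypred)
#   # -> {'Bach': [0.71], 'total Bach': [812],
#   #     'Sor': [0.64], 'total Sor': [790], ...}
#
# i.e. one score per composer plus the number of samples predicted as that
# composer, which is the structure 'plotter' consumes.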
#------------------------------------------------------------------
# M A I N
#------------------------------------------------------------------
def main(measure):
# Define the measure!
#
# (with TP: True Positive
# TN: True Negative
# ...etc)
#
# 'recall' == TP / (TP + FN)
#
# 'precision' == TP / (TP + FP)
#
# 'f1' == 2 * TP / (2*TP + FP + FN)
#
# 'accuracy' = (TP + TN) / (TN + FN + TP + FP)
#____________________________________________
expand = partial(measure_dispatcher, measure)
#--------------------------------------------
# 1) HUMAN answers are loaded
#humans = pd.read_csv('human-predictions.csv').iloc[:,1:]
a=pd.read_csv(
'human-predictions.csv'
).applymap(
lambda x: int(
tags_dict()[0][x]
)
).to_numpy()
humans = pd.DataFrame(expand(a[:,0],a[:,1]))
# 2) Random
samples = 6000
random = expand(range(samples), range(samples), samples)
# 3)
if True:
#----R-A-N-D-O-M--F-O-R-E-S-T---c-l-a-s-s-i-f-i-e-r----.
# choose: |
# 'val' == validation set (more points) |
# |
# 'test' == testing set (more comparable, only |
# over the audio-samples|
# humans answered on) |
name = 'val'
#------------------------------------------------------/
forest = pd.read_csv(name+'-forest-predictions.csv')
forest = expand(forest.iloc[:,0].to_numpy(),
forest.iloc[:,1].to_numpy())
# 4)
if True:
#----------N-E-U-R-A-L--N-E-T-W-O-R-K------------------.
# choose: |
# 'val' == validation set (more points) |
# |
# 'test' == testing set (more comparable, only |
# over the audio-samples|
# humans answered on) |
name = 'val'
#------------------------------------------------------/
network = pd.read_csv(name+'-network-predictions.csv')
network = expand(network.iloc[:,0].to_numpy(),
network.iloc[:,1].to_numpy())
return {'random':random, 'humans':humans,
'network':network, 'forest':forest}
if __name__=='__main__':
# Define a figure with 2x4=8 subplots
nrow = 4; ncol = 2;
alpha = 8
fig, axs = plt.subplots(nrows=nrow, ncols=ncol,
figsize=(alpha*2,alpha),dpi=200)
# Extract composers names from the quiz data
names = ['Scarlatti', 'Sor', 'Bach',
'Vivaldi','Stravinsky',
'Haendel', 'Liszt', 'Haydn', ]
# Generate a dictionary with results
try:
metric = sys.argv[1]
except IndexError:
metric = 'f1'
data = main(metric)
# Plot one composers' classification result in each subplot
for i,ax in enumerate(axs.reshape(-1)):
try:
four_plots(names[i], ax, data)
except IndexError: pass
#------------------------------------------
# Visual specs, Save & View!
#------------------------------------------
fig.text(0.5, 0.89, '(Probability Density Function of the True Positive Rate'\
' conditioned to a positive observation)' ,ha='center',fontsize=11)
fig.text(0.5,0.92, '$PDF(Recall)$', fontsize=14.5,ha='center')
fig.text(0.5, 0.04, 'Probability of a positive being True Positive', ha='center')
fig.text(0.1, 0.5, 'Probability Density', va='center', rotation='vertical')
fig.set_facecolor('ivory')
for i,ax in enumerate(axs.reshape(-1)):
if i==7: ax.legend()
#------------------------------------------
plt.savefig('../RESULTS/RESULTS.png', facecolor='ivory')
|
py | 1a551d97a371a21009f638006f9ca4de89e316b2 | _base_ = [
'../swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py'
]
model = dict(
backbone=dict(
type='CBSwinTransformer',
),
neck=dict(
type='CBFPN',
),
test_cfg = dict(
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms'),
)
)
)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from HTC
data_root = "datasets/objects365/"
data = dict(
train=dict(
ann_file=data_root + 'annotations/instances_train.json',
img_prefix=data_root + 'train/'))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
#img_scale=[(256,256), (256,256)],
multiscale_mode='range',
keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1600, 1400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
samples_per_gpu = 1
data = dict(samples_per_gpu=samples_per_gpu,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer = dict(lr=0.0001*(samples_per_gpu/2))
runner=dict(max_epochs=20)
load_from = "/home/ubuntu/efs/pretrained_weight/cascade_mask_rcnn_cbv2_swin_small_patch4_window7_mstrain_400-1400_adamw_3x_coco.pth" |
py | 1a551dc127ff1fc05f17a25874e1b3746aa54bb7 | def strASCII(s):
c = []
for x in s:
c.append(format(ord(x), 'b').zfill(8))
c = "".join(c)
return c
def ascistr(l,r):
joi = [l,r]
joi = ''.join(joi)
joi = list(map("".join, zip(*[iter(joi)] * 8)))
#print joi
for x in range(len(joi)):
joi[x] = chr(int(joi[x],2))
joi = ''.join(joi)
return joi
|
py | 1a551ed464d04d74bfa042f1350e3a199fa5bb37 |
def main():
print "testing capture_webcam"
import video
video.avconv.AVConverter
if __name__ == "__main__":
main ()
|
py | 1a551f1815e7084bd0fc082672e816c4d5e72be0 | from django.conf import settings
from django.db import models
class PGPManager(models.Manager):
use_for_related_fields = True
use_in_migrations = True
@staticmethod
def _get_pgp_symmetric_decrypt_sql(field):
"""Decrypt sql for symmetric fields using the cast sql if required."""
sql = """pgp_sym_decrypt("{0}"."{1}", '{2}')"""
if hasattr(field, 'cast_sql'):
sql = field.cast_sql % sql
return sql.format(
field.model._meta.db_table,
field.name,
settings.PGCRYPTO_KEY,
)
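    # Illustrative result (table and column names hypothetical): for a model
    # backed by table "profile" with an encrypted column "ssn", this builds
    #   pgp_sym_decrypt("profile"."ssn", '<PGCRYPTO_KEY>')
    # optionally wrapped by the field's cast_sql.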
@staticmethod
def _get_pgp_public_key_decrypt_sql(field):
"""Decrypt sql for public key fields using the cast sql if required."""
sql = """pgp_pub_decrypt("{0}"."{1}", dearmor('{2}'))"""
if hasattr(field, 'cast_sql'):
sql = field.cast_sql % sql
return sql.format(
field.model._meta.db_table,
field.name,
settings.PRIVATE_PGP_KEY,
)
def get_queryset(self, *args, **kwargs):
"""Decryption in queryset through meta programming."""
# importing here otherwise there's a circular reference issue
from pgcrypto.mixins import PGPSymmetricKeyFieldMixin, PGPPublicKeyFieldMixin
skip_decrypt = kwargs.pop('skip_decrypt', None)
qs = super().get_queryset(*args, **kwargs)
# The Django admin skips this process because it's extremely slow
if not skip_decrypt:
select_sql = {}
encrypted_fields = []
for field in self.model._meta.get_fields():
if isinstance(field, PGPSymmetricKeyFieldMixin):
select_sql[field.name] = self._get_pgp_symmetric_decrypt_sql(field)
encrypted_fields.append(field.name)
elif isinstance(field, PGPPublicKeyFieldMixin):
select_sql[field.name] = self._get_pgp_public_key_decrypt_sql(field)
encrypted_fields.append(field.name)
# Django queryset.extra() is used here to add decryption sql to query.
qs = qs.defer(
*encrypted_fields
).extra(
select=select_sql
)
return qs
|
py | 1a551f633e05f58c1ebedf7d3229d3b4903b6f7d | def balanced(exp):
stack = []
for each in exp:
if each in ["(", "{", "["]:
stack.append(each)
else:
if not stack:
return "Not Balanced"
curr_char = stack.pop()
if(curr_char == "(" and each != ")"):
return "Not Balanced"
if(curr_char == "{" and each != "}"):
return "Not Balanced"
if(curr_char == "[" and each != "]"):
return "Not Balanced"
if stack:
return "Not Balanced"
else:
return "Balanced"
exp = "(()){[()]}]"
print(balanced(exp))
|
py | 1a55217de9e6866a0d6f9135aec1065d7d35e4b5 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: image.py
Author: dhilipsiva <[email protected]>
Date created: 2017-02-10
"""
from PIL import ImageFilter
from PIL import Image
size = (128, 128)
im = Image.open("corgi.jpg")
# print(im.format, im.size, im.mode)
# im.thumbnail(size)
# im.show()
box = (1400, 500, 2000, 1000)
region = im.crop(box)
region = region.transpose(Image.ROTATE_90)
# region = region.transpose(Image.FLIP_LEFT_RIGHT)
# region.resize((128, 128))
# region = region.filter(ImageFilter.DETAIL)
region = region.filter(ImageFilter.BLUR)
region.show()
# out = im.rotate(45) # degrees counter-clockwise
|
py | 1a5521b528fdd43cf340e057b9aa2a21fe0993f5 | '''
2016 - 2017 ACSL American Computer Science League
SENIOR DIVISION
Contest #2 ASCENDING STRINGS
'''
from unittest import TestCase
def atFirst(snum):
    '''
    First thoughts (rough draft, superseded by parsNums below):
    alternately peel one digit off the front and one off the back
    until the two ends meet.
    '''
    h, r = 0, len(snum)
    res = []
    while r > h:
        # forward
        res.append(int(snum[h]))
        h += 1
        if h >= r:
            break
        # backward
        res.append(int(snum[r - 1]))
        r -= 1
    return res
def parsNums(snum):
def cutforth(snum, h, r, maxn):
if h >= r: return None, h
l = 1
n = int(snum[h : h + l])
if not maxn: return n, h + 1
while n <= maxn and h + l < r:
l += 1
n = int(snum[h : h + l])
return n if h + l <= r and n > maxn else None, h + l
def cutback(snum, h, r, maxn):
if h >= r: return None, r
l = 1
n = int(snum[r - 1 : r - l - 1 : -1])
if not maxn: return n, r - 1
while n <= maxn and h + l < r:
l += 1
n = int(snum[r - 1 : r - l - 1 : -1])
return n if h + l <= r and n > maxn else None, r - l
h, r = 0, len(snum)
res = []
n = None
while r > h:
# forward, backward
n, h = cutforth(snum, h, r, n)
if n == None: break
res.append(n)
n, r = cutback(snum, h, r, n)
if n == None: break
res.append(n)
return res
if __name__ == "__main__":
t = TestCase()
t.assertCountEqual([3, 8, 14, 35, 159], parsNums('31415926538'))
t.assertCountEqual([3, 5, 14, 62, 159], parsNums('314159265'))
t.assertCountEqual([2, 7, 16], parsNums('201617'))
t.assertCountEqual([1, 9, 23, 87, 456], parsNums('123456789'))
t.assertCountEqual([1, 4, 22, 44, 333], parsNums('1223334444'))
t.assertCountEqual([2, 8, 71, 281, 828], parsNums('2718281828'))
t.assertCountEqual([1, 12, 22, 23], parsNums('12233221'))
t.assertCountEqual([5, 50], parsNums('5005'))
t.assertCountEqual([2, 5], parsNums('250'))
t.assertCountEqual([9], parsNums('9'))
print('OK!')
|
py | 1a5521bdde2cdde1477540d41e2a8f60965a8cf4 | pd = []
op = int(input('number of products: '))
for x in range(op):
    pd.append(float(input('price of product #{}: '.format(x+1))))
pd.sort()
print('''
the product you should buy is the one that costs R${}
and the one you should never buy is the one that costs R${}'''.format(pd[0],pd[-1]))
py | 1a5522cd4d2329376bb92207fce5fada17069c9d | import numpy as np
import os
import torch
class Dataset(torch.utils.data.Dataset):
'Characterizes a dataset for PyTorch'
def __init__(self, trajs, device, steps=20):
'Initialization'
dim = trajs[0].shape[1]
self.x = []
self.x_n = np.zeros((0, dim))
for i in range(steps):
tr_i_all = np.zeros((0,dim))
for tr_i in trajs:
_trj = tr_i[i:i-steps,:]
tr_i_all = np.concatenate((tr_i_all, _trj), 0)
self.x_n = np.concatenate((self.x_n, tr_i[-1:,:]),0)
self.x.append(tr_i_all)
self.x = torch.from_numpy(np.array(self.x)).float().to(device)
self.x_n = torch.from_numpy(np.array(self.x_n)).float().to(device)
self.len_n = self.x_n.shape[0]
self.len = self.x.shape[1]
self.steps_length = steps
self.step = steps - 1 |
py | 1a552508240f1c6bfe6f5b102324254af4a00df8 | from keras.models import load_model
from keras.preprocessing.image import img_to_array, load_img
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import applications
import numpy as np
img_width, img_height = 150, 150
base_model=applications.VGG16(include_top=False,weights='imagenet')
def predict(basedir, targetclass, start, end):
success = 0
for i in range(start, end):
path = basedir + str(i) + '.jpg'
img = load_img(path,False,target_size=(img_width,img_height))
img = img_to_array(img)
img=img.reshape((1,)+img.shape)
img=img/255
feature_img =base_model.predict(img) #then the shape is (1,512,4,4)
model=load_model('bottleneck_fc_model.h5') #my own model which the top is FC layers
classes=model.predict_classes(feature_img)
prob=model.predict_proba(feature_img)
print("class: {0}".format(classes[0][0]))
if classes[0][0] == targetclass:
success = success + 1
print(success)
return success
start = 1401
end = 1411
basedir = "data/test/class1."
success_total = 0
success_total = success_total + predict(basedir, 0, start, end)
basedir = "data/test/class2."
success_total = success_total + predict(basedir, 1, start, end)
percent = success_total*100/(2*(end - start))
print('Result: {0}'.format(percent))
print('done')
|
py | 1a55256203dc3cdf73d101260b1bc4c476f6e418 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
# the name of the project
name = 'nbformat'
#-----------------------------------------------------------------------------
# Minimal Python version sanity check
#-----------------------------------------------------------------------------
import sys
#-----------------------------------------------------------------------------
# get on with it
#-----------------------------------------------------------------------------
import os
from glob import glob
from distutils.core import setup
pjoin = os.path.join
here = os.path.abspath(os.path.dirname(__file__))
pkg_root = pjoin(here, name)
packages = []
for d, _, _ in os.walk(pjoin(here, name)):
if os.path.exists(pjoin(d, '__init__.py')):
packages.append(d[len(here)+1:].replace(os.path.sep, '.'))
package_data = {
'nbformat.tests' : [
'*.ipynb',
],
'nbformat.v3' : [
'nbformat.v3*.schema.json',
],
'nbformat.v4' : [
'nbformat.v4*.schema.json',
],
}
version_ns = {}
with open(pjoin(here, name, '_version.py')) as f:
exec(f.read(), {}, version_ns)
setup_args = dict(
name = name,
version = version_ns['__version__'],
scripts = glob(pjoin('scripts', '*')),
packages = packages,
package_data = package_data,
include_package_data = True,
description = "The Jupyter Notebook format",
long_description= """
This package contains the base implementation of the Jupyter Notebook format,
and Python APIs for working with notebooks.
""",
author = 'Jupyter Development Team',
author_email = '[email protected]',
url = 'http://jupyter.org',
license = 'BSD',
python_requires = '>=3.5',
platforms = "Linux, Mac OS X, Windows",
keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
if 'develop' in sys.argv or any(a.startswith('bdist') for a in sys.argv):
import setuptools
setuptools_args = {}
install_requires = setuptools_args['install_requires'] = [
'ipython_genutils',
'traitlets>=4.1',
'jsonschema>=2.4,!=2.5.0',
'jupyter_core',
]
extras_require = setuptools_args['extras_require'] = {
'fast': ['fastjsonschema'],
'test': ['check-manifest', 'fastjsonschema', 'testpath', 'pytest', 'pytest-cov'],
}
if 'setuptools' in sys.modules:
setup_args.update(setuptools_args)
setup_args['entry_points'] = {
'console_scripts': [
'jupyter-trust = nbformat.sign:TrustNotebookApp.launch_instance',
]
}
setup_args.pop('scripts', None)
if __name__ == '__main__':
setup(**setup_args)
|
py | 1a552563766b89aaf46bdf76fd1c53c110133057 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************
# @Time : 2018/9/9 15:52
# @Author : Xiang Ling
# @Lab : nesa.zju.edu.cn
# @File : defenses.py
# **************************************
import os
from abc import ABCMeta
from abc import abstractmethod
class Defense(object):
__metaclass__ = ABCMeta
def __init__(self, model=None, defense_name=None):
self.model = model
self.defense_name = defense_name
defense_dir = '../DefenseEnhancedModels/{}'.format(self.defense_name)
if self.defense_name not in os.listdir('../DefenseEnhancedModels/'):
os.mkdir(defense_dir)
print('creating the {} folder for storing the {} defense'.format(defense_dir, self.defense_name))
else:
print('the storing {} folder is already existing'.format(defense_dir))
@abstractmethod
def defense(self):
print("abstract method of 'Defenses' is not implemented")
raise NotImplementedError
|
py | 1a55257a4273fb61eff1130f8f5f7e0869ae1b2c | import cloudmesh
from pprint import pprint
cloudmesh.logger(False)
username = cloudmesh.load().username()
cloudmesh.banner("INIT MONGO")
mesh = cloudmesh.mesh("mongo")
#
# authentication as a user - username is requried
# On webgui side, this is achieved by the framework,
# and the username is obtined from g.user.id
#
# On CLI side, a global user object or username variable
# should be maintained upon the start of the shell
# The username could be obtained from yaml file.
cloudmesh.banner("ACTIVATE")
mesh.activate(username)
cloudmesh.banner("GET FLAVOR")
mesh.refresh(username, types=['flavors'], names=["india"])
data = mesh.flavors(cm_user_id=username, clouds=["india"])
pprint(data)
cloudmesh.banner("GET IMAGE")
mesh.refresh(username, types=['images'], names=["india"])
data = mesh.images(cm_user_id=username, clouds=["india"])
pprint(data)
cloudmesh.banner("LIST KEYS")
#keyobj = cloudmesh.cm_keys_mongo(username)
# print keyobj.names()
#
# PROPOSAL FOR NEW MESH API
#
cloud = "india"
cloudmesh.banner("LAUNCH VM INSTANCE")
result = mesh.start(cloud, username)
cloudmesh.banner("TERMINATE VM INSTANCE")
server = result['server']['id']
mesh.delete(cloud, server, username)
cloudmesh.banner("LAUNCH 3 VM INSTANCES")
vm = {}
for i in range(1, 3):
vm[i] = mesh.start(cloud, username)
cloudmesh.banner("TERMINATE 3 VM INSTANCES")
for i in vm:
server = vm[i]['server']['id']
mesh.delete(cloud, server, username)
cloudmesh.banner("GET A FLAVOR")
flavor = mesh.flavor(cloudname="india", flavorname="m1.small")
flavor = mesh.flavor("india", "m1.small")
cloudmesh.banner("GET AN IMAGE")
image = mesh.image(cloudname="india", imagename="futuregrid/ubuntu-14.04")
image = mesh.image("india", "futuregrid/ubuntu-14.04")
cloudmesh.banner("GET A VM NAME")
vmname = mesh.vmname()
print vmname
cloudmesh.banner("SET A VM NAME")
vmname = mesh.vmname(prefix="albert", idx=10)
print vmname
vmname = mesh.vmname("James", 20)
print vmname
cloudmesh.banner("GET A NEXT VM NAME")
vmname = mesh.vmname_next()
print vmname
vmname = mesh.vmname("Brian", "+2")
print vmname
cloudmesh.banner("SET A DEFAULT IMAGE OR A DEFAULT FLAVOR")
mesh.default("india", "image", image)
mesh.default("india", "flavor", flavor)
cloudmesh.banner("START A VM WITH OPTIONS")
cloud = "india"
prefix = "gregor"
index = "10000"
flavor = mesh.flavor("india", "m1.small")
image = mesh.image("india", "futuregrid/ubuntu-14.04")
vm = mesh.start(cloud, username, prefix=prefix, index=index, flavor=flavor,
image=image)
vm = mesh.start("india", username, image=image, flavor=flavor)
server = vm['server']['id']
cloudmesh.banner("ASSIGN PUBLIC IP ADDRESS TO THE VM")
ip = mesh.assign_public_ip(cloud, server, username)
cloudmesh.banner("WAIT")
try:
result = mesh.wait(ipaddr=ip, command="uname -a", interval=10, retry=5)
cloudmesh.banner("RUN A COMMAND VIA SSH TO THE VM")
result = mesh.ssh_execute(ipaddr=ip, command="ls -al")
print result
except:
import sys
print sys.exc_info()[0]
cloudmesh.banner("DELETE THE VM: " + server)
mesh.delete(cloud, server, username)
|
py | 1a5525ce33beacbffd56c8c1df1246d3696c8917 | import argparse
import configparser
from collections import defaultdict
import itertools
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
import os
import random
import time
import numpy as np
import chainer
if chainer.backends.cuda.available:
import cupy as xp
else:
xp = np
from chainercv.utils import non_maximum_suppression
from PIL import ImageDraw, Image
from coco_dataset import get_coco_dataset
from mpii_dataset import get_mpii_dataset
from model import PoseProposalNet
from network_resnet import ResNet50
from utils import parse_size
COLOR_MAP = {}
DIRECTED_GRAPHS = [[]]
DATA_MODULE = None
def get_feature(model, image):
global inference_time
start = time.time()
image = xp.asarray(image)
processed_image = model.feature_layer.prepare(image)
resp, conf, x, y, w, h, e = model.predict(xp.expand_dims(processed_image, axis=0))
resp = chainer.backends.cuda.to_cpu(resp.array)
conf = chainer.backends.cuda.to_cpu(conf.array)
w = chainer.backends.cuda.to_cpu(w.array)
h = chainer.backends.cuda.to_cpu(h.array)
x = chainer.backends.cuda.to_cpu(x.array)
y = chainer.backends.cuda.to_cpu(y.array)
e = chainer.backends.cuda.to_cpu(e.array)
resp = np.squeeze(resp, axis=0)
conf = np.squeeze(conf, axis=0)
x = np.squeeze(x, axis=0)
y = np.squeeze(y, axis=0)
w = np.squeeze(w, axis=0)
h = np.squeeze(h, axis=0)
e = np.squeeze(e, axis=0)
inference_time=time.time() - start
logger.info('inference time {:.5f}'.format(inference_time))
return resp, conf, x, y, w, h, e
def estimate(model, image, detection_thresh=0.15, min_num_keypoints=-1):
feature_map = get_feature(model, image)
return get_humans_by_feature(model, feature_map, detection_thresh, min_num_keypoints)
def get_humans_by_feature(model, feature_map, detection_thresh=0.15, min_num_keypoints=-1):
resp, conf, x, y, w, h, e = feature_map
start = time.time()
delta = resp * conf
K = len(model.keypoint_names)
outW, outH = model.outsize
ROOT_NODE = 0 # instance
start = time.time()
rx, ry = model.restore_xy(x, y)
rw, rh = model.restore_size(w, h)
ymin, ymax = ry - rh / 2, ry + rh / 2
xmin, xmax = rx - rw / 2, rx + rw / 2
bbox = np.array([ymin, xmin, ymax, xmax])
bbox = bbox.transpose(1, 2, 3, 0)
root_bbox = bbox[ROOT_NODE]
score = delta[ROOT_NODE]
candidate = np.where(score > detection_thresh)
score = score[candidate]
root_bbox = root_bbox[candidate]
selected = non_maximum_suppression(
bbox=root_bbox, thresh=0.3, score=score)
root_bbox = root_bbox[selected]
logger.info('detect instance {:.5f}'.format(time.time() - start))
start = time.time()
humans = []
e = e.transpose(0, 3, 4, 1, 2)
ei = 0 # index of edges which contains ROOT_NODE as begin
# alchemy_on_humans
for hxw in zip(candidate[0][selected], candidate[1][selected]):
human = {ROOT_NODE: bbox[(ROOT_NODE, hxw[0], hxw[1])]} # initial
for graph in DIRECTED_GRAPHS:
eis, ts = graph
i_h, i_w = hxw
for ei, t in zip(eis, ts):
index = (ei, i_h, i_w) # must be tuple
u_ind = np.unravel_index(np.argmax(e[index]), e[index].shape)
j_h = i_h + u_ind[0] - model.local_grid_size[1] // 2
j_w = i_w + u_ind[1] - model.local_grid_size[0] // 2
if j_h < 0 or j_w < 0 or j_h >= outH or j_w >= outW:
break
if delta[t, j_h, j_w] < detection_thresh:
break
human[t] = bbox[(t, j_h, j_w)]
i_h, i_w = j_h, j_w
if min_num_keypoints <= len(human) - 1:
humans.append(human)
logger.info('alchemy time {:.5f}'.format(time.time() - start))
logger.info('num humans = {}'.format(len(humans)))
return humans
def draw_humans(keypoint_names, edges, pil_image, humans, mask=None, visbbox=True):
"""
This is what happens when you use alchemy on humans...
note that image should be PIL object
"""
start = time.time()
drawer = ImageDraw.Draw(pil_image)
for human in humans:
for k, b in human.items():
if mask:
fill = (255, 255, 255) if k == 0 else None
else:
fill = None
ymin, xmin, ymax, xmax = b
if k == 0: # human instance
# adjust size
t = 1
xmin = int(xmin * t + xmax * (1 - t))
xmax = int(xmin * (1 - t) + xmax * t)
ymin = int(ymin * t + ymax * (1 - t))
ymax = int(ymin * (1 - t) + ymax * t)
if mask:
resized = mask.resize(((xmax - xmin), (ymax - ymin)))
pil_image.paste(resized, (xmin, ymin), mask=resized)
else:
drawer.rectangle(xy=[xmin, ymin, xmax, ymax],
fill=fill,
outline=COLOR_MAP[keypoint_names[k]])
else:
if visbbox:
drawer.rectangle(xy=[xmin, ymin, xmax, ymax],
fill=fill,
outline=COLOR_MAP[keypoint_names[k]])
else:
r = 2
x = (xmin + xmax) / 2
y = (ymin + ymax) / 2
drawer.ellipse((x - r, y - r, x + r, y + r),
fill=COLOR_MAP[keypoint_names[k]])
for s, t in edges:
if s in human and t in human:
by = (human[s][0] + human[s][2]) / 2
bx = (human[s][1] + human[s][3]) / 2
ey = (human[t][0] + human[t][2]) / 2
ex = (human[t][1] + human[t][3]) / 2
drawer.line([bx, by, ex, ey],
fill=COLOR_MAP[keypoint_names[s]], width=3)
logger.info('draw humans {: .5f}'.format(time.time() - start))
return pil_image
def create_model(args, config):
global DIRECTED_GRAPHS, COLOR_MAP
dataset_type = config.get('dataset', 'type')
if dataset_type == 'mpii':
import mpii_dataset as x_dataset
elif dataset_type == 'coco':
import coco_dataset as x_dataset
else:
raise Exception('Unknown dataset {}'.format(dataset_type))
KEYPOINT_NAMES = x_dataset.KEYPOINT_NAMES
EDGES = x_dataset.EDGES
DIRECTED_GRAPHS = x_dataset.DIRECTED_GRAPHS
COLOR_MAP = x_dataset.COLOR_MAP
model = PoseProposalNet(
model_name=config.get('model_param', 'model_name'),
insize=parse_size(config.get('model_param', 'insize')),
keypoint_names=KEYPOINT_NAMES,
edges=np.array(EDGES),
local_grid_size=parse_size(config.get('model_param', 'local_grid_size')),
parts_scale=parse_size(config.get(dataset_type, 'parts_scale')),
instance_scale=parse_size(config.get(dataset_type, 'instance_scale')),
width_multiplier=config.getfloat('model_param', 'width_multiplier'),
)
logger.info('input size = {}'.format(model.insize))
logger.info('output size = {}'.format(model.outsize))
try:
result_dir = args.model
except:
result_dir = args
chainer.serializers.load_npz(
os.path.join(result_dir, 'bestmodel.npz'),
model
)
logger.info('cuda enable {}'.format(chainer.backends.cuda.available))
logger.info('ideep enable {}'.format(chainer.backends.intel64.is_ideep_available()))
if chainer.backends.cuda.available:
logger.info('gpu mode')
model.to_gpu()
elif chainer.backends.intel64.is_ideep_available():
logger.info('Indel64 mode')
model.to_intel64()
return model
def load_config(args):
config = configparser.ConfigParser()
config_path = os.path.join(args.model, 'src', 'config.ini')
logger.info(config_path)
config.read(config_path, 'UTF-8')
return config
def predict(args):
config = load_config(args)
detection_thresh = config.getfloat('predict', 'detection_thresh')
min_num_keypoints = config.getint('predict', 'min_num_keypoints')
dataset_type = config.get('dataset', 'type')
logger.info('loading {}'.format(dataset_type))
if dataset_type == 'mpii':
_, test_set = get_mpii_dataset(
insize=parse_size(config.get('model_param', 'insize')),
image_root=config.get(dataset_type, 'images'),
annotations=config.get(dataset_type, 'annotations'),
train_size=config.getfloat(dataset_type, 'train_size'),
min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
seed=config.getint('training_param', 'seed'),
)
elif dataset_type == 'coco':
test_set = get_coco_dataset(
insize=parse_size(config.get('model_param', 'insize')),
image_root=config.get(dataset_type, 'val_images'),
annotations=config.get(dataset_type, 'val_annotations'),
min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
)
else:
raise Exception('Unknown dataset {}'.format(dataset_type))
model = create_model(args, config)
# choose specific image
idx = random.choice(range(len(test_set)))
idx = 50
image = test_set.get_example(idx)['image']
humans = estimate(
model,
image.astype(np.float32),
detection_thresh,
min_num_keypoints,
)
pil_image = Image.fromarray(image.transpose(1, 2, 0).astype(np.uint8))
pil_image = draw_humans(
keypoint_names=model.keypoint_names,
edges=model.edges,
pil_image=pil_image,
humans=humans,
visbbox=config.getboolean('predict', 'visbbox')
)
#pil_image.save('result.png', 'PNG')
    pil_image.save('result_' + 'X'.join((str(model.insize[0]), str(model.insize[1]))) + '_idx_' + str(idx) + '_time_' + str(round(inference_time, 3)) + 's.png', 'PNG')
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('model', help='path/to/model', type=str)
return parser.parse_args()
def main():
args = parse_arguments()
predict(args)
if __name__ == '__main__':
main()
|
py | 1a5527e8b966a8229d1ce6db7cf3bb050bdaef4d | #!/usr/bin/env python3
import sys
import os
import argparse
ROOT = HERE = os.path.abspath(os.path.dirname(__file__))
READIES = os.path.join(ROOT, "deps/readies")
sys.path.insert(0, READIES)
import paella
#----------------------------------------------------------------------------------------------
class RedisTimeSeriesSetup(paella.Setup):
def __init__(self, nop=False):
paella.Setup.__init__(self, nop)
def common_first(self):
self.pip_install("wheel")
self.pip_install("setuptools --upgrade")
self.install("git jq curl")
self.run("%s/bin/enable-utf8" % READIES)
def debian_compat(self):
self.run("%s/bin/getgcc --modern" % READIES)
def redhat_compat(self):
self.install("redhat-lsb-core")
self.run("%s/bin/getepel" % READIES)
self.run("%s/bin/getgcc --modern" % READIES)
def arch_compat(self):
self.install("lcov-git", aur=True)
def fedora(self):
self.run("%s/bin/getgcc" % READIES)
self.install("python3-networkx")
def macos(self):
self.install_gnu_utils()
def common_last(self):
if not self.has_command("lcov"):
self.install("lcov")
self.run("{PYTHON} {READIES}/bin/getrmpytools".format(PYTHON=self.python, READIES=READIES))
self.pip_install("-r tests/flow/requirements.txt")
#----------------------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Set up system for build.')
parser.add_argument('-n', '--nop', action="store_true", help='no operation')
args = parser.parse_args()
RedisTimeSeriesSetup(nop = args.nop).setup()
|
py | 1a552851a0159287620c23782e92b0e75aa6274b | import scipy as sp
from timer import timer
def entropy(values):
"""A slow way to calculate the entropy of the input values"""
values = sp.asarray(values).flatten()
#calculate the probablility of a value in a vector
vUni = sp.unique(values)
lenval = float(values.size)
FreqData = sp.zeros(vUni.shape, dtype=float)
for i in xrange(FreqData.size):
FreqData[i] = sum(values==vUni[i])/lenval
return -sum([FreqData[i]*sp.math.log(FreqData[i],2) for i in xrange(FreqData.size)])
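# Worked example (added for illustration): for values = [0, 1, 1, 1] the
# observed probabilities are [0.25, 0.75], so the entropy is
# -(0.25*log2(0.25) + 0.75*log2(0.75)) ~= 0.8113 bits.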
def entropy2(values):
"""Calculate the entropy of vector values.
values will be flattened to a 1d ndarray."""
values = sp.asarray(values).flatten()
    p = sp.diff(sp.c_[0, sp.diff(sp.sort(values)).nonzero()[0] + 1, values.size])/float(values.size)
H = (p*sp.log2(p)).sum()
return -H
def chebyshev2(values, degree=1):
"""Calculate the Chebyshev Polynomials using previous results"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
for i in range(2,degree):
for x in range(len(values)):
A[i,x] = 2*values[x]*A[i-1,x]-A[i-2,x]
return A
def chebyshev2_lc(values, degree=1):
"""Calculate the Chebyshev Polynomials using previous results"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
for i in range(2,degree):
A[i,:] = [2*x for x in values]*A[i-1,:]-A[i-2,:]
return A
def chebyshev_sp(values, degree=1):
"""Calculate the Chebyshev Polynomials using the scipy functions"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
for i in range(2,degree):
A[i,:] = sp.cos(i*sp.arccos(values))
return A
def chebyshev_vec(values, degree=1):
"""Calculate the Chebyshev Polynobials
This implementation uses sp.vectorize to vectorize python's math functions)"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
cos = sp.vectorize(sp.math.cos)
acos = sp.vectorize(sp.math.acos)
for i in range(2,degree):
A[i,:] = cos(i*acos(values))
return A
def chebyshev_lc(values, degree=1):
"""Calculate the Chebyshev Polynomials using list comprehensions"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
for i in range(2,degree):
A[i,:] = [sp.math.cos(y) for y in [i*sp.math.acos(x) for x in values]]
return A
def chebyshev(values, degree=1):
"""Calculate the Chebyshev Polynomial using
    Tn(x) = cos(n*acos(x))"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
    for i in range(2,degree):
        for j, x in enumerate(values):
            A[i,j] = sp.math.cos(i*sp.math.acos(x))
return A
if __name__ == '__main__':
from timer import timer
testvals = sp.linspace(-1,1,500)
funcs = [chebyshev, chebyshev_lc, chebyshev_vec, chebyshev_sp, chebyshev2, chebyshev2_lc]
with timer(loops=5) as t:
for f in funcs:
t.time(f, testvals, 100)
t.printTimes()
|
py | 1a5528f25fb0e2a620146eef5094eb2a05f4f78f | """
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
import django_heroku
from decouple import config
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
ENVIRONMENT = config('ENVIRONMENT', default='local')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY', default='SECRET_KEY')
# SECURITY WARNING: define the correct hosts in production!
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=lambda v: [
s.strip() for s in v.split(',')], default='*')
INSTALLED_APPS = [
'home',
'search',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'wagtail.contrib.modeladmin',
'wagtail.contrib.styleguide',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'django.contrib.sites',
]
SITE_ID = 1
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.core.middleware.SiteMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATIC_ROOT = os.path.join(PROJECT_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Activate Django-Heroku.
django_heroku.settings(locals())
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
DATA_UPLOAD_MAX_NUMBER_FIELDS = 100000
# Wagtail settings
WAGTAIL_SITE_NAME = "{{ project_name }}"
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = config('BASE_URL', default='BASE_URL')
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
if ENVIRONMENT != 'local':
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
USER_AGENTS_CACHE = None
|
py | 1a55293cda5046330fb071dc6177c9c1e47f3587 | import pandas as pd
import seaborn as sns
import re
import praw
from matplotlib import pyplot as plt
import numpy as np
def sigmoid(x):
return 1/ ( 1 + np.exp(-x))
def logit(x):
if x != 0:
return x/(1 - x)
else:
return -999999999 # ...
|
py | 1a5529aaac90d242e03907e6b0994e4b5a1be7a8 | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_3 import models
class SubnetGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[Subnet]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.Subnet]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[Subnet])
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SubnetGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SubnetGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SubnetGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a552b283f65ff70547aa903aeb199ab9a0b0257 | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""List of pre-trained StyleGAN2 networks located on Google Drive."""
import pickle
import dnnlib
import dnnlib.tflib as tflib
#----------------------------------------------------------------------------
# StyleGAN2 Google Drive root: https://drive.google.com/open?id=1QHc-yF5C3DChRwSdZKcx1w6K8JvSxQi7
gdrive_urls = {
'gdrive:networks/stylegan2-car-config-a.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-car-config-a.pkl',
'gdrive:networks/stylegan2-car-config-b.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-car-config-b.pkl',
'gdrive:networks/stylegan2-car-config-c.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-car-config-c.pkl',
'gdrive:networks/stylegan2-car-config-d.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-car-config-d.pkl',
'gdrive:networks/stylegan2-car-config-e.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-car-config-e.pkl',
'gdrive:networks/stylegan2-car-config-f.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-car-config-f.pkl',
'gdrive:networks/stylegan2-cat-config-a.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-cat-config-a.pkl',
'gdrive:networks/stylegan2-cat-config-f.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-cat-config-f.pkl',
'gdrive:networks/stylegan2-church-config-a.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-church-config-a.pkl',
'gdrive:networks/stylegan2-church-config-f.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-church-config-f.pkl',
'gdrive:networks/stylegan2-ffhq-config-a.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-a.pkl',
'gdrive:networks/stylegan2-ffhq-config-b.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-b.pkl',
'gdrive:networks/stylegan2-ffhq-config-c.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-c.pkl',
'gdrive:networks/stylegan2-ffhq-config-d.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-d.pkl',
'gdrive:networks/stylegan2-ffhq-config-e.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-e.pkl',
'gdrive:networks/stylegan2-ffhq-config-f.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-f.pkl',
'gdrive:networks/stylegan2-horse-config-a.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-horse-config-a.pkl',
'gdrive:networks/stylegan2-horse-config-f.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-horse-config-f.pkl',
'gdrive:networks/table2/stylegan2-car-config-e-Gorig-Dorig.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-car-config-e-Gorig-Dorig.pkl',
'gdrive:networks/table2/stylegan2-car-config-e-Gorig-Dresnet.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-car-config-e-Gorig-Dresnet.pkl',
'gdrive:networks/table2/stylegan2-car-config-e-Gorig-Dskip.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-car-config-e-Gorig-Dskip.pkl',
'gdrive:networks/table2/stylegan2-car-config-e-Gresnet-Dorig.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-car-config-e-Gresnet-Dorig.pkl',
'gdrive:networks/table2/stylegan2-car-config-e-Gresnet-Dresnet.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-car-config-e-Gresnet-Dresnet.pkl',
'gdrive:networks/table2/stylegan2-car-config-e-Gresnet-Dskip.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-car-config-e-Gresnet-Dskip.pkl',
'gdrive:networks/table2/stylegan2-car-config-e-Gskip-Dorig.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-car-config-e-Gskip-Dorig.pkl',
'gdrive:networks/table2/stylegan2-car-config-e-Gskip-Dresnet.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-car-config-e-Gskip-Dresnet.pkl',
'gdrive:networks/table2/stylegan2-car-config-e-Gskip-Dskip.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-car-config-e-Gskip-Dskip.pkl',
'gdrive:networks/table2/stylegan2-ffhq-config-e-Gorig-Dorig.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-ffhq-config-e-Gorig-Dorig.pkl',
'gdrive:networks/table2/stylegan2-ffhq-config-e-Gorig-Dresnet.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-ffhq-config-e-Gorig-Dresnet.pkl',
'gdrive:networks/table2/stylegan2-ffhq-config-e-Gorig-Dskip.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-ffhq-config-e-Gorig-Dskip.pkl',
'gdrive:networks/table2/stylegan2-ffhq-config-e-Gresnet-Dorig.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-ffhq-config-e-Gresnet-Dorig.pkl',
'gdrive:networks/table2/stylegan2-ffhq-config-e-Gresnet-Dresnet.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-ffhq-config-e-Gresnet-Dresnet.pkl',
'gdrive:networks/table2/stylegan2-ffhq-config-e-Gresnet-Dskip.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-ffhq-config-e-Gresnet-Dskip.pkl',
'gdrive:networks/table2/stylegan2-ffhq-config-e-Gskip-Dorig.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-ffhq-config-e-Gskip-Dorig.pkl',
'gdrive:networks/table2/stylegan2-ffhq-config-e-Gskip-Dresnet.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-ffhq-config-e-Gskip-Dresnet.pkl',
'gdrive:networks/table2/stylegan2-ffhq-config-e-Gskip-Dskip.pkl': 'http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/table2/stylegan2-ffhq-config-e-Gskip-Dskip.pkl',
}
#----------------------------------------------------------------------------
def get_path_or_url(path_or_gdrive_path):
return gdrive_urls.get(path_or_gdrive_path, path_or_gdrive_path)
#----------------------------------------------------------------------------
_cached_networks = dict()
def load_networks(path_or_gdrive_path):
path_or_url = get_path_or_url(path_or_gdrive_path)
if path_or_url in _cached_networks:
return _cached_networks[path_or_url]
if dnnlib.util.is_url(path_or_url):
stream = dnnlib.util.open_url(path_or_url, cache_dir='.stylegan2-cache')
else:
stream = open(path_or_url, 'rb')
tflib.init_tf()
with stream:
G, D, Gs = pickle.load(stream, encoding='latin1')
_cached_networks[path_or_url] = G, D, Gs
return G, D, Gs
#----------------------------------------------------------------------------
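# Example usage (illustrative sketch; assumes the surrounding StyleGAN2 code base
# with its TF1.x dnnlib/tflib modules is importable and the download succeeds):
#
#   G, D, Gs = load_networks('gdrive:networks/stylegan2-ffhq-config-f.pkl')
#   # A second call with the same path returns the entry cached in _cached_networks.
#----------------------------------------------------------------------------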
|
py | 1a552b74a8e9d082e35afb3e53a1be1f899210c5 | import sys
import time
from typing import Any, List, Optional
import tempfile
import pytest
import inspect
import requests
from fastapi import (Cookie, Depends, FastAPI, Header, Query, Request,
APIRouter, BackgroundTasks)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
import ray
from ray import serve
from ray.serve.http_util import make_fastapi_class_based_view
def test_fastapi_function(serve_instance):
app = FastAPI()
@app.get("/{a}")
def func(a: int):
return {"result": a}
@serve.deployment(name="f")
@serve.ingress(app)
class FastAPIApp:
pass
FastAPIApp.deploy()
resp = requests.get("http://localhost:8000/f/100")
assert resp.json() == {"result": 100}
resp = requests.get("http://localhost:8000/f/not-number")
assert resp.status_code == 422 # Unprocessable Entity
assert resp.json()["detail"][0]["type"] == "type_error.integer"
def test_ingress_prefix(serve_instance):
app = FastAPI()
@app.get("/{a}")
def func(a: int):
return {"result": a}
@serve.deployment(route_prefix="/api")
@serve.ingress(app)
class App:
pass
App.deploy()
resp = requests.get("http://localhost:8000/api/100")
assert resp.json() == {"result": 100}
def test_class_based_view(serve_instance):
app = FastAPI()
@app.get("/other")
def hello():
return "hello"
@serve.deployment(name="f")
@serve.ingress(app)
class A:
def __init__(self):
self.val = 1
@app.get("/calc/{i}")
def b(self, i: int):
return i + self.val
@app.post("/calc/{i}")
def c(self, i: int):
return i - self.val
def other(self, msg: str):
return msg
A.deploy()
# Test HTTP calls.
resp = requests.get("http://localhost:8000/f/calc/41")
assert resp.json() == 42
resp = requests.post("http://localhost:8000/f/calc/41")
assert resp.json() == 40
resp = requests.get("http://localhost:8000/f/other")
assert resp.json() == "hello"
# Test handle calls.
handle = A.get_handle()
assert ray.get(handle.b.remote(41)) == 42
assert ray.get(handle.c.remote(41)) == 40
assert ray.get(handle.other.remote("world")) == "world"
def test_make_fastapi_cbv_util():
app = FastAPI()
class A:
@app.get("/{i}")
def b(self, i: int):
pass
# before, "self" is treated as a query params
assert app.routes[-1].endpoint == A.b
assert app.routes[-1].dependant.query_params[0].name == "self"
assert len(app.routes[-1].dependant.dependencies) == 0
make_fastapi_class_based_view(app, A)
# after, "self" is treated as a dependency instead of query params
assert app.routes[-1].endpoint == A.b
assert len(app.routes[-1].dependant.query_params) == 0
assert len(app.routes[-1].dependant.dependencies) == 1
self_dep = app.routes[-1].dependant.dependencies[0]
assert self_dep.name == "self"
assert inspect.isfunction(self_dep.call)
assert "get_current_servable" in str(self_dep.call)
def test_fastapi_features(serve_instance):
app = FastAPI(openapi_url="/my_api.json")
@app.on_event("startup")
def inject_state():
app.state.state_one = "app.state"
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
start_time = time.time()
response = await call_next(request)
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
class Nested(BaseModel):
val: int
class BodyType(BaseModel):
name: str
price: float = Field(None, gt=1.0, description="High price!")
nests: Nested
class RespModel(BaseModel):
ok: bool
vals: List[Any]
file_path: str
async def yield_db():
yield "db"
async def common_parameters(q: Optional[str] = None):
return {"q": q}
@app.exception_handler(ValueError)
async def custom_handler(_: Request, exc: ValueError):
return JSONResponse(
status_code=500,
content={
"custom_error": "true",
"message": str(exc)
})
def run_background(background_tasks: BackgroundTasks):
path = tempfile.mktemp()
def write_to_file(p):
with open(p, "w") as f:
f.write("hello")
background_tasks.add_task(write_to_file, path)
return path
app.add_middleware(CORSMiddleware, allow_origins="*")
@app.get("/{path_arg}", response_model=RespModel, status_code=201)
async def func(
path_arg: str,
query_arg: str,
body_val: BodyType,
backgrounds_tasks: BackgroundTasks,
do_error: bool = False,
query_arg_valid: Optional[str] = Query(None, min_length=3),
cookie_arg: Optional[str] = Cookie(None),
user_agent: Optional[str] = Header(None),
commons: dict = Depends(common_parameters),
db=Depends(yield_db),
):
if do_error:
raise ValueError("bad input")
path = run_background(backgrounds_tasks)
return RespModel(
ok=True,
vals=[
path_arg,
query_arg,
body_val.price,
body_val.nests.val,
do_error,
query_arg_valid,
cookie_arg,
user_agent.split("/")[0], # returns python-requests
commons,
db,
app.state.state_one,
],
file_path=path,
)
router = APIRouter(prefix="/prefix")
@router.get("/subpath")
def router_path():
return "ok"
app.include_router(router)
@serve.deployment(name="fastapi")
@serve.ingress(app)
class Worker:
pass
Worker.deploy()
url = "http://localhost:8000/fastapi"
resp = requests.get(f"{url}/")
assert resp.status_code == 404
assert "x-process-time" in resp.headers
resp = requests.get(f"{url}/my_api.json")
assert resp.status_code == 200
assert resp.json() # it returns a well-formed json.
resp = requests.get(f"{url}/docs")
assert resp.status_code == 200
assert "<!DOCTYPE html>" in resp.text
resp = requests.get(f"{url}/redoc")
assert resp.status_code == 200
assert "<!DOCTYPE html>" in resp.text
resp = requests.get(f"{url}/path_arg")
assert resp.status_code == 422 # Malformed input
resp = requests.get(
f"{url}/path_arg",
json={
"name": "serve",
"price": 12,
"nests": {
"val": 1
}
},
params={
"query_arg": "query_arg",
"query_arg_valid": "at-least-three-chars",
"q": "common_arg",
})
assert resp.status_code == 201, resp.text
assert resp.json()["ok"]
assert resp.json()["vals"] == [
"path_arg",
"query_arg",
12.0,
1,
False,
"at-least-three-chars",
None,
"python-requests",
{
"q": "common_arg"
},
"db",
"app.state",
]
assert open(resp.json()["file_path"]).read() == "hello"
resp = requests.get(
f"{url}/path_arg",
json={
"name": "serve",
"price": 12,
"nests": {
"val": 1
}
},
params={
"query_arg": "query_arg",
"query_arg_valid": "at-least-three-chars",
"q": "common_arg",
"do_error": "true"
})
assert resp.status_code == 500
assert resp.json()["custom_error"] == "true"
resp = requests.get(f"{url}/prefix/subpath")
assert resp.status_code == 200
resp = requests.get(
f"{url}/docs",
headers={
"Access-Control-Request-Method": "GET",
"Origin": "https://googlebot.com"
})
assert resp.headers["access-control-allow-origin"] == "*", resp.headers
def test_fast_api_mounted_app(serve_instance):
app = FastAPI()
subapp = FastAPI()
@subapp.get("/hi")
def hi():
return "world"
app.mount("/mounted", subapp)
@serve.deployment(route_prefix="/api")
@serve.ingress(app)
class A:
pass
A.deploy()
assert requests.get(
"http://localhost:8000/api/mounted/hi").json() == "world"
def test_fastapi_init_lifespan_should_not_shutdown(serve_instance):
app = FastAPI()
@app.on_event("shutdown")
async def shutdown():
1 / 0
@serve.deployment
@serve.ingress(app)
class A:
def f(self):
return 1
A.deploy()
    # Without a proper fix, the actor won't be initialized correctly
    # because it will crash on each startup.
assert ray.get(A.get_handle().f.remote()) == 1
def test_fastapi_duplicate_routes(serve_instance):
app = FastAPI()
@serve.deployment(route_prefix="/api/v1")
@serve.ingress(app)
class App1:
@app.get("/")
def func_v1(self):
return "first"
@serve.deployment(route_prefix="/api/v2")
@serve.ingress(app)
class App2:
@app.get("/")
def func_v2(self):
return "second"
@app.get("/ignored")
def ignored():
pass
App1.deploy()
App2.deploy()
resp = requests.get("http://localhost:8000/api/v1")
assert resp.json() == "first"
resp = requests.get("http://localhost:8000/api/v2")
assert resp.json() == "second"
for version in ["v1", "v2"]:
resp = requests.get(f"http://localhost:8000/api/{version}/ignored")
assert resp.status_code == 404
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows")
@pytest.mark.parametrize("route_prefix", [None, "/", "/subpath"])
def test_doc_generation(serve_instance, route_prefix):
app = FastAPI()
@serve.deployment(route_prefix=route_prefix)
@serve.ingress(app)
class App:
@app.get("/")
def func1(self, arg: str):
return "hello"
App.deploy()
if route_prefix is None:
prefix = "/App"
else:
prefix = route_prefix
if not prefix.endswith("/"):
prefix += "/"
r = requests.get(f"http://localhost:8000{prefix}openapi.json")
assert r.status_code == 200
assert len(r.json()["paths"]) == 1
assert "/" in r.json()["paths"]
assert len(r.json()["paths"]["/"]) == 1
assert "get" in r.json()["paths"]["/"]
r = requests.get(f"http://localhost:8000{prefix}docs")
assert r.status_code == 200
@serve.deployment(route_prefix=route_prefix)
@serve.ingress(app)
class App:
@app.get("/")
def func1(self, arg: str):
return "hello"
@app.post("/hello")
def func2(self, arg: int):
return "hello"
App.deploy()
r = requests.get(f"http://localhost:8000{prefix}openapi.json")
assert r.status_code == 200
assert len(r.json()["paths"]) == 2
assert "/" in r.json()["paths"]
assert len(r.json()["paths"]["/"]) == 1
assert "get" in r.json()["paths"]["/"]
assert "/hello" in r.json()["paths"]
assert len(r.json()["paths"]["/hello"]) == 1
assert "post" in r.json()["paths"]["/hello"]
r = requests.get(f"http://localhost:8000{prefix}docs")
assert r.status_code == 200
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
py | 1a552b972f038ea28c52001055e6f3b73fca4839 | """
WSGI config for ReproHack Hub project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# reprohack_hub directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "reprohack_hub"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
py | 1a552bcde9a9972a8c60db8c4bef5d69ff18b410 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('settings', '0003_settings_action_delete_confirm'),
]
operations = [
migrations.RemoveField(
model_name='settings',
name='id',
),
migrations.AlterField(
model_name='settings',
name='user',
field=models.OneToOneField(default=None, primary_key=True, to=settings.AUTH_USER_MODEL, related_name='settings', serialize=False),
),
]
|
py | 1a552be2e8b15d53a5f65644b6cb7931153e2715 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Work out the port to generate the docs for
from collections import OrderedDict
micropy_port = os.getenv('MICROPY_PORT') or 'pyboard'
tags.add('port_' + micropy_port)
ports = OrderedDict((
('unix', 'unix'),
('pyboard', 'the pyboard'),
('wipy', 'the WiPy'),
('esp8266', 'the ESP8266'),
))
# The members of the html_context dict are available inside topindex.html
micropy_version = os.getenv('MICROPY_VERSION') or 'latest'
micropy_all_versions = (os.getenv('MICROPY_ALL_VERSIONS') or 'latest').split(',')
url_pattern = '%s/en/%%s/%%s' % (os.getenv('MICROPY_URL_PREFIX') or '/',)
html_context = {
'port':micropy_port,
'port_name':ports[micropy_port],
'port_version':micropy_version,
'all_ports':[
(port_id, url_pattern % (micropy_version, port_id))
for port_id, port_name in ports.items()
],
'all_versions':[
(ver, url_pattern % (ver, micropy_port))
for ver in micropy_all_versions
],
'downloads':[
('PDF', url_pattern % (micropy_version, 'micropython-%s.pdf' % micropy_port)),
],
}
# Specify a custom master document based on the port name
master_doc = micropy_port + '_' + 'index'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx_selective_exclude.modindex_exclude',
'sphinx_selective_exclude.eager_only',
'sphinx_selective_exclude.search_auto_exclude',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '2014-2017, Damien P. George, Paul Sokolovsky, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.8'
# The full version, including alpha/beta/rc tags.
release = '1.8.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicroPython.tex', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'micropython', 'MicroPython Documentation',
['Damien P. George, Paul Sokolovsky, and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicroPython', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'MicroPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Append the other ports' specific folders/files to the exclude pattern
exclude_patterns.extend([port + '*' for port in ports if port != micropy_port])
modules_port_specific = {
'pyboard': ['pyb'],
'wipy': ['wipy'],
'esp8266': ['esp'],
}
modindex_exclude = []
for p, l in modules_port_specific.items():
if p != micropy_port:
modindex_exclude += l
# Exclude extra modules per port
modindex_exclude += {
'esp8266': ['cmath', 'select'],
'wipy': ['cmath'],
}.get(micropy_port, [])
|
py | 1a552dbe94945040a8ab97433366d57acf9efbc0 | from urllib.parse import urlparse
from aion_client.stateless_client import StatelessClient
from aion_client.job_client import JobClient
class AionClient:
def __init__(self, api_uri):
u = urlparse(api_uri)
self.api_url = f'https://{u.hostname}'
self.project_id = u.path[1:]
def get_stateless_client(self, scope):
return StatelessClient(self.api_url, self.project_id, scope)
def get_job_client(self, queue_name):
return JobClient(self.api_url, self.project_id, queue_name)
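# Illustrative usage (sketch; the URI, scope and queue names below are hypothetical
# placeholders, and constructing the underlying clients may contact the API):
#
#   client = AionClient('https://aion.example.com/my-project-id')
#   stateless = client.get_stateless_client('example-scope')
#   jobs = client.get_job_client('example-queue')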
|
bzl | 1a552dc8f6bc7212618e42672ea8c3787beedbbe | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
"""Creates config_setting that allows selecting based on 'compiler' value."""
def create_msvc_config():
# The "do_not_use_tools_cpp_compiler_present" attribute exists to
# distinguish between older versions of Bazel that do not support
# "@bazel_tools//tools/cpp:compiler" flag_value, and newer ones that do.
# In the future, the only way to select on the compiler will be through
# flag_values{"@bazel_tools//tools/cpp:compiler"} and the else branch can
# be removed.
if hasattr(cc_common, "do_not_use_tools_cpp_compiler_present"):
native.config_setting(
name = "msvc",
flag_values = {
"@bazel_tools//tools/cpp:compiler": "msvc-cl",
},
visibility = ["//visibility:public"],
)
else:
native.config_setting(
name = "msvc",
values = {"compiler": "msvc-cl"},
visibility = ["//visibility:public"],
)
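# Example usage (sketch): once create_msvc_config() has been called in a BUILD
# file's package, the resulting ":msvc" config_setting can drive a select(), e.g.
#
#   cc_library(
#       name = "foo",
#       copts = select({
#           ":msvc": ["/O2"],
#           "//conditions:default": ["-O2"],
#       }),
#   )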
|
py | 1a552e66b13e65b1d768e9c4b3d5bf272e94a1d8 | # Bregman - python toolkit for music information retrieval
__version__ = '1.0'
__author__ = 'Michael A. Casey'
__copyright__ = "Copyright (C) 2010 Michael Casey, Dartmouth College, All Rights Reserved"
__license__ = "GPL Version 2.0 or Higher"
__email__ = '[email protected]'
# Exception Handling class
class BregmanError(Exception):
def __init__(self, msg):
print "Bregman error:", msg
|
py | 1a552e73e8763640a5a1a4ffe8c84e190ba804d0 | import sys
import discord
token = sys.argv[1]
client = discord.Client()
@client.event
async def on_ready():
for server in client.guilds:
await server.leave()
await client.close()
client.run(token, bot=False)
|
py | 1a552ed7583b0866760f61d8dabd9e7d9e7bc618 | import hashlib
from scapy.all import IP, TCP, PcapReader, rdpcap, wrpcap
from tcp_reliable.packet_helper import getPacketTimestamp, changeTimestamp, writePcap, genKey
class Extractor:
def __init__(self, pcapConfig, BUFFER_SIZE):
self.pcapConfig = pcapConfig
self.BUFFER_SIZE = BUFFER_SIZE
def extract(self):
serverPcap = rdpcap(self.pcapConfig['CLIENT_PCAP_PATH_OUTPUT'])
output = []
lastTimestamp = 0
serverPcap.sort(key=self.getKeySort)
last_seq = 0
limit = 0
buff = [None]* self.BUFFER_SIZE
sol = []
for pkt in serverPcap:
if pkt[TCP].dport == self.pcapConfig['CLIENT_PORT']:
timestamp = getPacketTimestamp(pkt)[0]
if timestamp == None:
continue
seq = pkt[TCP].seq
if lastTimestamp != timestamp and limit < timestamp and seq!= last_seq:
# if count >= 179 and count <= 281:
# print('seq:', pkt[TCP].seq, 'timestamp', timestamp, 'value', timestamp%2,'last_tm:', lastTimestamp)
# text+=str(timestamp%2)
# print("seq:", seq, "timestamp:", timestamp, "bit:", timestamp%2)
output.append(timestamp%2)
idx = self.getBufferIdx(seq)
buff[idx] = timestamp%2
# print("******",len(sol)+1,"***** seq",seq,"*****","idx",idx,"******* bit:",timestamp%2)
if idx == 0 and timestamp%2 == 1:
has_none = False
for i in buff[1:]:
if i == None:
has_none = True
if not has_none:
sol.append(buff[1:])
buff = [None]* self.BUFFER_SIZE
lastTimestamp = timestamp
limit = max(limit, timestamp)
last_seq = seq
return sol
def getKeySort(self, pkt):
seq = pkt[TCP].seq
timestamp = getPacketTimestamp(pkt)[0]
if timestamp == None:
return int(str(seq)+'0')
return int(str(timestamp)+str(seq))
def genHashNumber(self, num):
return int(hashlib.sha256(str(num).encode()).hexdigest(), base=16)
def getBufferIdx(self, seq):
return self.genHashNumber(seq) % self.BUFFER_SIZE
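# Illustrative usage (sketch; the config keys match those read in extract(), but the
# path, port and buffer size below are placeholders):
#
#   extractor = Extractor({'CLIENT_PCAP_PATH_OUTPUT': 'client.pcap', 'CLIENT_PORT': 4444},
#                         BUFFER_SIZE=9)
#   frames = extractor.extract()   # list of recovered bit buffers (BUFFER_SIZE - 1 bits each)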
# if __name__ == '__main__':
# readMessage() |
py | 1a552f7ea1118acd261f710308083a60f663dbe8 | """
Submodule for working with geochemical data.
"""
import logging
import pandas as pd
import numpy as np
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
from ..util.meta import update_docstring_references
from ..util import units
from . import parse
from . import transform
from . import norm
from .ind import (
common_elements,
common_oxides,
__common_elements__,
__common_oxides__,
REE,
)
# note that only some of these methods will be valid for series
@pd.api.extensions.register_series_accessor("pyrochem")
@pd.api.extensions.register_dataframe_accessor("pyrochem")
class pyrochem(object):
"""
Custom dataframe accessor for pyrolite geochemistry.
"""
def __init__(self, obj):
self._validate(obj)
self._obj = obj
@staticmethod
def _validate(obj):
pass
# pyrolite.geochem.ind functions
@property
def list_elements(self):
"""
Get the subset of columns which are element names.
Returns
--------
:class:`list`
Notes
-------
The list will have the same ordering as the source DataFrame.
"""
fltr = self._obj.columns.isin(__common_elements__)
return self._obj.columns[fltr].tolist()
@property
def list_REE(self):
"""
Get the subset of columns which are Rare Earth Element names.
Returns
--------
:class:`list`
Notes
-------
The returned list will reorder REE based on atomic number.
"""
return [i for i in REE() if i in self._obj.columns]
@property
def list_oxides(self):
"""
Get the subset of columns which are oxide names.
Returns
--------
:class:`list`
Notes
-------
The list will have the same ordering as the source DataFrame.
"""
fltr = self._obj.columns.isin(__common_oxides__)
return self._obj.columns[fltr].tolist()
@property
def list_compositional(self):
return list(self.list_oxides + self.list_elements)
@property
def elements(self):
"""
Get an elemental subset of a DataFrame.
Returns
--------
:class:`pandas.Dataframe`
"""
return self._obj[self.list_elements]
@elements.setter
def elements(self, df):
self._obj.loc[:, self.list_elements] = df
@property
def REE(self):
"""
Get a Rare Earth Element subset of a DataFrame.
Returns
--------
:class:`pandas.Dataframe`
"""
return self._obj[self.list_REE]
@REE.setter
def REE(self, df):
self._obj.loc[:, self.list_REE] = df
@property
def oxides(self):
"""
Get an oxide subset of a DataFrame.
Returns
--------
:class:`pandas.Dataframe`
"""
return self._obj.loc[:, self.list_oxides]
@oxides.setter
def oxides(self, df):
self._obj.loc[:, self.list_oxides] = df
@property
def compositional(self):
"""
Get an oxide & elemental subset of a DataFrame.
Returns
--------
:class:`pandas.Dataframe`
"""
return self._obj.loc[:, self.list_compositional]
@compositional.setter
def compositional(self, df):
self._obj.loc[:, self.list_compositional] = df
# pyrolite.geochem.parse functions
def check_multiple_cation_inclusion(self, exclude=["LOI", "FeOT", "Fe2O3T"]):
"""
Returns cations which are present in both oxide and elemental form.
Parameters
-----------
exclude : :class:`list`, :code:`["LOI", "FeOT", "Fe2O3T"]`
List of components to exclude from the duplication check.
Returns
--------
:class:`set`
Set of elements for which multiple components exist in the dataframe.
"""
return parse.check_multiple_cation_inclusion(self._obj, exclude=exclude)
# pyrolite.geochem.transform functions
def to_molecular(self, renorm=True):
"""
Converts mass quantities to molar quantities.
Parameters
-----------
renorm : :class:`bool`, :code:`True`
Whether to renormalise the dataframe after converting to relative moles.
Notes
------
Does not convert units (i.e. mass% --> mol%; mass-ppm --> mol-ppm).
Returns
-------
:class:`pandas.DataFrame`
Transformed dataframe.
"""
self._obj = transform.to_molecular(self._obj, renorm=renorm)
return self._obj
def to_weight(self, renorm=True):
"""
Converts molar quantities to mass quantities.
Parameters
-----------
renorm : :class:`bool`, :code:`True`
Whether to renormalise the dataframe after converting to relative moles.
Notes
------
Does not convert units (i.e. mol% --> mass%; mol-ppm --> mass-ppm).
Returns
-------
:class:`pandas.DataFrame`
Transformed dataframe.
"""
self._obj = transform.to_weight(self._obj, renorm=renorm)
return self._obj
def devolatilise(
self, exclude=["H2O", "H2O_PLUS", "H2O_MINUS", "CO2", "LOI"], renorm=True
):
"""
Recalculates components after exclusion of volatile phases (e.g. H2O, CO2).
Parameters
-----------
exclude : :class:`list`
Components to exclude from the dataset.
renorm : :class:`bool`, :code:`True`
Whether to renormalise the dataframe after devolatilisation.
Returns
-------
:class:`pandas.DataFrame`
Transformed dataframe.
"""
self._obj = transform.devolatilise(self._obj, exclude=exclude, renorm=renorm)
return self._obj
def elemental_sum(
self, component=None, to=None, total_suffix="T", logdata=False, molecular=False
):
"""
Sums abundance for a cation to a single series, starting from a
        dataframe containing multiple components with a single set of units.
Parameters
----------
component : :class:`str`
Component indicating which element to aggregate.
to : :class:`str`
Component to cast the output as.
logdata : :class:`bool`, :code:`False`
Whether data has been log transformed.
molecular : :class:`bool`, :code:`False`
Whether to perform a sum of molecular data.
Returns
-------
:class:`pandas.Series`
Series with cation aggregated.
"""
return transform.elemental_sum(
self._obj,
component=component,
to=to,
total_suffix=total_suffix,
logdata=logdata,
molecular=molecular,
)
def aggregate_element(
self, to, total_suffix="T", logdata=False, renorm=False, molecular=False
):
"""
Aggregates cation information from oxide and elemental components to either a
single species or a designated mixture of species.
Parameters
----------
to : :class:`str` | :class:`~periodictable.core.Element` | :class:`~periodictable.formulas.Formula` | :class:`dict`
Component(s) to convert to. If one component is specified, the element will be
converted to the target species.
If more than one component is specified with proportions in a dictionary
(e.g. :code:`{'FeO': 0.9, 'Fe2O3': 0.1}`), the components will be split as a
fraction of the elemental sum.
renorm : :class:`bool`, :code:`True`
Whether to renormalise the dataframe after recalculation.
total_suffix : :class:`str`, 'T'
Suffix of 'total' variables. E.g. 'T' for FeOT, Fe2O3T.
logdata : :class:`bool`, :code:`False`
Whether the data has been log transformed.
molecular : :class:`bool`, :code:`False`
Whether to perform a sum of molecular data.
Notes
-------
        This won't convert units, so you need to start from a single set of units.
Returns
-------
:class:`pandas.Series`
Series with cation aggregated.
"""
return transform.aggregate_element(
self._obj,
to,
total_suffix=total_suffix,
logdata=logdata,
renorm=renorm,
molecular=molecular,
)
def recalculate_Fe(
self, to="FeOT", renorm=False, total_suffix="T", logdata=False, molecular=False
):
"""
Recalculates abundances of iron, and normalises a dataframe to contain either
a single species, or multiple species in certain proportions.
Parameters
-----------
to : :class:`str` | :class:`~periodictable.core.Element` | :class:`~periodictable.formulas.Formula` | :class:`dict`
Component(s) to convert to.
If one component is specified, all iron will be
converted to the target species.
If more than one component is specified with proportions in a dictionary
(e.g. :code:`{'FeO': 0.9, 'Fe2O3': 0.1}`), the components will be split as a
fraction of Fe.
renorm : :class:`bool`, :code:`False`
Whether to renormalise the dataframe after recalculation.
total_suffix : :class:`str`, 'T'
Suffix of 'total' variables. E.g. 'T' for FeOT, Fe2O3T.
logdata : :class:`bool`, :code:`False`
Whether the data has been log transformed.
molecular : :class:`bool`, :code:`False`
Flag that data is in molecular units, rather than weight units.
Returns
-------
:class:`pandas.DataFrame`
Transformed dataframe.
"""
self._obj = transform.recalculate_Fe(
self._obj,
to,
total_suffix=total_suffix,
logdata=logdata,
renorm=renorm,
molecular=molecular,
)
return self._obj
def get_ratio(self, ratio: str, alias: str = None, norm_to=None, molecular=False):
"""
Add a ratio of components A and B, given in the form of string 'A/B'.
        The returned series can be assigned an alias name.
Parameters
-----------
ratio : :class:`str`
            String description of ratio in the form A/B[_n].
alias : :class:`str`
Alternate name for ratio to be used as column name.
norm_to : :class:`str` | :class:`pyrolite.geochem.norm.Composition`, `None`
Reference composition to normalise to.
molecular : :class:`bool`, :code:`False`
Flag that data is in molecular units, rather than weight units.
Returns
-------
:class:`pandas.DataFrame`
Dataframe with ratio appended.
See Also
--------
:func:`~pyrolite.geochem.transform.add_MgNo`
"""
return transform.get_ratio(
self._obj, ratio, alias, norm_to=norm_to, molecular=molecular
)
def add_ratio(self, ratio: str, alias: str = None, norm_to=None, molecular=False):
"""
Add a ratio of components A and B, given in the form of string 'A/B'.
        The returned series can be assigned an alias name.
Parameters
-----------
ratio : :class:`str`
            String description of ratio in the form A/B[_n].
alias : :class:`str`
Alternate name for ratio to be used as column name.
norm_to : :class:`str` | :class:`pyrolite.geochem.norm.Composition`, `None`
Reference composition to normalise to.
molecular : :class:`bool`, :code:`False`
Flag that data is in molecular units, rather than weight units.
Returns
-------
:class:`pandas.DataFrame`
Dataframe with ratio appended.
See Also
--------
:func:`~pyrolite.geochem.transform.add_MgNo`
"""
r = self.get_ratio(ratio, alias, norm_to=norm_to, molecular=molecular)
self._obj[r.name] = r
return self._obj
def add_MgNo(
self, molecular=False, use_total_approx=False, approx_Fe203_frac=0.1, name="Mg#"
):
"""
Append the magnesium number to a dataframe.
Parameters
----------
molecular : :class:`bool`, :code:`False`
Whether the input data is molecular.
use_total_approx : :class:`bool`, :code:`False`
Whether to use an approximate calculation using total iron rather than just FeO.
approx_Fe203_frac : :class:`float`
Fraction of iron which is oxidised, used in approximation mentioned above.
name : :class:`str`
Name to use for the Mg Number column.
Returns
-------
:class:`pandas.DataFrame`
Dataframe with ratio appended.
See Also
--------
:func:`~pyrolite.geochem.transform.add_ratio`
"""
transform.add_MgNo(
self._obj,
molecular=molecular,
use_total_approx=use_total_approx,
approx_Fe203_frac=approx_Fe203_frac,
name=name,
)
return self._obj
def lambda_lnREE(
self,
norm_to="Chondrite_PON",
exclude=["Pm", "Eu"],
params=None,
degree=4,
append=[],
scale="ppm",
**kwargs
):
"""
Calculates orthogonal polynomial coefficients (lambdas) for a given set of REE data,
normalised to a specific composition [#localref_1]_. Lambda factors are given for the
radii vs. ln(REE/NORM) polynomial combination.
Parameters
------------
norm_to : :class:`str` | :class:`~pyrolite.geochem.norm.Composition` | :class:`numpy.ndarray`
Which reservoir to normalise REE data to (defaults to :code:`"Chondrite_PON"`).
exclude : :class:`list`, :code:`["Pm", "Eu"]`
            Which REE elements to exclude from the fit. You may wish to also add Ce to this
            list for minerals in which Ce anomalies are common.
params : :class:`list`, :code:`None`
            Set of predetermined orthogonal polynomial parameters.
        degree : :class:`int`, 4
Maximum degree polynomial fit component to include.
append : :class:`list`, :code:`None`
Whether to append lambda function (i.e. :code:`["function"]`).
scale : :class:`str`
Current units for the REE data, used to scale the reference dataset.
References
-----------
.. [#localref_1] O’Neill HSC (2016) The Smoothness and Shapes of Chondrite-normalized
Rare Earth Element Patterns in Basalts. J Petrology 57:1463–1508.
doi: `10.1093/petrology/egw047 <https://dx.doi.org/10.1093/petrology/egw047>`__
See Also
---------
:func:`~pyrolite.geochem.ind.get_ionic_radii`
:func:`~pyrolite.util.math.lambdas`
:func:`~pyrolite.util.math.OP_constants`
:func:`~pyrolite.plot.REE_radii_plot`
"""
return transform.lambda_lnREE(
self._obj,
norm_to=norm_to,
exclude=exclude,
params=params,
degree=degree,
append=append,
scale=scale,
**kwargs
)
def convert_chemistry(self, to=[], logdata=False, renorm=False, molecular=False):
"""
Attempts to convert a dataframe with one set of components to another.
Parameters
-----------
to : :class:`list`
Set of columns to try to extract from the dataframe.
Can also include a dictionary for iron speciation.
See :func:`pyrolite.geochem.recalculate_Fe`.
logdata : :class:`bool`, :code:`False`
Whether chemical data has been log transformed. Necessary for aggregation
functions.
renorm : :class:`bool`, :code:`False`
Whether to renormalise the data after transformation.
molecular : :class:`bool`, :code:`False`
Flag that data is in molecular units, rather than weight units.
Returns
--------
:class:`pandas.DataFrame`
Dataframe with converted chemistry.
Todo
------
* Check for conflicts between oxides and elements
* Aggregator for ratios
* Implement generalised redox transformation.
        * Add check for dictionary components (e.g. Fe) in tests
"""
return transform.convert_chemistry(
self._obj, to=to, logdata=logdata, renorm=renorm, molecular=molecular
) # can't update the source nicely here, need to assign output
# pyrolite.geochem.norm functions
def normalize_to(self, reference=None, units=None, convert_first=False):
"""
Normalise a dataframe to a given reference composition.
Parameters
-----------
reference : :class:`str` | :class:`~pyrolite.geochem.norm.Composition` | :class:`numpy.ndarray`
Reference composition to normalise to.
units : :class:`str`
Units of the input dataframe, to convert the reference composition.
convert_first : :class:`bool`
            Whether to first convert the reference composition before normalisation.
This is useful where elements are presented as different components (e.g.
Ti, TiO2).
Returns
--------
:class:`pandas.DataFrame`
Dataframe with normalised chemistry.
Notes
------
This assumes that dataframes have a single set of units.
"""
if isinstance(reference, (str, norm.Composition)):
if not isinstance(reference, norm.Composition):
N = norm.get_reference_composition(reference)
else:
N = reference
if units is not None:
N.set_units(units)
if convert_first:
N.comp = transform.convert_chemistry(N.comp, self.list_compositional)
norm_abund = N[self.list_compositional]
else: # list, iterable, pd.Index etc
norm_abund = np.array(reference)
assert len(norm_abund) == len(self.list_compositional)
# this list should have the same ordering as the input dataframe
return self._obj[self.list_compositional].div(norm_abund)
def denormalize_from(self, reference=None, units=None):
"""
De-normalise a dataframe from a given reference composition.
Parameters
-----------
reference : :class:`str` | :class:`~pyrolite.geochem.norm.Composition` | :class:`numpy.ndarray`
Reference composition which the composition is normalised to.
units : :class:`str`
Units of the input dataframe, to convert the reference composition.
Returns
--------
:class:`pandas.DataFrame`
Dataframe with normalised chemistry.
Notes
------
This assumes that dataframes have a single set of units.
"""
if isinstance(reference, (str, norm.Composition)):
if not isinstance(reference, norm.Composition):
N = norm.get_reference_composition(reference)
else:
N = reference
if units is not None:
N.set_units(units)
N.comp = transform.convert_chemistry(N.comp, self.list_compositional)
norm_abund = N[self.list_compositional]
else: # list, iterable, pd.Index etc
norm_abund = np.array(reference)
assert len(norm_abund) == len(self.list_compositional)
return self._obj[self.list_compositional] * norm_abund
def scale(self, in_unit, target_unit="ppm"):
"""
Scale a dataframe from one set of units to another.
Parameters
-----------
in_unit : :class:`str`
Units to be converted from
target_unit : :class:`str`, :code:`"ppm"`
Units to scale to.
Returns
--------
:class:`pandas.DataFrame`
Dataframe with new scale.
"""
return self._obj * units.scale(in_unit, target_unit)
pyrochem.lambda_lnREE = update_docstring_references(
pyrochem.lambda_lnREE, ref="localref"
)
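# Illustrative usage of the accessor (sketch; the column names and values below are
# made-up example data, and normalize_to requires the named reference composition
# to be available):
#
#   import pandas as pd
#   df = pd.DataFrame({"SiO2": [51.2], "MgO": [7.8], "CaO": [11.1], "Ni": [120.0]})
#   df.pyrochem.list_oxides                      # ['SiO2', 'MgO', 'CaO']
#   df.pyrochem.list_elements                    # ['Ni']
#   df = df.pyrochem.add_ratio("CaO/MgO")        # appends a 'CaO/MgO' column
#   normed = df.pyrochem.normalize_to("Chondrite_PON", units="ppm")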
|
py | 1a552fde73b7ba0a39f2057a424ed638ce94b558 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.spider import SitemapSpider
from scrapy.http.request import Request
import json
from urllib.parse import urlencode
class BibaSpider(scrapy.Spider):
name = 'biba'
allowed_domains = ['www.biba.in']
start_urls = [
'https://www.biba.in/new-arrivals',
'https://www.biba.in/mix-and-match',
'https://www.biba.in/suit-sets',
'https://www.biba.in/girls',
'https://www.biba.in/easy-stitch',
'https://www.biba.in/jewellery',
'https://www.biba.in/factory-outlet',
'https://www.biba.in/factory-outlet'
]
def parse(self, response):
ignore_paths = [
            '/registration/', '/careers', 'sitemap', 'privacy', 'terms-of-use', 'about-us',
            '/payments-options', 'help-faq', 'delivery-and-shipping-policy', 'business-enquiries',
'/returns-and-cancellation-policy', 'contact-us', 'trackorder', 'store-locator', '/faq/'
]
pages = len(response.xpath(".//*[@class='pager']/text()").extract())
if pages:
# print(pages)
pattern_match_text = r"var\sobjShowCaseInputs\s=\s({.*});"
try:
data = json.loads(response.xpath(".//script[@type='text/javascript']/text()").re_first(pattern_match_text))
for i in range(1, pages + 1):
data['PageNo'] = i
# print(data)
encoded_url = "https://www.biba.in/Handler/ProductShowcaseHandler.ashx?ProductShowcaseInput=" + \
json.dumps(data)
# print(encoded_url)
yield Request(
encoded_url,
callback=self.parse
)
except Exception as e:
print(e)
links = set([link for link in response.xpath(".//a/@href").extract()])
links = [link for ipath in ignore_paths for link in links
if (ipath not in str(link).lower().strip()) and self.allowed_domains[0] in link]
for link in links:
# print(link)
if '/p/' in link:
yield Request(link, callback=self.extract_items)
if self.allowed_domains[0] in link:
yield Request(link, callback=self.parse)
def extract_items(self, response):
pattern_match_text = r"MartJack\s=({.*})"
data = response.xpath(".//script[@type='text/javascript']/text()").re_first(pattern_match_text)
product = json.loads(data)['PageInfo']
if product['PageType'] == 'product':
product['url'] = response.url
yield product
|
py | 1a55309d5ea1fdc54d5594d5c27fd89cb94cb94d | import paho.mqtt.client as mqtt
import json
import mqttsqlite.settings.private_settings as Settings
MANAGEMENT_PASSWORD = Settings.MANAGEMENT_PASSWORD
MQTT_HOST = Settings.MQTT_HOST
MQTT_PORT = Settings.MQTT_PORT
ROOT_TOPIC = Settings.ROOT_TOPIC
desired_topic = 'salon/humedad'
payload = {}
payload['client'] = 'simple_example'
payload['topic'] = desired_topic
payload['password'] = MANAGEMENT_PASSWORD
def on_connect(client, userdata, flags, rc):
client_topic = ROOT_TOPIC + 'topic/add'
client.subscribe(ROOT_TOPIC + 'response')
client.publish(client_topic, json.dumps(payload))
def on_message(client, userdata, msg):
received_data = json.loads(msg.payload)
print(received_data)
if 'client' in received_data:
if received_data['client'] == payload['client']:
            print('Received Message from Logger: ')
print(received_data)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_forever() |
py | 1a5530a661b9d44d6a3543c157989fcb4b24aaa1 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, OneLogin, Inc.
# All rights reserved.
import unittest
from onelogin.api.client import OneLoginClient
class OneLogin_API_Client_Test(unittest.TestCase):
def testClientWithData(self):
"""
Tests the constructor method of the OneLoginClient class
Build a OneLoginClient object with a client_id and client_secret
"""
client = OneLoginClient(
client_id='test_client_id',
client_secret='test_client_secret'
)
self.assertIsNot(client, None)
self.assertEqual('test_client_id', client.client_id)
self.assertEqual('test_client_secret', client.client_secret)
self.assertEqual('us', client.url_builder.region)
def testClientWithDataAndRegion(self):
"""
Tests the constructor method of the OneLoginClient class
Build a OneLoginClient object with a client_id, client_secret and region
"""
client = OneLoginClient(
client_id='test_client_id',
client_secret='test_client_secret',
region='eu'
)
self.assertIsNot(client, None)
self.assertEqual('test_client_id', client.client_id)
self.assertEqual('test_client_secret', client.client_secret)
self.assertEqual('eu', client.url_builder.region)
def testClientWithNoData(self):
"""
Tests the constructor method of the OneLoginClient class
Build a OneLoginClient object with no data
"""
with self.assertRaises(Exception):
client = OneLoginClient()
def testClientDefaultErrorValues(self):
"""
Tests the constructor method of the OneLoginClient class
Build a OneLoginClient object and check if the error attributes exist and are None
"""
client = OneLoginClient(
client_id='test_client_id',
client_secret='test_client_secret',
region='eu'
)
self.assertIsNone(client.error)
self.assertIsNone(client.error_description)
|
py | 1a5530f6e962d93f60385f080f3adcea0c5dad47 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def fertil1(path):
"""fertil1
Data loads lazily. Type data(fertil1) into the console.
A data.frame with 1129 rows and 27 variables:
- year. 72 to 84, even
- educ. years of schooling
- meduc. mother's education
- feduc. father's education
- age. in years
- kids. # children ever born
- black. = 1 if black
- east. = 1 if lived in east at 16
- northcen. = 1 if lived in nc at 16
- west. = 1 if lived in west at 16
- farm. = 1 if on farm at 16
- othrural. = 1 if other rural at 16
- town. = 1 if lived in town at 16
- smcity. = 1 if in small city at 16
- y74. = 1 if year = 74
- y76.
- y78.
- y80.
- y82.
- y84.
- agesq. age^2
- y74educ.
- y76educ.
- y78educ.
- y80educ.
- y82educ.
- y84educ.
  https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `fertil1.csv`.
Returns:
Tuple of np.ndarray `x_train` with 1129 rows and 27 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'fertil1.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/wooldridge/fertil1.csv'
maybe_download_and_extract(path, url,
save_file_name='fertil1.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
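# Illustrative usage (sketch; added for demonstration — fetches the CSV into the
# given directory on first call, so it needs network access):
if __name__ == '__main__':
  x_train, metadata = fertil1('~/data')
  print(x_train.shape)              # expected (1129, 27) per the docstring
  print(list(metadata['columns'])[:5])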
|
py | 1a5531d10338ed1ef110e08a6707ebcb08360d6a | # This file contains dictionaries used in the Dalvik Format.
# https://source.android.com/devices/tech/dalvik/dex-format#type-codes
TYPE_MAP_ITEM = {
0x0: "TYPE_HEADER_ITEM",
0x1: "TYPE_STRING_ID_ITEM",
0x2: "TYPE_TYPE_ID_ITEM",
0x3: "TYPE_PROTO_ID_ITEM",
0x4: "TYPE_FIELD_ID_ITEM",
0x5: "TYPE_METHOD_ID_ITEM",
0x6: "TYPE_CLASS_DEF_ITEM",
0x1000: "TYPE_MAP_LIST",
0x1001: "TYPE_TYPE_LIST",
0x1002: "TYPE_ANNOTATION_SET_REF_LIST",
0x1003: "TYPE_ANNOTATION_SET_ITEM",
0x2000: "TYPE_CLASS_DATA_ITEM",
0x2001: "TYPE_CODE_ITEM",
0x2002: "TYPE_STRING_DATA_ITEM",
0x2003: "TYPE_DEBUG_INFO_ITEM",
0x2004: "TYPE_ANNOTATION_ITEM",
0x2005: "TYPE_ENCODED_ARRAY_ITEM",
0x2006: "TYPE_ANNOTATIONS_DIRECTORY_ITEM",
}
# https://source.android.com/devices/tech/dalvik/dex-format#access-flags
ACCESS_FLAGS = {
0x1: 'public',
0x2: 'private',
0x4: 'protected',
0x8: 'static',
0x10: 'final',
0x20: 'synchronized',
0x40: 'bridge',
0x80: 'varargs',
0x100: 'native',
0x200: 'interface',
0x400: 'abstract',
0x800: 'strictfp',
0x1000: 'synthetic',
0x4000: 'enum',
0x8000: 'unused',
0x10000: 'constructor',
0x20000: 'synchronized',
}
# https://source.android.com/devices/tech/dalvik/dex-format#typedescriptor
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
}
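# Illustrative helper (not part of the original tables above): decode a raw
# access_flags bitmask into the human-readable names defined in ACCESS_FLAGS,
# e.g. describe_access_flags(0x19) -> ['public', 'static', 'final'].
def describe_access_flags(bitmask):
    return [name for flag, name in sorted(ACCESS_FLAGS.items()) if bitmask & flag]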
|
py | 1a55336e8acf5682e617b59015e28eb399d1ef30 | # Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .visualization import * # noqa: F401, F403
|
py | 1a5533dc5752ba071bb00e09ddee133738ab4474 | import webapp2
from webapp2_extras import sessions
class MainHandler(webapp2.RequestHandler):
def get(self):
# Session is stored on both client browser and our database
session_1 = self.session_store.get_session(name='dbcookie',backend='datastore')
previous_value_1 = session_1.get("my_attr_name")
self.response.out.write('on db, ' + str(previous_value_1))
session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "")
self.response.out.write('<br>')
# Session is stored on client browser only
session_2 = self.session_store.get_session(name='clientcookie')
previous_value_2 = session_2.get('my_attr_name')
self.response.out.write('on client browser, ' + str(previous_value_2))
session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "")
self.response.out.write('<br>')
# Session is stored on both client browser and our memcache for fast access
session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache")
previous_value_3 = session_3.get('my_attr_name')
self.response.out.write('on memcache, ' + str(previous_value_3))
session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "")
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
class MainHandlerWithArguments(webapp2.RequestHandler):
def get(self, photo_key): # even with arguments, we call with dispatch(self)
# Session is stored on both client browser and our database
session_1 = self.session_store.get_session(name='dbcookie',backend='datastore')
previous_value_1 = session_1.get("my_attr_name")
self.response.out.write('on db, ' + str(previous_value_1))
session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "")
self.response.out.write('<br>')
# Session is stored on client browser only
session_2 = self.session_store.get_session(name='clientcookie')
previous_value_2 = session_2.get('my_attr_name')
self.response.out.write('on client browser, ' + str(previous_value_2))
session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "")
self.response.out.write('<br>')
# Session is stored on both client browser and our memcache for fast access
session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache")
previous_value_3 = session_3.get('my_attr_name')
self.response.out.write('on memcache, ' + str(previous_value_3))
session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "")
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext import blobstore
class MyUploadHandler(blobstore_handlers.BlobstoreUploadHandler):
def my_post_dispatch(self, *args, **kwargs):
        ''' A fake dispatch method that you call from inside your Route();
            an imitation of the webapp2-style dispatch() with limited functionality
'''
self.session_store = sessions.get_store(request=self.request)
try:
if self.request.method == 'POST':
self.post(*args, **kwargs) # since webapp doesn't have dispatch() method like webapp2, we do it manually
else:
self.error(405)
self.response.out.write('Method not allowed')
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
def wrapper(func):
def dest(self, *args, **kwargs):
            print 'before decorated' # for your future use; you can write a wrapper like 'user_required'
func(self,*args, **kwargs)
print 'after decorated'
return dest
@wrapper
def post(self):
# Get all the uploaded file info
myfiles = self.get_uploads('file') # this is a list of blob key info
# You do some operations on the myfiles, maybe transform them
# maybe associate them with other ndb entities in your database
# ...
# But we also want to manipulate with the session, RIGHT ???
# Session is stored on both client browser and our database
session_1 = self.session_store.get_session(name='dbcookie',backend='datastore')
previous_value_1 = session_1.get("my_attr_name")
self.response.out.write('on db, ' + str(previous_value_1))
session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "")
self.response.out.write('<br>')
# Session is stored on client browser only
session_2 = self.session_store.get_session(name='clientcookie')
previous_value_2 = session_2.get('my_attr_name')
self.response.out.write('on client browser, ' + str(previous_value_2))
session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "")
self.response.out.write('<br>')
# Session is stored on both client browser and our memcache for fast access
session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache")
previous_value_3 = session_3.get('my_attr_name')
self.response.out.write('on memcache, ' + str(previous_value_3))
session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "")
        # Finally, delete the uploaded blobs so they do not pile up in the blobstore.
        for each in self.get_uploads('file'):
            blobstore.delete(each.key())
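# A minimal wiring sketch (the route paths and the `config` dict are assumptions, not
# part of this file) showing how the fake dispatch methods above/below might be plugged
# into webapp2 routes, so the session store is initialised even for these webapp-style
# blobstore handlers:
#
#   app = webapp2.WSGIApplication([
#       webapp2.Route('/upload', handler=MyUploadHandler,
#                     handler_method='my_post_dispatch'),
#       webapp2.Route('/serve/<photo_key>', handler=ServeBlobHandler,   # defined below
#                     handler_method='my_get_dispatch'),
#   ], config=config, debug=True)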
class ServeBlobHandler(blobstore_handlers.BlobstoreDownloadHandler):
''' Serve the images to the public '''
def my_get_dispatch(self, *args, **kwargs):
        ''' A fake dispatch method meant to be called from your Route().
        An imitation of the webapp2-style dispatch() with limited functionality.
        '''
self.session_store = sessions.get_store(request=self.request)
try:
if self.request.method == 'GET':
self.get(*args, **kwargs) # this is the real get method we want here
else:
self.error(405)
self.response.out.write('Method not allowed')
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
def wrapper(func):
def dest(self, *args, **kwargs):
print 'before decorated' # for your future use. you can write wrapper like 'user_required'
func(self,*args, **kwargs)
print 'after decorated'
return dest
@wrapper
def get(self, photo_key):
if not blobstore.get(photo_key):
self.error(404)
else:
self.send_blob(photo_key) |
py | 1a553488808d381e84b7f4f5c7d0abb799fe3a1f | """
WSGI config for gitlang project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gitlang.settings')
application = get_wsgi_application()
|
py | 1a5534f3b930db542599fcd82a63d8c791bafa14 | import FWCore.ParameterSet.Config as cms
#
# module to make simple analyses of muons
#
analyzeMuon = cms.EDAnalyzer("TopMuonAnalyzer",
input = cms.InputTag("selectedPatMuons"),
verbose = cms.bool(True)
)
|
py | 1a553547d3c422a5c120c9868f5e34a8d848c292 | """
Django settings for home project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
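# A minimal sketch of the .env values this module expects (the variable names come from
# the config() calls above; the values shown are placeholders, not real project settings):
#
#   MODE=dev
#   SECRET_KEY=<your-secret-key>
#   DEBUG=True
#   DB_NAME=home
#   DB_USER=<db-user>
#   DB_PASSWORD=<db-password>
#   DB_HOST=127.0.0.1
#   DATABASE_URL=postgres://<user>:<password>@<host>:5432/<dbname>
#   ALLOWED_HOSTS=localhost,127.0.0.1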
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: don't run with debug turned on in production!
# SECRET_KEY, DEBUG and ALLOWED_HOSTS are loaded from the environment above;
# do not hard-code them here.
# Application definition
INSTALLED_APPS = [
'neighbourhood.apps.NeighbourhoodConfig',
'bootstrap3',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'home.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'home.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'home',
# 'USER': 'maurine',
# 'PASSWORD':'maurinesinami'
# }
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
django_heroku.settings(locals()) |
py | 1a553745483b8b987800b08da43e5167731adb9c | import json
def Serialize(path: str, settings: dict):
with open (path, "w", encoding="utf-8") as f:
f.write(json.dumps(settings, indent=4))
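# Example round trip (the file name is illustrative; Deserialize is defined below):
#   Serialize("settings.json", {"theme": "dark", "volume": 5})
#   settings = Deserialize("settings.json")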
def Deserialize(path: str):
settings = dict()
with open(path, "r", encoding="utf-8") as f:
settings = json.load(f)
return settings |
py | 1a5538286b879faccc7f79df08a39158b6118de7 |
from urlparse import parse_qs, urlparse
import urllib
import json
'''
Extract the user fbid from the hovercard URL of a post being scraped,
example : /ajax/hovercard/user.php?id=684848775&extragetparams=%7B%22hc_ref%22%3A%22NEWSFEED%22%2C%22fref%22%3A%22nf%22%7D
'''
def getFbId(hovercard_url):
url = hovercard_url.split('?')[1]
return parse_qs(url)['id'][0].replace("\r\n", "")
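# Example, using the sample hovercard URL quoted in the docstring above:
#   getFbId('/ajax/hovercard/user.php?id=684848775&extragetparams=%7B%22hc_ref%22%3A%22NEWSFEED%22%2C%22fref%22%3A%22nf%22%7D')  # -> '684848775'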
'''
Facebook uses a wide variety of URL formats, but each URL contains the post id for the particular post being scraped.
Below are the most commonly used URL forms for a post:
- https://www.facebook.com/photo.php?fbid=336576623397916&set=a.152082791847301.1073741828.100011367422621&type=3
- /permalink.php?story_fbid=336990686689843&id=100011367422621
- https://www.facebook.com/media/set/?set=a.381794285338690.1073741947.100005243644525&type=3
- /bazooka.penaka.3720/posts/161654117597544
- /shekhanuar.alkhattab/videos/172420779845422/
- /groups/643968408983786/permalink/1069548199759136/
'''
def getPostId(url):
proc_url = urlparse(url)
proc_query = parse_qs(proc_url.query)
if 'fbid' in proc_query:
post_id = proc_query['fbid'][0]
elif 'story_fbid' in proc_query:
post_id = proc_query['story_fbid'][0]
elif 'set' in proc_query:
pre_post_id = proc_query['set'][0].split('.')
post_id = pre_post_id[1]
else:
proc_path = proc_url.path
pre_post_id = proc_path.split('/')
if len(pre_post_id) <= 5:
post_id = pre_post_id[3]
else:
post_id = pre_post_id[4]
return post_id
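# Illustrative calls, using sample URLs from the docstring above (the ids come from
# those samples, not from live data):
#   getPostId('/permalink.php?story_fbid=336990686689843&id=100011367422621')   # -> '336990686689843'
#   getPostId('/shekhanuar.alkhattab/videos/172420779845422/')                  # -> '172420779845422'
#   getPostId('/groups/643968408983786/permalink/1069548199759136/')            # -> '1069548199759136'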
'''
Sample Facebook video URLs. Note that the video post id must be correct in order to get the real URL.
Embed video url
- https://www.facebook.com/video/embed?video_id=1051396234975127
Direct link to video (the request will return json data with HD source and SD source of the video)
- https://www.facebook.com/video/video_data/?video_id=668662573312263
!important:
The direct link URL expires: its lifetime is tied to the timestamp at which it was generated for the request.
Once the URL has expired, the video will no longer play.
'''
def buildEmbedVideoUrl(postId):
json_data = urllib.urlopen('https://www.facebook.com/video/video_data/?video_id=' + postId).read()
try:
parse_json = json.loads(json_data)
video_direct_url = ""
video_embed_url = "https://www.facebook.com/video/embed?video_id=" + postId
if 'hd_src_no_ratelimit' in parse_json:
video_direct_url = parse_json['hd_src_no_ratelimit']
else:
video_direct_url = parse_json['sd_src_no_ratelimit']
except Exception, e:
video_direct_url = ""
video_embed_url = "https://www.facebook.com/video/embed?video_id=" + postId
return video_direct_url, video_embed_url
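# Example usage (the post id is the sample from the docstring above; the direct URL is
# short-lived because of Facebook's expiry behaviour described there):
#   direct_url, embed_url = buildEmbedVideoUrl('668662573312263')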
'''
Building hashtag from message / shared message / attachment description
'''
def buildHashtag(textContent):
hashtag = {tag.strip("#").replace('.', '').replace(',', '').lower() for tag in textContent.split() if tag.startswith("#")}
return list(hashtag) |
py | 1a55385c7044df6d8980e507d560cae5372c887d | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import sys
import pytest
from testing_support.fixtures import (
capture_transaction_metrics,
override_generic_settings,
validate_transaction_errors,
)
from newrelic.api.background_task import background_task
from newrelic.api.function_trace import function_trace
from newrelic.api.message_transaction import message_transaction
from newrelic.api.transaction import current_transaction
from newrelic.api.web_transaction import web_transaction
from newrelic.core.config import global_settings
if sys.version_info >= (3, 5):
from _test_async_coroutine_transaction import native_coroutine_test
else:
native_coroutine_test = None
settings = global_settings()
def coroutine_test(event_loop, transaction, nr_enabled=True, does_hang=False, call_exit=False, runtime_error=False):
@transaction
@asyncio.coroutine
def task():
txn = current_transaction()
if not nr_enabled:
assert txn is None
else:
assert txn._loop_time == 0.0
if call_exit:
txn.__exit__(None, None, None)
else:
assert current_transaction() is txn
try:
if does_hang:
                    yield from event_loop.create_future()
else:
yield from asyncio.sleep(0.0)
if nr_enabled and txn.enabled:
# Validate loop time is recorded after suspend
assert txn._loop_time > 0.0
except GeneratorExit:
if runtime_error:
yield from asyncio.sleep(0.0)
return task
test_matrix = [coroutine_test]
if native_coroutine_test:
test_matrix.append(native_coroutine_test)
@pytest.mark.parametrize("num_coroutines", (2,))
@pytest.mark.parametrize("create_test_task", test_matrix)
@pytest.mark.parametrize(
"transaction,metric",
[
(background_task(name="test"), "OtherTransaction/Function/test"),
(
message_transaction("lib", "dest_type", "dest_name"),
"OtherTransaction/Message/lib/dest_type/Named/dest_name",
),
],
)
@pytest.mark.parametrize(
"nr_enabled,call_exit",
(
(False, False),
(True, False),
(True, True),
),
)
def test_async_coroutine_send(event_loop, num_coroutines, create_test_task, transaction, metric, call_exit, nr_enabled):
metrics = []
tasks = [
create_test_task(event_loop, transaction, nr_enabled=nr_enabled, call_exit=call_exit)
for _ in range(num_coroutines)
]
@override_generic_settings(settings, {"enabled": nr_enabled})
@capture_transaction_metrics(metrics)
def _test_async_coroutine_send():
driver = asyncio.gather(*[t() for t in tasks])
event_loop.run_until_complete(driver)
_test_async_coroutine_send()
if nr_enabled:
assert metrics.count((metric, "")) == num_coroutines, metrics
else:
assert not metrics, metrics
@pytest.mark.parametrize("num_coroutines", (2,))
@pytest.mark.parametrize("create_test_task", test_matrix)
@pytest.mark.parametrize(
"transaction,metric",
[
(background_task(name="test"), "OtherTransaction/Function/test"),
(
message_transaction("lib", "dest_type", "dest_name"),
"OtherTransaction/Message/lib/dest_type/Named/dest_name",
),
],
)
def test_async_coroutine_send_disabled(event_loop, num_coroutines, create_test_task, transaction, metric):
metrics = []
tasks = [create_test_task(event_loop, transaction, call_exit=True) for _ in range(num_coroutines)]
@capture_transaction_metrics(metrics)
def _test_async_coroutine_send():
driver = asyncio.gather(*[t() for t in tasks])
event_loop.run_until_complete(driver)
_test_async_coroutine_send()
assert metrics.count((metric, "")) == num_coroutines, metrics
@pytest.mark.parametrize("num_coroutines", (2,))
@pytest.mark.parametrize("create_test_task", test_matrix)
@pytest.mark.parametrize(
"transaction,metric",
[
(background_task(name="test"), "OtherTransaction/Function/test"),
(
message_transaction("lib", "dest_type", "dest_name"),
"OtherTransaction/Message/lib/dest_type/Named/dest_name",
),
],
)
@validate_transaction_errors([])
def test_async_coroutine_throw_cancel(event_loop, num_coroutines, create_test_task, transaction, metric):
metrics = []
tasks = [create_test_task(event_loop, transaction) for _ in range(num_coroutines)]
@asyncio.coroutine
def task_c():
futures = [asyncio.ensure_future(t()) for t in tasks]
yield from asyncio.sleep(0.0)
[f.cancel() for f in futures]
@capture_transaction_metrics(metrics)
def _test_async_coroutine_throw_cancel():
event_loop.run_until_complete(task_c())
_test_async_coroutine_throw_cancel()
assert metrics.count((metric, "")) == num_coroutines, metrics
@pytest.mark.parametrize("num_coroutines", (2,))
@pytest.mark.parametrize("create_test_task", test_matrix)
@pytest.mark.parametrize(
"transaction,metric",
[
(background_task(name="test"), "OtherTransaction/Function/test"),
(
message_transaction("lib", "dest_type", "dest_name"),
"OtherTransaction/Message/lib/dest_type/Named/dest_name",
),
],
)
@validate_transaction_errors(["builtins:ValueError"])
def test_async_coroutine_throw_error(event_loop, num_coroutines, create_test_task, transaction, metric):
metrics = []
tasks = [create_test_task(event_loop, transaction) for _ in range(num_coroutines)]
@asyncio.coroutine
def task_c():
coros = [t() for t in tasks]
for coro in coros:
with pytest.raises(ValueError):
coro.throw(ValueError)
@capture_transaction_metrics(metrics)
def _test_async_coroutine_throw_error():
event_loop.run_until_complete(task_c())
_test_async_coroutine_throw_error()
assert metrics.count((metric, "")) == num_coroutines, metrics
assert metrics.count(("Errors/" + metric, "")) == num_coroutines, metrics
assert metrics.count(("Errors/all", "")) == num_coroutines, metrics
@pytest.mark.parametrize("num_coroutines", (1,))
@pytest.mark.parametrize("create_test_task", test_matrix)
@pytest.mark.parametrize(
"transaction,metric",
[
(background_task(name="test"), "OtherTransaction/Function/test"),
(
message_transaction("lib", "dest_type", "dest_name"),
"OtherTransaction/Message/lib/dest_type/Named/dest_name",
),
],
)
@pytest.mark.parametrize("start_coroutines", (False, True))
def test_async_coroutine_close(event_loop, num_coroutines, create_test_task, transaction, metric, start_coroutines):
metrics = []
tasks = [create_test_task(event_loop, transaction) for _ in range(num_coroutines)]
@asyncio.coroutine
def task_c():
coros = [t() for t in tasks]
if start_coroutines:
[asyncio.ensure_future(coro) for coro in coros]
yield from asyncio.sleep(0.0)
[coro.close() for coro in coros]
@capture_transaction_metrics(metrics)
def _test_async_coroutine_close():
event_loop.run_until_complete(task_c())
_test_async_coroutine_close()
if start_coroutines:
assert metrics.count((metric, "")) == num_coroutines, metrics
else:
assert not metrics
@pytest.mark.parametrize("num_coroutines", (1,))
@pytest.mark.parametrize("create_test_task", test_matrix)
@pytest.mark.parametrize(
"transaction,metric",
[
(background_task(name="test"), "OtherTransaction/Function/test"),
(
message_transaction("lib", "dest_type", "dest_name"),
"OtherTransaction/Message/lib/dest_type/Named/dest_name",
),
],
)
@validate_transaction_errors(["builtins:RuntimeError"])
def test_async_coroutine_close_raises_error(event_loop, num_coroutines, create_test_task, transaction, metric):
metrics = []
tasks = [create_test_task(event_loop, transaction, runtime_error=True) for _ in range(num_coroutines)]
@asyncio.coroutine
def task_c():
coros = [t() for t in tasks]
[c.send(None) for c in coros]
yield from asyncio.sleep(0.0)
for coro in coros:
with pytest.raises(RuntimeError):
coro.close()
@capture_transaction_metrics(metrics)
def _test_async_coroutine_close_raises_error():
event_loop.run_until_complete(task_c())
_test_async_coroutine_close_raises_error()
assert metrics.count((metric, "")) == num_coroutines, metrics
assert metrics.count(("Errors/all", "")) == num_coroutines, metrics
@pytest.mark.parametrize(
"transaction,metric,arguments",
[
(web_transaction, "Apdex/Function/%s", lambda name: ([], {"name": name})),
(
message_transaction,
"OtherTransaction/Message/lib/dest_type/Named/%s",
lambda name: (["lib", "dest_type", name], {}),
),
(background_task, "OtherTransaction/Function/%s", lambda name: ([], {"name": name})),
],
)
def test_deferred_async_background_task(event_loop, transaction, metric, arguments):
deferred_metric = (metric % "deferred", "")
args, kwargs = arguments("deferred")
@transaction(*args, **kwargs)
@asyncio.coroutine
def child_task():
yield from asyncio.sleep(0)
main_metric = (metric % "main", "")
args, kwargs = arguments("main")
@transaction(*args, **kwargs)
@asyncio.coroutine
def parent_task():
yield from asyncio.sleep(0)
return event_loop.create_task(child_task())
@asyncio.coroutine
def test_runner():
child = yield from parent_task()
yield from child
metrics = []
@capture_transaction_metrics(metrics)
def _test():
event_loop.run_until_complete(test_runner())
_test()
assert main_metric in metrics
assert deferred_metric in metrics
@pytest.mark.parametrize(
"transaction,metric,arguments",
[
(web_transaction, "Apdex/Function/%s", lambda name: ([], {"name": name})),
(
message_transaction,
"OtherTransaction/Message/lib/dest_type/Named/%s",
lambda name: (["lib", "dest_type", name], {}),
),
(background_task, "OtherTransaction/Function/%s", lambda name: ([], {"name": name})),
],
)
def test_child_transaction_when_parent_is_running(event_loop, transaction, metric, arguments):
deferred_metric = (metric % "deferred", "")
args, kwargs = arguments("deferred")
@transaction(*args, **kwargs)
@asyncio.coroutine
def child_task():
yield from asyncio.sleep(0)
main_metric = (metric % "main", "")
args, kwargs = arguments("main")
@transaction(*args, **kwargs)
@asyncio.coroutine
def parent_task():
yield from event_loop.create_task(child_task())
metrics = []
@capture_transaction_metrics(metrics)
def _test():
event_loop.run_until_complete(parent_task())
_test()
assert main_metric in metrics
assert deferred_metric in metrics
@pytest.mark.parametrize(
"transaction,metric,arguments",
[
(web_transaction, "Apdex/Function/%s", lambda name: ([], {"name": name})),
(
message_transaction,
"OtherTransaction/Message/lib/dest_type/Named/%s",
lambda name: (["lib", "dest_type", name], {}),
),
(background_task, "OtherTransaction/Function/%s", lambda name: ([], {"name": name})),
],
)
def test_nested_coroutine_inside_sync(event_loop, transaction, metric, arguments):
child_metric = (metric % "child", "")
args, kwargs = arguments("child")
@transaction(*args, **kwargs)
@asyncio.coroutine
def child_task():
yield from asyncio.sleep(0)
main_metric = (metric % "main", "")
args, kwargs = arguments("main")
metrics = []
@capture_transaction_metrics(metrics)
@transaction(*args, **kwargs)
def parent():
event_loop.run_until_complete(child_task())
parent()
assert main_metric in metrics
assert child_metric not in metrics
@pytest.mark.parametrize(
"transaction,metric,arguments",
[
(web_transaction, "Apdex/Function/%s", lambda name: ([], {"name": name})),
(
message_transaction,
"OtherTransaction/Message/lib/dest_type/Named/%s",
lambda name: (["lib", "dest_type", name], {}),
),
(background_task, "OtherTransaction/Function/%s", lambda name: ([], {"name": name})),
],
)
def test_nested_coroutine_task_already_active(event_loop, transaction, metric, arguments):
deferred_metric = (metric % "deferred", "")
args, kwargs = arguments("deferred")
@transaction(*args, **kwargs)
@asyncio.coroutine
def child_task():
yield from asyncio.sleep(0)
@function_trace()
def child_trace():
yield from child_task()
main_metric = (metric % "main", "")
args, kwargs = arguments("main")
@transaction(*args, **kwargs)
@asyncio.coroutine
def parent_task():
yield from event_loop.create_task(child_trace())
metrics = []
@capture_transaction_metrics(metrics)
def _test():
event_loop.run_until_complete(parent_task())
_test()
assert main_metric in metrics
assert deferred_metric not in metrics
|
py | 1a55393e77c9e2c2cc97cbfb1f732bb2c7433bb1 | from subprocess import PIPE, Popen
def cmdline(command):
process = Popen(
args=command,
stdout=PIPE,
shell=True
)
return process.communicate()[0]
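# Example (the command is illustrative): cmdline("echo hello") returns the command's
# standard output, e.g. b'hello\n' on Python 3 (stderr is not captured by this helper).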
|
py | 1a553a74b4d13d1d40474ee86dddc7aebdeb4c35 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ProviderPaged(Paged):
"""
A paging container for iterating over a list of :class:`Provider <azure.mgmt.resource.resources.v2017_05_10.models.Provider>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Provider]'}
}
def __init__(self, *args, **kwargs):
super(ProviderPaged, self).__init__(*args, **kwargs)
|
py | 1a553aced6d2009d7a49f0d3bfef3c23cd237a65 | # NOTE: This only works for module-based PyMOL builds.
# Embedded versions of PyMOL call PyUnicode_SetDefaultEncoding at startup
import sys
sys.setdefaultencoding("utf-8")
|
py | 1a553ae2276958eb9c3319bcce8fdad9fd74dfaf | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
module: win_region
version_added: "2.3"
short_description: Set the region and format settings
description:
- Set the location settings of a Windows Server.
- Set the format settings of a Windows Server.
- Set the unicode language settings of a Windows Server.
- Copy across these settings to the default profile.
options:
location:
description:
- The location to set for the current user, see
U(https://msdn.microsoft.com/en-us/library/dd374073.aspx)
for a list of GeoIDs you can use and what location it relates to.
This needs to be set if C(format) or C(unicode_language) is not
set.
format:
description:
- The language format to set for the current user, see
U(https://msdn.microsoft.com/en-us/library/system.globalization.cultureinfo.aspx)
for a list of culture names to use. This needs to be set if
C(location) or C(unicode_language) is not set.
unicode_language:
description:
- The unicode language format to set for all users, see
U(https://msdn.microsoft.com/en-us/library/system.globalization.cultureinfo.aspx)
for a list of culture names to use. This needs to be set if
C(location) or C(format) is not set. After setting this
value a reboot is required for it to take effect.
copy_settings:
description:
- This will copy the current format and location values to new user
profiles and the welcome screen. This will only run if
C(location), C(format) or C(unicode_language) has resulted in a
change. If this process runs then it will always result in a
change.
type: bool
default: 'no'
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
# Set the region format to English United States
- win_region:
format: en-US
# Set the region format to English Australia and copy settings to new profiles
- win_region:
format: en-AU
copy_settings: yes
# Set the unicode language to English Great Britain, reboot if required
- win_region:
unicode_language: en-GB
register: result
- win_reboot:
when: result.restart_required
# Set the location to United States
- win_region:
location: 244
# Set format, location and unicode to English Australia and copy settings, reboot if required
- win_region:
location: 12
format: en-AU
unicode_language: en-AU
register: result
- win_reboot:
when: result.restart_required
'''
RETURN = r'''
restart_required:
description: Whether a reboot is required for the change to take effect
returned: success
type: boolean
sample: True
'''
|
py | 1a553b0a974879291819589f063dce249908bc5b | from ROAR.agent_module.agent import Agent
from pathlib import Path
from ROAR.control_module.pid_controller import PIDController
from ROAR.planning_module.local_planner.rl_local_planner import RLLocalPlanner
from ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner
from ROAR.planning_module.mission_planner.waypoint_following_mission_planner import WaypointFollowingMissionPlanner
from ROAR.utilities_module.data_structures_models import SensorsData
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
import logging
from ROAR.utilities_module.occupancy_map import OccupancyGridMap
from ROAR.perception_module.obstacle_from_depth import ObstacleFromDepth
from ROAR.planning_module.local_planner.simple_waypoint_following_local_planner import \
SimpleWaypointFollowingLocalPlanner
import numpy as np
from typing import Any
class RLLocalPlannerAgent(Agent):
def __init__(self, target_speed=40, **kwargs):
super().__init__(**kwargs)
self.target_speed = target_speed
self.logger = logging.getLogger("PID Agent")
self.route_file_path = Path(self.agent_settings.waypoint_file_path)
self.pid_controller = PIDController(agent=self, steering_boundary=(-1, 1), throttle_boundary=(0, 1))
self.mission_planner = WaypointFollowingMissionPlanner(agent=self)
# initiated right after mission plan
self.behavior_planner = BehaviorPlanner(agent=self)
self.local_planner = RLLocalPlanner(
agent=self,
controller=self.pid_controller)
self.traditional_local_planner = SimpleWaypointFollowingLocalPlanner(
agent=self,
controller=self.pid_controller,
mission_planner=self.mission_planner,
behavior_planner=self.behavior_planner,
closeness_threshold=1.5
)
self.absolute_maximum_map_size, self.map_padding = 1000, 40
self.occupancy_map = OccupancyGridMap(agent=self, threaded=True)
self.obstacle_from_depth_detector = ObstacleFromDepth(agent=self,threaded=True)
self.add_threaded_module(self.obstacle_from_depth_detector)
# self.add_threaded_module(self.occupancy_map)
self.logger.debug(
f"Waypoint Following Agent Initiated. Reading f"
f"rom {self.route_file_path.as_posix()}")
def run_step(self, vehicle: Vehicle,
sensors_data: SensorsData) -> VehicleControl:
super(RLLocalPlannerAgent, self).run_step(vehicle=vehicle,
sensors_data=sensors_data)
self.traditional_local_planner.run_in_series()
self.transform_history.append(self.vehicle.transform)
option = "obstacle_coords" # ground_coords, point_cloud_obstacle_from_depth
if self.kwargs.get(option, None) is not None:
points = self.kwargs[option]
self.occupancy_map.update(points)
control = self.local_planner.run_in_series()
return control
def get_obs(self):
ch1 = self.occupancy_map.get_map(transform=self.vehicle.transform,
view_size=(100, 100))
ch1 = np.expand_dims((ch1 * 255).astype(np.uint8), -1)
ch2 = np.zeros(shape=(100, 100, 1))
ch3 = np.zeros(shape=ch2.shape)
obs = np.concatenate([ch1, ch2, ch3], axis=2)
print(np.shape(obs))
return obs
|
py | 1a553b3dcbeb783f467e905b97e49bda57aedad2 | """
Read parameters from AWS Systems Manager - Parameter Store
"""
import os
import yaml
import boto3
redis = {}
ssm = boto3.client('ssm', region_name='us-east-1')
redis['dev'] = ssm.get_parameter(Name='redis/dev', WithDecryption=True)['Parameter']['Value']
redis['qa'] = ssm.get_parameter(Name='redis/qa', WithDecryption=True)['Parameter']['Value']
|
py | 1a553c4cad2d9c67c6c65b4631c7004ee85c0804 | # -*- coding: utf-8 -*-
# Recursive version with @lru_cache
from functools import lru_cache
import timeit
@lru_cache(maxsize=24)
def factorial(n):
if n == 0:
return 1
elif n == 1:
return 1
else:
return n * factorial(n - 1)
@lru_cache(maxsize=24)
def fib(n):
if n == 0 or n == 1:
return n
else:
return fib(n - 2) + fib(n - 1)
if __name__ == '__main__':
    # Call each function once to populate the lru_cache, then time the (now cached) calls.
    fib(30)
    factorial(30)
    print(timeit.timeit("factorial(30)", setup="from __main__ import factorial"))
    print(timeit.timeit("fib(30)", setup="from __main__ import fib")) |
py | 1a553cc252910b5249a088ada68041b2ba80a739 | # coding=utf-8
# Copyright 2019 SK T-Brain Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import requests
import hashlib
import torch
from transformers import BertModel, BertConfig
import gluonnlp as nlp
from .utils import download as _download
from .utils import tokenizer
pytorch_kobert = {
'url':
'https://kobert.blob.core.windows.net/models/kobert/pytorch/pytorch_kobert_2439f391a6.params',
'fname': 'pytorch_kobert_2439f391a6.params',
'chksum': '2439f391a6'
}
bert_config = {
'attention_probs_dropout_prob': 0.1,
'hidden_act': 'gelu',
'hidden_dropout_prob': 0.1,
'hidden_size': 768,
'initializer_range': 0.02,
'intermediate_size': 3072,
'max_position_embeddings': 512,
'num_attention_heads': 12,
'num_hidden_layers': 12,
'type_vocab_size': 2,
'vocab_size': 8002
}
def get_pytorch_kobert_model(ctx='cpu', cachedir='~/kobert/'):
# download model
model_info = pytorch_kobert
model_path = _download(model_info['url'],
model_info['fname'],
model_info['chksum'],
cachedir=cachedir)
# download vocab
vocab_info = tokenizer
vocab_path = _download(vocab_info['url'],
vocab_info['fname'],
vocab_info['chksum'],
cachedir=cachedir)
return get_kobert_model(model_path, vocab_path, ctx)
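# A minimal usage sketch (the input tensors are dummies for illustration, and the exact
# return value of BertModel's forward pass depends on the installed transformers version):
#
#   model, vocab = get_pytorch_kobert_model()
#   input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
#   attention_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
#   outputs = model(input_ids, attention_mask=attention_mask)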
def get_kobert_model(model_file, vocab_file, ctx="cpu"):
bertmodel = BertModel(config=BertConfig.from_dict(bert_config))
bertmodel.load_state_dict(torch.load(model_file))
device = torch.device(ctx)
bertmodel.to(device)
bertmodel.eval()
vocab_b_obj = nlp.vocab.BERTVocab.from_sentencepiece(vocab_file,
padding_token='[PAD]')
return bertmodel, vocab_b_obj
|
py | 1a553d2a52b249926a1e8b4d9a50e4b0fe52843e | import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers, models
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import os
import string
import numpy as np
from utils.convert import Convert
from conf.config import Config
def get_sample_data(path, size=500, height=70, width=130, cap_len=4, characters=""):
sample_dir = os.path.join(os.getcwd(), "img", path)
file_names = os.listdir(sample_dir)
sample_x = np.zeros([size, height*width])
sample_y = np.zeros([size, cap_len*len(characters)])
for seq in range(size):
captcha_image = np.array(Image.open(os.path.join(sample_dir, file_names[seq])))
captcha_text = file_names[seq].split(".png")[0]
image = Convert.convert_to_gray(captcha_image)
sample_x[seq, :] = image.flatten() / 255
sample_y[seq, :] = Convert.convert_to_vector(captcha_text, cap_len, characters)
return sample_x, sample_y
if __name__ == "__main__":
batch_size = 128
epochs = 15
IMG_HEIGHT = 70
IMG_WIDTH = 130
env_config = Config.load_env()
captcha_str_length = env_config["captcha_length"]
chars = ""
if env_config["captcha_has_number"]:
chars += string.digits
if env_config["captcha_has_lowercase"]:
chars += string.ascii_lowercase
if env_config["captcha_has_uppercase"]:
chars += string.ascii_uppercase
train_dir = os.path.join(os.getcwd(), "img", "train")
test_dir = os.path.join(os.getcwd(), "img", "test")
sample_images, sample_labels = get_sample_data(train_dir, size=500, height=IMG_HEIGHT, width=IMG_WIDTH, cap_len=captcha_str_length, characters=chars)
    # tf.keras.Input() requires a shape; the captcha images are converted to grayscale,
    # so a single channel is assumed here. Note that sample_images above are flattened
    # (height*width) and would need reshaping to (-1, IMG_HEIGHT, IMG_WIDTH, 1) before training.
    input_layer = tf.keras.Input(shape=(IMG_HEIGHT, IMG_WIDTH, 1))
x = layers.Conv2D(32, 3, activation='relu')(input_layer)
x = layers.MaxPooling2D((2, 2))(x)
x = layers.Conv2D(64, 3, activation='relu')(x)
x = layers.MaxPooling2D((2, 2))(x)
x = layers.Conv2D(64, 3, activation='relu')(x)
x = layers.MaxPooling2D((2, 2))(x)
# x = layers.Flatten()(x)
# x = layers.Dense(1024, activation='relu')(x)
# # x = layers.Dropout(0.5)(x)
#
# x = layers.Dense(D * N_LABELS, activation='softmax')(x)
# x = layers.Reshape((D, N_LABELS))(x)
model = models.Model(inputs=input_layer, outputs=x)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
|
py | 1a553dfdd3fce37fe4693108f7ca21720d9b1f31 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RktCompilerLib(RacketPackage):
"""Stub package for packages which are currently part of core
Racket installation (but which may change in the future)."""
git = "ssh://[email protected]/racket/racket.git"
maintainers = ['elfprince13']
version('8.3', commit='cab83438422bfea0e4bd74bc3e8305e6517cf25f') # tag='v8.3'
depends_on('[email protected]', type=('build', 'run'), when='@8.3')
depends_on('[email protected]', type=('build', 'run'), when='@8.3')
depends_on('[email protected]', type=('build', 'run'), when='@8.3')
depends_on('[email protected]', type=('build', 'run'), when='@8.3')
name = 'compiler-lib'
pkgs = True
subdirectory = "pkgs/{0}".format(name)
|
py | 1a553e0bc7a91236c8c6f32aa5b9e53893168c82 | """Exercise 1:
1.)
Write a function that takes an integer number n as an input.
The function returns a list with all power of twos (2^n) from 0 to n-1.
Please use a list comprehension.
2.)
Write a function that takes the result list from 1.) as an input.
Iterate over all values of the list and print the current index and the current
value in each iteration.
"""
def exercise1(n):
return [2 ** i for i in range(n)]
def exercise2(lst):
for idx, val in enumerate(lst):
print(idx, val)
def main():
n = 10
lst = exercise1(n)
print(lst)
exercise2(lst)
if __name__ == "__main__":
main()
|
py | 1a553f0e65ac9448da80fb3b9cfee640e57a193d | #! /usr/bin/env python3
"""Interfaces for launching and remotely controlling Web browsers."""
# Maintained by Georg Brandl.
import os
import shlex
import shutil
import sys
import subprocess
__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
class Error(Exception):
pass
_browsers = {} # Dictionary of available browser controllers
_tryorder = [] # Preference order of available browsers
def register(name, klass, instance=None, update_tryorder=1):
"""Register a browser connector and, optionally, connection."""
_browsers[name.lower()] = [klass, instance]
if update_tryorder > 0:
_tryorder.append(name)
elif update_tryorder < 0:
_tryorder.insert(0, name)
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=True):
for name in _tryorder:
browser = get(name)
if browser.open(url, new, autoraise):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
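# Quick usage sketch (the URL is illustrative): webbrowser.open("https://example.com")
# opens with the first usable browser in _tryorder; open_new_tab() simply passes new=2.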
def _synthesize(browser, update_tryorder=1):
"""Attempt to synthesize a controller base on existing controllers.
This is useful to create a controller when a user specifies a path to
an entry in the BROWSER environment variable -- we can copy a general
controller to operate using a specific installation of the desired
browser in this way.
If we can't create a controller in this way, or if there is no
executable for the requested browser, return [None, None].
"""
cmd = browser.split()[0]
if not shutil.which(cmd):
return [None, None]
name = os.path.basename(cmd)
try:
command = _browsers[name.lower()]
except KeyError:
return [None, None]
# now attempt to clone to fit the new name:
controller = command[1]
if controller and name.lower() == controller.basename:
import copy
controller = copy.copy(controller)
controller.name = browser
controller.basename = os.path.basename(browser)
register(browser, None, controller, update_tryorder)
return [None, controller]
return [None, None]
# General parent classes
class BaseBrowser(object):
"""Parent class for all browsers. Do not use directly."""
args = ['%s']
def __init__(self, name=""):
self.name = name
self.basename = name
def open(self, url, new=0, autoraise=True):
raise NotImplementedError
def open_new(self, url):
return self.open(url, 1)
def open_new_tab(self, url):
return self.open(url, 2)
class GenericBrowser(BaseBrowser):
"""Class for all browsers started with a command
and without remote functionality."""
def __init__(self, name):
if isinstance(name, str):
self.name = name
self.args = ["%s"]
else:
# name should be a list with arguments
self.name = name[0]
self.args = name[1:]
self.basename = os.path.basename(self.name)
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
p = subprocess.Popen(cmdline, close_fds=True)
return not p.wait()
except OSError:
return False
class BackgroundBrowser(GenericBrowser):
"""Class for all browsers which are to be started in the
background."""
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
p = subprocess.Popen(cmdline, close_fds=True,
start_new_session=True)
return (p.poll() is None)
except OSError:
return False
class UnixBrowser(BaseBrowser):
"""Parent class for all Unix browsers with remote functionality."""
raise_opts = None
background = False
redirect_stdout = True
# In remote_args, %s will be replaced with the requested URL. %action will
# be replaced depending on the value of 'new' passed to open.
# remote_action is used for new=0 (open). If newwin is not None, it is
# used for new=1 (open_new). If newtab is not None, it is used for
# new=3 (open_new_tab). After both substitutions are made, any empty
# strings in the transformed remote_args list will be removed.
remote_args = ['%action', '%s']
remote_action = None
remote_action_newwin = None
remote_action_newtab = None
def _invoke(self, args, remote, autoraise):
raise_opt = []
if remote and self.raise_opts:
# use autoraise argument only for remote invocation
autoraise = int(autoraise)
opt = self.raise_opts[autoraise]
if opt: raise_opt = [opt]
cmdline = [self.name] + raise_opt + args
if remote or self.background:
inout = subprocess.DEVNULL
else:
# for TTY browsers, we need stdin/out
inout = None
p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
stdout=(self.redirect_stdout and inout or None),
stderr=inout, start_new_session=True)
if remote:
# wait at most five seconds. If the subprocess is not finished, the
# remote invocation has (hopefully) started a new instance.
try:
rc = p.wait(5)
# if remote call failed, open() will try direct invocation
return not rc
except subprocess.TimeoutExpired:
return True
elif self.background:
if p.poll() is None:
return True
else:
return False
else:
return not p.wait()
def open(self, url, new=0, autoraise=True):
if new == 0:
action = self.remote_action
elif new == 1:
action = self.remote_action_newwin
elif new == 2:
if self.remote_action_newtab is None:
action = self.remote_action_newwin
else:
action = self.remote_action_newtab
else:
raise Error("Bad 'new' parameter to open(); " +
"expected 0, 1, or 2, got %s" % new)
args = [arg.replace("%s", url).replace("%action", action)
for arg in self.remote_args]
args = [arg for arg in args if arg]
success = self._invoke(args, True, autoraise)
if not success:
# remote invocation failed, try straight way
args = [arg.replace("%s", url) for arg in self.args]
return self._invoke(args, False, False)
else:
return True
class Mozilla(UnixBrowser):
"""Launcher class for Mozilla browsers."""
remote_args = ['%action', '%s']
remote_action = ""
remote_action_newwin = "-new-window"
remote_action_newtab = "-new-tab"
background = True
class Netscape(UnixBrowser):
"""Launcher class for Netscape browser."""
raise_opts = ["-noraise", "-raise"]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = True
class Galeon(UnixBrowser):
"""Launcher class for Galeon/Epiphany browsers."""
raise_opts = ["-noraise", ""]
remote_args = ['%action', '%s']
remote_action = "-n"
remote_action_newwin = "-w"
background = True
class Chrome(UnixBrowser):
"Launcher class for Google Chrome browser."
remote_args = ['%action', '%s']
remote_action = ""
remote_action_newwin = "--new-window"
remote_action_newtab = ""
background = True
Chromium = Chrome
class Opera(UnixBrowser):
"Launcher class for Opera browser."
remote_args = ['%action', '%s']
remote_action = ""
remote_action_newwin = "--new-window"
remote_action_newtab = ""
background = True
class Elinks(UnixBrowser):
"Launcher class for Elinks browsers."
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = False
# elinks doesn't like its stdout to be redirected -
# it uses redirected stdout as a signal to do -dump
redirect_stdout = False
class Konqueror(BaseBrowser):
"""Controller for the KDE File Manager (kfm, or Konqueror).
See the output of ``kfmclient --commands``
for more information on the Konqueror remote-control interface.
"""
def open(self, url, new=0, autoraise=True):
# XXX Currently I know no way to prevent KFM from opening a new win.
if new == 2:
action = "newTab"
else:
action = "openURL"
devnull = subprocess.DEVNULL
try:
p = subprocess.Popen(["kfmclient", action, url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull)
except OSError:
# fall through to next variant
pass
else:
p.wait()
# kfmclient's return code unfortunately has no meaning as it seems
return True
try:
p = subprocess.Popen(["konqueror", "--silent", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
start_new_session=True)
except OSError:
# fall through to next variant
pass
else:
if p.poll() is None:
# Should be running now.
return True
try:
p = subprocess.Popen(["kfm", "-d", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
start_new_session=True)
except OSError:
return False
else:
return (p.poll() is None)
class Grail(BaseBrowser):
# There should be a way to maintain a connection to Grail, but the
# Grail remote control protocol doesn't really allow that at this
# point. It probably never will!
def _find_grail_rc(self):
import glob
import pwd
import socket
import tempfile
tempdir = os.path.join(tempfile.gettempdir(),
".grail-unix")
user = pwd.getpwuid(os.getuid())[0]
filename = os.path.join(tempdir, user + "-*")
maybes = glob.glob(filename)
if not maybes:
return None
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
for fn in maybes:
# need to PING each one until we find one that's live
try:
s.connect(fn)
except OSError:
# no good; attempt to clean it out, but don't fail:
try:
os.unlink(fn)
except OSError:
pass
else:
return s
def _remote(self, action):
s = self._find_grail_rc()
if not s:
return 0
s.send(action)
s.close()
return 1
def open(self, url, new=0, autoraise=True):
if new:
ok = self._remote("LOADNEW " + url)
else:
ok = self._remote("LOAD " + url)
return ok
#
# Platform support for Unix
#
# These are the right tests because all these Unix browsers require either
# a console terminal or an X display to run.
def register_X_browsers():
# use xdg-open if around
if shutil.which("xdg-open"):
register("xdg-open", None, BackgroundBrowser("xdg-open"))
# The default GNOME3 browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and shutil.which("gvfs-open"):
register("gvfs-open", None, BackgroundBrowser("gvfs-open"))
# The default GNOME browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and shutil.which("gnome-open"):
register("gnome-open", None, BackgroundBrowser("gnome-open"))
# The default KDE browser
if "KDE_FULL_SESSION" in os.environ and shutil.which("kfmclient"):
register("kfmclient", Konqueror, Konqueror("kfmclient"))
if shutil.which("x-www-browser"):
register("x-www-browser", None, BackgroundBrowser("x-www-browser"))
# The Mozilla browsers
for browser in ("firefox", "iceweasel", "iceape", "seamonkey"):
if shutil.which(browser):
register(browser, None, Mozilla(browser))
# The Netscape and old Mozilla browsers
for browser in ("mozilla-firefox",
"mozilla-firebird", "firebird",
"mozilla", "netscape"):
if shutil.which(browser):
register(browser, None, Netscape(browser))
# Konqueror/kfm, the KDE browser.
if shutil.which("kfm"):
register("kfm", Konqueror, Konqueror("kfm"))
elif shutil.which("konqueror"):
register("konqueror", Konqueror, Konqueror("konqueror"))
# Gnome's Galeon and Epiphany
for browser in ("galeon", "epiphany"):
if shutil.which(browser):
register(browser, None, Galeon(browser))
# Skipstone, another Gtk/Mozilla based browser
if shutil.which("skipstone"):
register("skipstone", None, BackgroundBrowser("skipstone"))
# Google Chrome/Chromium browsers
for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"):
if shutil.which(browser):
register(browser, None, Chrome(browser))
# Opera, quite popular
if shutil.which("opera"):
register("opera", None, Opera("opera"))
# Next, Mosaic -- old but still in use.
if shutil.which("mosaic"):
register("mosaic", None, BackgroundBrowser("mosaic"))
# Grail, the Python browser. Does anybody still use it?
if shutil.which("grail"):
register("grail", Grail, None)
# Prefer X browsers if present
if os.environ.get("DISPLAY"):
register_X_browsers()
# Also try console browsers
if os.environ.get("TERM"):
if shutil.which("www-browser"):
register("www-browser", None, GenericBrowser("www-browser"))
# The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
if shutil.which("links"):
register("links", None, GenericBrowser("links"))
if shutil.which("elinks"):
register("elinks", None, Elinks("elinks"))
# The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
if shutil.which("lynx"):
register("lynx", None, GenericBrowser("lynx"))
# The w3m browser <http://w3m.sourceforge.net/>
if shutil.which("w3m"):
register("w3m", None, GenericBrowser("w3m"))
#
# Platform support for Windows
#
if sys.platform[:3] == "win":
class WindowsDefault(BaseBrowser):
def open(self, url, new=0, autoraise=True):
try:
os.startfile(url)
except OSError:
# [Error 22] No application is associated with the specified
# file for this operation: '<URL>'
return False
else:
return True
_tryorder = []
_browsers = {}
# First try to use the default Windows browser
register("windows-default", WindowsDefault)
# Detect some common Windows browsers, fallback to IE
iexplore = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"),
"Internet Explorer\\IEXPLORE.EXE")
for browser in ("firefox", "firebird", "seamonkey", "mozilla",
"netscape", "opera", iexplore):
if shutil.which(browser):
register(browser, None, BackgroundBrowser(browser))
#
# Platform support for MacOS
#
if sys.platform == 'darwin':
# Adapted from patch submitted to SourceForge by Steven J. Burr
class MacOSX(BaseBrowser):
"""Launcher class for Aqua browsers on Mac OS X
Optionally specify a browser name on instantiation. Note that this
will not work for Aqua browsers if the user has moved the application
package after installation.
If no browser is specified, the default browser, as specified in the
Internet System Preferences panel, will be used.
"""
def __init__(self, name):
self.name = name
def open(self, url, new=0, autoraise=True):
assert "'" not in url
# hack for local urls
if not ':' in url:
url = 'file:'+url
# new must be 0 or 1
new = int(bool(new))
if self.name == "default":
# User called open, open_new or get without a browser parameter
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
# User called get and chose a browser
if self.name == "OmniWeb":
toWindow = ""
else:
# Include toWindow parameter of OpenURL command for browsers
# that support it. 0 == new window; -1 == existing
toWindow = "toWindow %d" % (new - 1)
cmd = 'OpenURL "%s"' % url.replace('"', '%22')
script = '''tell application "%s"
activate
%s %s
end tell''' % (self.name, cmd, toWindow)
# Open pipe to AppleScript through osascript command
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
# Write script to osascript's stdin
osapipe.write(script)
rc = osapipe.close()
return not rc
class MacOSXOSAScript(BaseBrowser):
def __init__(self, name):
self._name = name
def open(self, url, new=0, autoraise=True):
if self._name == 'default':
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
script = '''
tell application "%s"
activate
open location "%s"
end
'''%(self._name, url.replace('"', '%22'))
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
osapipe.write(script)
rc = osapipe.close()
return not rc
# Don't clear _tryorder or _browsers since OS X can use above Unix support
# (but we prefer using the OS X specific stuff)
register("safari", None, MacOSXOSAScript('safari'), -1)
register("firefox", None, MacOSXOSAScript('firefox'), -1)
register("chrome", None, MacOSXOSAScript('chrome'), -1)
register("MacOSX", None, MacOSXOSAScript('default'), -1)
# OK, now that we know what the default preference orders for each
# platform are, allow user to override them with the BROWSER variable.
if "BROWSER" in os.environ:
_userchoices = os.environ["BROWSER"].split(os.pathsep)
_userchoices.reverse()
# Treat choices in same way as if passed into get() but do register
# and prepend to _tryorder
for cmdline in _userchoices:
if cmdline != '':
cmd = _synthesize(cmdline, -1)
if cmd[1] is None:
register(cmdline, None, GenericBrowser(cmdline), -1)
cmdline = None # to make del work if _userchoices was empty
del cmdline
del _userchoices
# what to do if _tryorder is now empty?
def main():
import getopt
usage = """Usage: %s [-n | -t] url
-n: open new window
-t: open new tab""" % sys.argv[0]
try:
opts, args = getopt.getopt(sys.argv[1:], 'ntd')
except getopt.error as msg:
print(msg, file=sys.stderr)
print(usage, file=sys.stderr)
sys.exit(1)
new_win = 0
for o, a in opts:
if o == '-n': new_win = 1
elif o == '-t': new_win = 2
if len(args) != 1:
print(usage, file=sys.stderr)
sys.exit(1)
url = args[0]
open(url, new_win)
print("\a")
if __name__ == "__main__":
main()
|
py | 1a553f3d8a0e02a31b2a0a54b56245d73a118b77 | # Numeric base converter
num = int(input("Enter a value to convert: "))
op = int(input("Choose [1] for binary, [2] for octal, [3] for hexadecimal "))
if op == 1:
    print(f"The number {num} in binary is {bin(num)[2:]}")
elif op == 2:
    print(f"The number {num} in octal is {oct(num)[2:]}")
elif op == 3:
    print(f"The number {num} in hexadecimal is {hex(num)[2:]}")
else:
    print("Invalid option") |
py | 1a553fa3f92869f45fd947444019f2ab48a4081f | # -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import json
from mock import Mock
from synapse.http.server import JsonResource
from synapse.rest.client.v1.admin import register_servlets
from synapse.util import Clock
from tests import unittest
from tests.server import (
ThreadedMemoryReactorClock,
make_request,
render,
setup_test_homeserver,
)
class UserRegisterTestCase(unittest.TestCase):
def setUp(self):
self.clock = ThreadedMemoryReactorClock()
self.hs_clock = Clock(self.clock)
self.url = "/_matrix/client/r0/admin/register"
self.registration_handler = Mock()
self.identity_handler = Mock()
self.login_handler = Mock()
self.device_handler = Mock()
self.device_handler.check_device_registered = Mock(return_value="FAKE")
self.datastore = Mock(return_value=Mock())
self.datastore.get_current_state_deltas = Mock(return_value=[])
self.secrets = Mock()
self.hs = setup_test_homeserver(
self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
)
self.hs.config.registration_shared_secret = u"shared"
self.hs.get_media_repository = Mock()
self.hs.get_deactivate_account_handler = Mock()
self.resource = JsonResource(self.hs)
register_servlets(self.hs, self.resource)
def test_disabled(self):
"""
If there is no shared secret, registration through this method will be
prevented.
"""
self.hs.config.registration_shared_secret = None
request, channel = make_request("POST", self.url, b'{}')
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(
'Shared secret registration is not enabled', channel.json_body["error"]
)
def test_get_nonce(self):
"""
Calling GET on the endpoint will return a randomised nonce, using the
homeserver's secrets provider.
"""
secrets = Mock()
secrets.token_hex = Mock(return_value="abcd")
self.hs.get_secrets = Mock(return_value=secrets)
request, channel = make_request("GET", self.url)
render(request, self.resource, self.clock)
self.assertEqual(channel.json_body, {"nonce": "abcd"})
def test_expired_nonce(self):
"""
Calling GET on the endpoint will return a randomised nonce, which will
only last for SALT_TIMEOUT (60s).
"""
request, channel = make_request("GET", self.url)
render(request, self.resource, self.clock)
nonce = channel.json_body["nonce"]
# 59 seconds
self.clock.advance(59)
body = json.dumps({"nonce": nonce})
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('username must be specified', channel.json_body["error"])
# 61 seconds
self.clock.advance(2)
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('unrecognised nonce', channel.json_body["error"])
def test_register_incorrect_nonce(self):
"""
Only the provided nonce can be used, as it's checked in the MAC.
"""
request, channel = make_request("GET", self.url)
render(request, self.resource, self.clock)
nonce = channel.json_body["nonce"]
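        # The MAC is an HMAC-SHA1, keyed with the shared secret, over the nonce,
        # username, password and admin/notadmin flag joined by NUL bytes; here a
        # wrong nonce is deliberately fed into the MAC so it will not verify.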
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(b"notthenonce\x00bob\x00abc123\x00admin")
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob",
"password": "abc123",
"admin": True,
"mac": want_mac,
}
)
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("HMAC incorrect", channel.json_body["error"])
def test_register_correct_nonce(self):
"""
When the correct nonce is provided, and the right key is provided, the
user is registered.
"""
request, channel = make_request("GET", self.url)
render(request, self.resource, self.clock)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin")
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob",
"password": "abc123",
"admin": True,
"mac": want_mac,
}
)
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["user_id"])
def test_nonce_reuse(self):
"""
        A nonce may only be used once: the first registration succeeds, and a
        second attempt with the same nonce is rejected as unrecognised.
"""
request, channel = make_request("GET", self.url)
render(request, self.resource, self.clock)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin")
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob",
"password": "abc123",
"admin": True,
"mac": want_mac,
}
)
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["user_id"])
# Now, try and reuse it
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('unrecognised nonce', channel.json_body["error"])
def test_missing_parts(self):
"""
Synapse will complain if you don't give nonce, username, password, and
mac. Admin is optional. Additional checks are done for length and
type.
"""
def nonce():
request, channel = make_request("GET", self.url)
render(request, self.resource, self.clock)
return channel.json_body["nonce"]
#
# Nonce check
#
# Must be present
body = json.dumps({})
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('nonce must be specified', channel.json_body["error"])
#
# Username checks
#
# Must be present
body = json.dumps({"nonce": nonce()})
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('username must be specified', channel.json_body["error"])
# Must be a string
body = json.dumps({"nonce": nonce(), "username": 1234})
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid username', channel.json_body["error"])
# Must not have null bytes
body = json.dumps({"nonce": nonce(), "username": u"abcd\u0000"})
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid username', channel.json_body["error"])
        # Must not be too long
body = json.dumps({"nonce": nonce(), "username": "a" * 1000})
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid username', channel.json_body["error"])
#
        # Password checks
#
# Must be present
body = json.dumps({"nonce": nonce(), "username": "a"})
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('password must be specified', channel.json_body["error"])
# Must be a string
body = json.dumps({"nonce": nonce(), "username": "a", "password": 1234})
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid password', channel.json_body["error"])
# Must not have null bytes
body = json.dumps(
{"nonce": nonce(), "username": "a", "password": u"abcd\u0000"}
)
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid password', channel.json_body["error"])
# Super long
body = json.dumps({"nonce": nonce(), "username": "a", "password": "A" * 1000})
request, channel = make_request("POST", self.url, body.encode('utf8'))
render(request, self.resource, self.clock)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual('Invalid password', channel.json_body["error"])
|
py | 1a554026047a254934a8da2f7587c35bc089627e | import argparse
import imp
import os
import re
from functools import wraps
from operator import methodcaller
import orca
from flask import (
Flask, abort, jsonify, request, render_template, redirect, url_for)
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from six import StringIO
app = Flask(__name__)
_GROUPBY_AGG_MAP = {
'sum': methodcaller('sum'),
'mean': methodcaller('mean'),
'median': methodcaller('median'),
'std': methodcaller('std'),
'size': methodcaller('size')
}
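# Each value is an operator.methodcaller, so _GROUPBY_AGG_MAP['mean'](gby) is
# equivalent to calling gby.mean() on a pandas GroupBy object.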
def import_file(filename):
"""
Import a file that will trigger the population of Orca.
Parameters
----------
filename : str
"""
pathname, filename = os.path.split(filename)
modname = re.match(
r'(?P<modname>\w+)\.py', filename).group('modname')
file, path, desc = imp.find_module(modname, [pathname])
try:
imp.load_module(modname, file, path, desc)
finally:
file.close()
def check_is_table(func):
"""
Decorator that will check whether the "table_name" keyword argument
to the wrapped function matches a registered Orca table.
"""
@wraps(func)
def wrapper(**kwargs):
if not orca.is_table(kwargs['table_name']):
abort(404)
return func(**kwargs)
return wrapper
def check_is_column(func):
"""
Decorator that will check whether the "table_name" and "col_name"
keyword arguments to the wrapped function match a registered Orca
table and column.
"""
@wraps(func)
def wrapper(**kwargs):
table_name = kwargs['table_name']
col_name = kwargs['col_name']
if not orca.is_table(table_name):
abort(404)
if col_name not in orca.get_table(table_name).columns:
abort(404)
return func(**kwargs)
return wrapper
def check_is_injectable(func):
"""
Decorator that will check whether the "inj_name" keyword argument to
the wrapped function matches a registered Orca injectable.
"""
@wraps(func)
def wrapper(**kwargs):
name = kwargs['inj_name']
if not orca.is_injectable(name):
abort(404)
return func(**kwargs)
return wrapper
@app.route('/schema')
def schema():
"""
All tables, columns, steps, injectables and broadcasts registered with
Orca. Includes local columns on tables.
"""
tables = orca.list_tables()
cols = {t: orca.get_table(t).columns for t in tables}
steps = orca.list_steps()
injectables = orca.list_injectables()
broadcasts = orca.list_broadcasts()
return jsonify(
tables=tables, columns=cols, steps=steps, injectables=injectables,
broadcasts=broadcasts)
@app.route('/tables')
def list_tables():
"""
List all registered tables.
"""
tables = orca.list_tables()
return jsonify(tables=tables)
@app.route('/tables/<table_name>/info')
@check_is_table
def table_info(table_name):
"""
Return the text result of table.info(verbose=True).
"""
table = orca.get_table(table_name).to_frame()
buf = StringIO()
table.info(verbose=True, buf=buf)
info = buf.getvalue()
return info, 200, {'Content-Type': 'text/plain'}
@app.route('/tables/<table_name>/preview')
@check_is_table
def table_preview(table_name):
"""
    Returns the first five rows of a table as JSON. Includes all columns.
Uses Pandas' "split" JSON format.
"""
preview = orca.get_table(table_name).to_frame().head()
return (
preview.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/describe')
@check_is_table
def table_describe(table_name):
"""
Return summary statistics of a table as JSON. Includes all columns.
Uses Pandas' "split" JSON format.
"""
desc = orca.get_table(table_name).to_frame().describe()
return (
desc.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/definition')
@check_is_table
def table_definition(table_name):
"""
Get the source of a table function.
    If a table is a registered DataFrame and not a function then all that is
returned is {'type': 'dataframe'}.
If the table is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
"""
if orca.table_type(table_name) == 'dataframe':
return jsonify(type='dataframe')
filename, lineno, source = \
orca.get_raw_table(table_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html)
@app.route('/tables/<table_name>/csv')
@check_is_table
def table_csv(table_name):
"""
Returns a table as text/csv using Pandas default csv output.
"""
csv = orca.get_table(table_name).to_frame().to_csv()
return csv, 200, {'Content-Type': 'text/csv'}
@app.route('/tables/<table_name>/groupbyagg')
@check_is_table
def table_groupbyagg(table_name):
"""
Perform a groupby on a table and return an aggregation on a single column.
This depends on some request parameters in the URL.
"column" and "agg" must always be present, and one of "by" or "level"
must be present. "column" is the table column on which aggregation will
be performed, "agg" is the aggregation that will be performed, and
"by"/"level" define how to group the data.
Supported "agg" parameters are: mean, median, std, sum, and size.
"""
table = orca.get_table(table_name)
# column to aggregate
column = request.args.get('column', None)
if not column or column not in table.columns:
abort(400)
# column or index level to group by
by = request.args.get('by', None)
level = request.args.get('level', None)
if (not by and not level) or (by and level):
abort(400)
# aggregation type
agg = request.args.get('agg', None)
if not agg or agg not in _GROUPBY_AGG_MAP:
abort(400)
column = table.get_column(column)
# level can either be an integer level number or a string level name.
# try converting to integer, but if that doesn't work
# we go ahead with the string.
if level:
try:
level = int(level)
except ValueError:
pass
gby = column.groupby(level=level)
else:
by = table.get_column(by)
gby = column.groupby(by)
result = _GROUPBY_AGG_MAP[agg](gby)
return (
result.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/columns')
@check_is_table
def list_table_columns(table_name):
"""
List columns for a specific table.
"""
return jsonify(columns=orca.get_table(table_name).columns)
@app.route('/tables/<table_name>/columns/<col_name>/preview')
@check_is_column
def column_preview(table_name, col_name):
"""
Return the first ten elements of a column as JSON in Pandas'
"split" format.
"""
col = orca.get_table(table_name).get_column(col_name).head(10)
return (
col.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/columns/<col_name>/definition')
@check_is_column
def column_definition(table_name, col_name):
"""
Get the source of a column function.
If a column is a registered Series and not a function then all that is
returned is {'type': 'series'}.
If the column is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
"""
col_type = orca.get_table(table_name).column_type(col_name)
if col_type != 'function':
return jsonify(type=col_type)
filename, lineno, source = \
orca.get_raw_column(table_name, col_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html)
@app.route('/tables/<table_name>/columns/<col_name>/describe')
@check_is_column
def column_describe(table_name, col_name):
"""
Return summary statistics of a column as JSON.
Uses Pandas' "split" JSON format.
"""
col_desc = orca.get_table(table_name).get_column(col_name).describe()
return (
col_desc.to_json(orient='split'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/columns/<col_name>/csv')
@check_is_column
def column_csv(table_name, col_name):
"""
Return a column as CSV using Pandas' default CSV output.
"""
csv = orca.get_table(table_name).get_column(col_name).to_csv(path_or_buf=None)
return csv, 200, {'Content-Type': 'text/csv'}
@app.route('/injectables')
def list_injectables():
"""
List all registered injectables.
"""
return jsonify(injectables=orca.list_injectables())
@app.route('/injectables/<inj_name>/repr')
@check_is_injectable
def injectable_repr(inj_name):
"""
Returns the type and repr of an injectable. JSON response has
"type" and "repr" keys.
"""
i = orca.get_injectable(inj_name)
return jsonify(type=str(type(i)), repr=repr(i))
@app.route('/injectables/<inj_name>/definition')
@check_is_injectable
def injectable_definition(inj_name):
"""
Get the source of an injectable function.
If an injectable is a registered Python variable and not a function
then all that is returned is {'type': 'variable'}.
    If the injectable is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
"""
inj_type = orca.injectable_type(inj_name)
if inj_type == 'variable':
return jsonify(type='variable')
else:
filename, lineno, source = \
orca.get_injectable_func_source_data(inj_name)
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html)
@app.route('/broadcasts')
def list_broadcasts():
"""
List all registered broadcasts as a list of objects with
keys "cast" and "onto".
"""
casts = [{'cast': b[0], 'onto': b[1]} for b in orca.list_broadcasts()]
return jsonify(broadcasts=casts)
@app.route('/broadcasts/<cast_name>/<onto_name>/definition')
def broadcast_definition(cast_name, onto_name):
"""
Return the definition of a broadcast as an object with keys
"cast", "onto", "cast_on", "onto_on", "cast_index", and "onto_index".
These are the same as the arguments to the ``broadcast`` function.
"""
if not orca.is_broadcast(cast_name, onto_name):
abort(404)
b = orca.get_broadcast(cast_name, onto_name)
return jsonify(
cast=b.cast, onto=b.onto, cast_on=b.cast_on, onto_on=b.onto_on,
cast_index=b.cast_index, onto_index=b.onto_index)
@app.route('/steps')
def list_steps():
"""
List all registered Orca steps.
"""
return jsonify(steps=orca.list_steps())
@app.route('/steps/<step_name>/definition')
def step_definition(step_name):
"""
Get the source of a step function. Returned object has keys
"filename", "lineno", "text" and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
"""
if not orca.is_step(step_name):
abort(404)
filename, lineno, source = \
orca.get_step(step_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(filename=filename, lineno=lineno, text=source, html=html)
@app.route('/ui')
def ui():
return render_template('ui.html')
@app.route('/')
def root():
return redirect(url_for('ui'))
def parse_args(args=None):
parser = argparse.ArgumentParser(
description=(
'Start a Flask server that has HTTP endpoints that provide data '
'about an Orca configuration and data registered with Orca.'))
parser.add_argument(
'-d', '--debug', action='store_true',
help='Enable Flask\'s debug mode')
parser.add_argument(
'-H', '--host', type=str, help='Hostname on which to run the server')
parser.add_argument(
'-p', '--port', type=int, help='Port on which to run server')
parser.add_argument('filename', type=str, help='File with Orca config')
return parser.parse_args(args)
def main(args=None):
args = parse_args(args)
import_file(args.filename)
app.run(host=args.host, port=args.port, debug=args.debug)
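# main() is intended to be wired up as a command-line entry point. An
# illustrative invocation (command and file names are assumptions):
#   orca-server -H 0.0.0.0 -p 8080 my_orca_config.py
# where my_orca_config.py is any script that registers tables, columns, steps,
# injectables or broadcasts with Orca when imported.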
|
py | 1a55412b647bbe2de94524b4221d64f78056fdfd | import turtle
# -- Function Definitions
def draw_board(x, y, size):
    """Draw an 8x8 checkerboard of red and black squares with its top-left corner at (x, y)."""
    color = "red"
    turtle.color("red")
    start = 1
    turtle.penup()
    turtle.goto(x, y)
    turtle.pendown()
    for row in range(8):
        for col in range(8):
            # Alternate the fill colour for every square after the very first one.
            if start == 0:
                if color == "red":
                    color = "black"
                    turtle.color("black")
                elif color == "black":
                    color = "red"
                    turtle.color("red")
            else:
                start = 0
            # Draw one filled square, then step right to the next column.
            turtle.begin_fill()
            for _ in range(4):
                turtle.forward(size)
                turtle.right(90)
            turtle.end_fill()
            turtle.forward(size)
        # Swap the colour once more so the next row starts on the opposite
        # colour of the row above, then move down and back to the left edge.
        if color == "red":
            turtle.color("black")
            color = "black"
        elif color == "black":
            turtle.color("red")
            color = "red"
        turtle.right(90)
        turtle.forward(size)
        turtle.right(90)
        turtle.forward(size * 8)
        turtle.right(180)
def main():
    turtle.speed(0)
    draw_board(-100, 100, 50)
    turtle.done()  # keep the window open after drawing finishes
def gotomain():
main()
gotomain()
|
py | 1a5541a6c88db13b9d8db56ff9a1444cd14cc2a8 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: orientationMsg.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='orientationMsg.proto',
package='mateROV',
syntax='proto2',
serialized_pb=_b('\n\x14orientationMsg.proto\x12\x07mateROV\":\n\x0eOrientationMsg\x12\x0c\n\x04roll\x18\x01 \x02(\x02\x12\r\n\x05pitch\x18\x02 \x02(\x02\x12\x0b\n\x03yaw\x18\x03 \x02(\x02')
)
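# The serialized descriptor above corresponds to a proto2 definition along the
# lines of (reconstructed for readability):
#   package mateROV;
#   message OrientationMsg {
#     required float roll  = 1;
#     required float pitch = 2;
#     required float yaw   = 3;
#   }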
_ORIENTATIONMSG = _descriptor.Descriptor(
name='OrientationMsg',
full_name='mateROV.OrientationMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='roll', full_name='mateROV.OrientationMsg.roll', index=0,
number=1, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pitch', full_name='mateROV.OrientationMsg.pitch', index=1,
number=2, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='yaw', full_name='mateROV.OrientationMsg.yaw', index=2,
number=3, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=33,
serialized_end=91,
)
DESCRIPTOR.message_types_by_name['OrientationMsg'] = _ORIENTATIONMSG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OrientationMsg = _reflection.GeneratedProtocolMessageType('OrientationMsg', (_message.Message,), dict(
DESCRIPTOR = _ORIENTATIONMSG,
__module__ = 'orientationMsg_pb2'
# @@protoc_insertion_point(class_scope:mateROV.OrientationMsg)
))
_sym_db.RegisterMessage(OrientationMsg)
# @@protoc_insertion_point(module_scope)
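# Illustrative use of the generated class (not part of the generated output):
#   msg = OrientationMsg(roll=1.0, pitch=0.5, yaw=0.25)
#   payload = msg.SerializeToString()
#   decoded = OrientationMsg.FromString(payload)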
|