code (string, lengths 20–1.05M) | apis (sequence) | extract_api (string, lengths 75–5.24M)
---|---|---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkworkorder.endpoint import endpoint_data
class ListTicketsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Workorder', '2020-03-26', 'ListTickets','workorder')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProductCode(self):
return self.get_query_params().get('ProductCode')
def set_ProductCode(self,ProductCode):
self.add_query_param('ProductCode',ProductCode)
def get_Language(self):
return self.get_query_params().get('Language')
def set_Language(self,Language):
self.add_query_param('Language',Language)
def get_SubUserId(self):
return self.get_query_params().get('SubUserId')
def set_SubUserId(self,SubUserId):
self.add_query_param('SubUserId',SubUserId)
def get_CreatedBeforeTime(self):
return self.get_query_params().get('CreatedBeforeTime')
def set_CreatedBeforeTime(self,CreatedBeforeTime):
self.add_query_param('CreatedBeforeTime',CreatedBeforeTime)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Ids(self):
return self.get_query_params().get('Ids')
def set_Ids(self,Ids):
self.add_query_param('Ids',Ids)
def get_TicketStatus(self):
return self.get_query_params().get('TicketStatus')
def set_TicketStatus(self,TicketStatus):
self.add_query_param('TicketStatus',TicketStatus)
def get_PageStart(self):
return self.get_query_params().get('PageStart')
def set_PageStart(self,PageStart):
self.add_query_param('PageStart',PageStart)
def get_CreatedAfterTime(self):
return self.get_query_params().get('CreatedAfterTime')
def set_CreatedAfterTime(self,CreatedAfterTime):
self.add_query_param('CreatedAfterTime',CreatedAfterTime) | [
"aliyunsdkworkorder.endpoint.endpoint_data.getEndpointRegional",
"aliyunsdkworkorder.endpoint.endpoint_data.getEndpointMap",
"aliyunsdkcore.request.RpcRequest.__init__"
] | [((954, 1039), 'aliyunsdkcore.request.RpcRequest.__init__', 'RpcRequest.__init__', (['self', '"""Workorder"""', '"""2020-03-26"""', '"""ListTickets"""', '"""workorder"""'], {}), "(self, 'Workorder', '2020-03-26', 'ListTickets', 'workorder'\n )\n", (973, 1039), False, 'from aliyunsdkcore.request import RpcRequest\n'), ((1103, 1133), 'aliyunsdkworkorder.endpoint.endpoint_data.getEndpointMap', 'endpoint_data.getEndpointMap', ([], {}), '()\n', (1131, 1133), False, 'from aliyunsdkworkorder.endpoint import endpoint_data\n'), ((1214, 1249), 'aliyunsdkworkorder.endpoint.endpoint_data.getEndpointRegional', 'endpoint_data.getEndpointRegional', ([], {}), '()\n', (1247, 1249), False, 'from aliyunsdkworkorder.endpoint import endpoint_data\n')] |
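# A minimal usage sketch (not part of the original file): issuing the request defined above
# through the Alibaba Cloud core client. The access key, secret and region are placeholders,
# and the paging/language values are illustrative assumptions.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = ListTicketsRequest()
request.set_PageStart(0)
request.set_PageSize(10)
request.set_Language('en')
response = client.do_action_with_exception(request)  # raw JSON bytes from the Workorder API
print(response)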
import torch
import dgl
from torch_geometric.utils import to_dense_adj
# from utils.dense import to_dense_adj
def basis_transform(g,
norm,
power,
epsilon,
identity):
(e_idx0, e_idx1) = g.edges()
edge_idx = torch.stack([e_idx0, e_idx1], dim=0)
adj = to_dense_adj(edge_idx).squeeze(0) # Graphs may have only one node.
if norm == 'Sym' and identity == 0:
adj = adj - torch.eye(adj.shape[0], dtype=adj.dtype) * (1 - identity)
deg_nopad = adj.sum(1)
isolated = torch.where(deg_nopad <= 1e-6, torch.ones_like(deg_nopad), torch.zeros_like(deg_nopad))
if isolated.sum(0) != 0:
adj = adj + isolated.diag_embed()
if isinstance(epsilon, str):
eps = epsilon.split('d')
epsilon = float(eps[0]) / float(eps[1])
if norm == 'Eps':
eig_val, eig_vec = torch.linalg.eigh(adj)
padding = torch.ones_like(eig_val)
eig_sign = torch.where(eig_val >= 0, padding, padding * -1)
eig_val_nosign = eig_val.abs()
eig_val_nosign = torch.where(eig_val_nosign > 1e-6, eig_val_nosign, torch.zeros_like(eig_val_nosign)) # Precision limitation
eig_val_smoothed = eig_val_nosign.pow(epsilon) * eig_sign
graph_matrix = torch.matmul(eig_vec, torch.matmul(eig_val_smoothed.diag_embed(), eig_vec.transpose(-2, -1)))
elif norm == 'Sym':
deg = adj.sum(1)
sym_norm = deg.pow(epsilon).unsqueeze(-1)
graph_matrix = torch.matmul(sym_norm, sym_norm.transpose(0, 1)) * adj
else:
raise ValueError('Unknown norm called {}'.format(norm))
identity = torch.eye(graph_matrix.shape[0], dtype=graph_matrix.dtype)
bases = [identity.flatten(0)]
graph_matrix_n = identity
for shift in range(power):
graph_matrix_n = torch.matmul(graph_matrix_n, graph_matrix)
bases = bases + [graph_matrix_n.flatten(0)]
bases = torch.stack(bases, dim=0).transpose(-2, -1).contiguous()
full_one = torch.ones_like(graph_matrix, dtype=graph_matrix.dtype).nonzero(as_tuple=True)
# print(full_one)
new_g = dgl.graph(full_one)
assert (new_g.num_nodes() == g.num_nodes())
# new_g = DGLHeteroGraph(full_one, ['_U'], ['_E'])
new_g.ndata['feat'] = g.ndata['feat']
# new_g.ndata['_ID'] = g.ndata['_ID']
new_g.edata['bases'] = bases
if 'feat' in g.edata.keys():
edge_attr = g.edata.pop('feat')
# print(edge_attr)
if len(edge_attr.shape) == 1:
edge_attr = edge_attr.unsqueeze(-1)
edge_attr_dense = to_dense_adj(edge_idx, edge_attr=edge_attr).squeeze(0).view(-1, edge_attr.shape[-1])
assert (len(edge_attr_dense.shape) == 2)
assert (bases.shape[0] == edge_attr_dense.shape[0])
new_g.edata['feat'] = edge_attr_dense
# new_g.edata['_ID'] = g.edata['_ID']
# print(new_g)
return new_g
| [
"torch.ones_like",
"dgl.graph",
"torch_geometric.utils.to_dense_adj",
"torch.eye",
"torch.stack",
"torch.zeros_like",
"torch.matmul",
"torch.linalg.eigh",
"torch.where"
] | [((296, 332), 'torch.stack', 'torch.stack', (['[e_idx0, e_idx1]'], {'dim': '(0)'}), '([e_idx0, e_idx1], dim=0)\n', (307, 332), False, 'import torch\n'), ((1669, 1727), 'torch.eye', 'torch.eye', (['graph_matrix.shape[0]'], {'dtype': 'graph_matrix.dtype'}), '(graph_matrix.shape[0], dtype=graph_matrix.dtype)\n', (1678, 1727), False, 'import torch\n'), ((2143, 2162), 'dgl.graph', 'dgl.graph', (['full_one'], {}), '(full_one)\n', (2152, 2162), False, 'import dgl\n'), ((912, 934), 'torch.linalg.eigh', 'torch.linalg.eigh', (['adj'], {}), '(adj)\n', (929, 934), False, 'import torch\n'), ((953, 977), 'torch.ones_like', 'torch.ones_like', (['eig_val'], {}), '(eig_val)\n', (968, 977), False, 'import torch\n'), ((997, 1045), 'torch.where', 'torch.where', (['(eig_val >= 0)', 'padding', '(padding * -1)'], {}), '(eig_val >= 0, padding, padding * -1)\n', (1008, 1045), False, 'import torch\n'), ((1849, 1891), 'torch.matmul', 'torch.matmul', (['graph_matrix_n', 'graph_matrix'], {}), '(graph_matrix_n, graph_matrix)\n', (1861, 1891), False, 'import torch\n'), ((343, 365), 'torch_geometric.utils.to_dense_adj', 'to_dense_adj', (['edge_idx'], {}), '(edge_idx)\n', (355, 365), False, 'from torch_geometric.utils import to_dense_adj\n'), ((611, 637), 'torch.ones_like', 'torch.ones_like', (['deg_nopad'], {}), '(deg_nopad)\n', (626, 637), False, 'import torch\n'), ((639, 666), 'torch.zeros_like', 'torch.zeros_like', (['deg_nopad'], {}), '(deg_nopad)\n', (655, 666), False, 'import torch\n'), ((1161, 1193), 'torch.zeros_like', 'torch.zeros_like', (['eig_val_nosign'], {}), '(eig_val_nosign)\n', (1177, 1193), False, 'import torch\n'), ((2030, 2085), 'torch.ones_like', 'torch.ones_like', (['graph_matrix'], {'dtype': 'graph_matrix.dtype'}), '(graph_matrix, dtype=graph_matrix.dtype)\n', (2045, 2085), False, 'import torch\n'), ((472, 512), 'torch.eye', 'torch.eye', (['adj.shape[0]'], {'dtype': 'adj.dtype'}), '(adj.shape[0], dtype=adj.dtype)\n', (481, 512), False, 'import torch\n'), ((1957, 1982), 'torch.stack', 'torch.stack', (['bases'], {'dim': '(0)'}), '(bases, dim=0)\n', (1968, 1982), False, 'import torch\n'), ((2595, 2638), 'torch_geometric.utils.to_dense_adj', 'to_dense_adj', (['edge_idx'], {'edge_attr': 'edge_attr'}), '(edge_idx, edge_attr=edge_attr)\n', (2607, 2638), False, 'from torch_geometric.utils import to_dense_adj\n')] |
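# A small, hedged usage sketch for basis_transform() above: a 3-node directed cycle with
# random node features. epsilon=-0.5 mimics symmetric degree normalization and power=3
# keeps I, A, A^2, A^3 as bases; both values are illustrative assumptions.
import torch
import dgl

src = torch.tensor([0, 1, 2])
dst = torch.tensor([1, 2, 0])
g = dgl.graph((src, dst))
g.ndata['feat'] = torch.randn(3, 8)

new_g = basis_transform(g, norm='Sym', power=3, epsilon=-0.5, identity=1)
print(new_g.num_nodes(), new_g.num_edges())  # 3 nodes, 3*3 = 9 dense edges
print(new_g.edata['bases'].shape)            # (9, power + 1) basis values per edge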
# -*- coding: utf-8 -*-
"""
@date: 2022/5/11 10:40 PM
@file: mobilenet.py
@author: zj
@description: Custom MobileNetV3, derived from torchvision
"""
from typing import Any, List, Optional, Callable, Dict
from functools import partial
from torch import Tensor, nn
from torchvision.models.mobilenetv3 import MobileNetV3 as TMobileNetV3
from torchvision.models.mobilenetv3 import _mobilenet_v3_conf, model_urls, load_state_dict_from_url, \
InvertedResidualConfig
from zcls2.config.key_word import KEY_OUTPUT
from simpleir.configs.key_words import KEY_FEAT
__all__ = ["MobileNetV3", "mobilenet_v3_large", "mobilenet_v3_small"]
class MobileNetV3(TMobileNetV3):
_feat_list = [
'blocks', 'avgpool', 'linear', 'hardswish', 'classifier'
]
def __init__(self, inverted_residual_setting: List[InvertedResidualConfig], last_channel: int,
num_classes: int = 1000, block: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
feat_type='hardswish', **kwargs: Any) -> None:
super().__init__(inverted_residual_setting, last_channel, num_classes, block, norm_layer, **kwargs)
assert feat_type in self._feat_list
self.feature_modules = {
"blocks": self.features[16][2],
"avgpool": self.avgpool,
"linear": self.classifier[0],
"hardswish": self.classifier[1],
"classifier": self.classifier[3],
}
self.feature_buffer = dict()
self.feat_type = feat_type
self._register_hook()
def _register_hook(self) -> None:
"""
Register hooks to output inner feature map.
"""
def hook(feature_buffer, fea_name, module, input, output):
feature_buffer[fea_name] = output.data
for fea_name in self._feat_list:
assert fea_name in self.feature_modules, 'unknown feature {}!'.format(fea_name)
self.feature_modules[fea_name].register_forward_hook(partial(hook, self.feature_buffer, fea_name))
def forward(self, x: Tensor) -> Dict:
x = super().forward(x)
feat = self.feature_buffer[self.feat_type]
return {
KEY_OUTPUT: x,
KEY_FEAT: feat
}
def _mobilenet_v3_model(
arch: str,
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
pretrained: bool,
progress: bool,
**kwargs: Any
):
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if pretrained:
if model_urls.get(arch, None) is None:
raise ValueError("No checkpoint is available for model type {}".format(arch))
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
# If the number of model outputs is different from the model setting,
# the corresponding pretraining model weight will not be loaded
assert isinstance(model.classifier[3], nn.Linear)
if model.classifier[3].out_features != 1000:
state_dict.pop('classifier.3.weight')
state_dict.pop('classifier.3.bias')
ret = model.load_state_dict(state_dict, strict=False)
assert set(ret.missing_keys) == {'classifier.3.weight', 'classifier.3.bias'}, \
f'Missing keys when loading pretrained weights: {ret.missing_keys}'
return model
def mobilenet_v3_large(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV3:
"""
Constructs a large MobileNetV3 architecture from
`"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
arch = "mobilenet_v3_large"
inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, **kwargs)
return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, **kwargs)
def mobilenet_v3_small(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV3:
"""
Constructs a small MobileNetV3 architecture from
`"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
arch = "mobilenet_v3_small"
inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, **kwargs)
return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, **kwargs)
| [
"torchvision.models.mobilenetv3._mobilenet_v3_conf",
"functools.partial",
"torchvision.models.mobilenetv3.model_urls.get",
"torchvision.models.mobilenetv3.load_state_dict_from_url"
] | [((3910, 3944), 'torchvision.models.mobilenetv3._mobilenet_v3_conf', '_mobilenet_v3_conf', (['arch'], {}), '(arch, **kwargs)\n', (3928, 3944), False, 'from torchvision.models.mobilenetv3 import _mobilenet_v3_conf, model_urls, load_state_dict_from_url, InvertedResidualConfig\n'), ((4549, 4583), 'torchvision.models.mobilenetv3._mobilenet_v3_conf', '_mobilenet_v3_conf', (['arch'], {}), '(arch, **kwargs)\n', (4567, 4583), False, 'from torchvision.models.mobilenetv3 import _mobilenet_v3_conf, model_urls, load_state_dict_from_url, InvertedResidualConfig\n'), ((2746, 2807), 'torchvision.models.mobilenetv3.load_state_dict_from_url', 'load_state_dict_from_url', (['model_urls[arch]'], {'progress': 'progress'}), '(model_urls[arch], progress=progress)\n', (2770, 2807), False, 'from torchvision.models.mobilenetv3 import _mobilenet_v3_conf, model_urls, load_state_dict_from_url, InvertedResidualConfig\n'), ((2599, 2625), 'torchvision.models.mobilenetv3.model_urls.get', 'model_urls.get', (['arch', 'None'], {}), '(arch, None)\n', (2613, 2625), False, 'from torchvision.models.mobilenetv3 import _mobilenet_v3_conf, model_urls, load_state_dict_from_url, InvertedResidualConfig\n'), ((2028, 2072), 'functools.partial', 'partial', (['hook', 'self.feature_buffer', 'fea_name'], {}), '(hook, self.feature_buffer, fea_name)\n', (2035, 2072), False, 'from functools import partial\n')] |
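# A hedged usage sketch for the hooked MobileNetV3 above (using mobilenet_v3_large to match
# the hard-coded features[16] hook). The batch size, input size and expected feature width
# (1280 for the default 'hardswish' tap) are assumptions, not part of the original file.
import torch

model = mobilenet_v3_large(pretrained=False)
model.eval()
with torch.no_grad():
    out = model(torch.rand(1, 3, 224, 224))
print(out[KEY_OUTPUT].shape)  # classifier logits, e.g. (1, 1000)
print(out[KEY_FEAT].shape)    # hooked 'hardswish' feature, e.g. (1, 1280)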
"""EMC's setup script."""
import sys
from setuptools import setup
# Give setuptools a hint to complain if it's too old a version
# 30.3.0 allows us to put most metadata in setup.cfg
# Should match pyproject.toml
# Not going to help us much without numpy or new pip, but gives us a shot
SETUP_REQUIRES = ["setuptools >= 40.8", "setuptools_scm", "setuptools_scm_git_archive"]
# This enables setuptools to install wheel on-the-fly
SETUP_REQUIRES += ['wheel'] if 'bdist_wheel' in sys.argv else []
if __name__ == '__main__':
setup(
name="eddymotion",
use_scm_version=True,
setup_requires=SETUP_REQUIRES,
)
| [
"setuptools.setup"
] | [((527, 604), 'setuptools.setup', 'setup', ([], {'name': '"""eddymotion"""', 'use_scm_version': '(True)', 'setup_requires': 'SETUP_REQUIRES'}), "(name='eddymotion', use_scm_version=True, setup_requires=SETUP_REQUIRES)\n", (532, 604), False, 'from setuptools import setup\n')] |
"""
Settings and configuration for Aasaanjobs Elasticsearch
Values will be read from the module specified by the AJELASTIC_SETTINGS_MODULE environment variable or
DJANGO_SETTINGS_MODULE if a Django project (in that order).
"""
import importlib
import os
import sys
from contextlib import contextmanager
import lazy_object_proxy
from ajelastic.exceptions import AJElasticSettingsError
ENVIRONMENT_VARIABLE = "AJELASTIC_SETTINGS_MODULE"
DJANGO_ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
def _setup():
"""
Load the settings module pointed to by the environment variable.
"""
settings_module = os.environ[ENVIRONMENT_VARIABLE] \
if os.environ.get(ENVIRONMENT_VARIABLE) else os.environ.get(DJANGO_ENVIRONMENT_VARIABLE)
if not settings_module:
raise AJElasticSettingsError(
"Settings are not configured. "
"You must define one of the environment variables {} or {}."
.format(ENVIRONMENT_VARIABLE, DJANGO_ENVIRONMENT_VARIABLE)
)
return Settings(settings_module)
@contextmanager
def cwd_in_path():
cwd = os.getcwd()
if cwd in sys.path:
yield
else:
sys.path.insert(0, cwd)
try:
yield cwd
finally:
try:
sys.path.remove(cwd)
except ValueError: # pragma: no cover
pass
class BaseSettings(object):
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
class EsIndices:
pass
class Settings(BaseSettings):
_mandatory_settings = (
"ES_HOST", "ES_ENV"
)
def __init__(self, settings_module):
with cwd_in_path():
mod = importlib.import_module(settings_module)
self.settings_module = settings_module
for setting in dir(mod):
if setting.isupper() and setting.startswith("ES_"):
setting_value = getattr(mod, setting)
setattr(self, setting, setting_value)
self.validate()
self.ES_INDICES = self.init_es_indices()
def validate(self):
for setting in self._mandatory_settings:
if not getattr(self, setting):
raise AJElasticSettingsError("The {} setting must not be empty.".format(setting))
self.validate_es_indices()
@staticmethod
def validate_es_index(key: str, setting: dict):
_mandatory_fields = ("name", "doc_type")
for _ in _mandatory_fields:
if not setting.get(_):
raise AJElasticSettingsError("The {}.{} setting must not be empty.".format(key, _))
allowed_fields = _mandatory_fields + ("data_functions", "mapping_path")
for _ in setting.keys():
if _ not in allowed_fields:
raise AJElasticSettingsError("Unknown {}.{} setting provided.".format(key, _))
def validate_es_indices(self):
if not hasattr(self, "ES_INDICES"):
return
if not isinstance(self.ES_INDICES, dict):
raise AJElasticSettingsError("The ES_INDICES setting must be a dict.")
for key, value in self.ES_INDICES.items():
self.validate_es_index(key, value)
def init_es_indices(self):
if not hasattr(self, "ES_INDICES"):
return None
indices = EsIndices()
for key, value in self.ES_INDICES.items():
from ..definition import ElasticIndex
setattr(indices, key, ElasticIndex.from_dict(self.ES_ENV, key, value))
return indices
def __repr__(self):
return "<AJElasticSettings {}".format(self.settings_module)
settings = lazy_object_proxy.Proxy(_setup)
| [
"sys.path.insert",
"importlib.import_module",
"os.environ.get",
"os.getcwd",
"ajelastic.exceptions.AJElasticSettingsError",
"sys.path.remove",
"lazy_object_proxy.Proxy"
] | [((3622, 3653), 'lazy_object_proxy.Proxy', 'lazy_object_proxy.Proxy', (['_setup'], {}), '(_setup)\n', (3645, 3653), False, 'import lazy_object_proxy\n'), ((1098, 1109), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1107, 1109), False, 'import os\n'), ((664, 700), 'os.environ.get', 'os.environ.get', (['ENVIRONMENT_VARIABLE'], {}), '(ENVIRONMENT_VARIABLE)\n', (678, 700), False, 'import os\n'), ((706, 749), 'os.environ.get', 'os.environ.get', (['DJANGO_ENVIRONMENT_VARIABLE'], {}), '(DJANGO_ENVIRONMENT_VARIABLE)\n', (720, 749), False, 'import os\n'), ((1166, 1189), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cwd'], {}), '(0, cwd)\n', (1181, 1189), False, 'import sys\n'), ((1694, 1734), 'importlib.import_module', 'importlib.import_module', (['settings_module'], {}), '(settings_module)\n', (1717, 1734), False, 'import importlib\n'), ((3016, 3080), 'ajelastic.exceptions.AJElasticSettingsError', 'AJElasticSettingsError', (['"""The ES_INDICES setting must be a dict."""'], {}), "('The ES_INDICES setting must be a dict.')\n", (3038, 3080), False, 'from ajelastic.exceptions import AJElasticSettingsError\n'), ((1275, 1295), 'sys.path.remove', 'sys.path.remove', (['cwd'], {}), '(cwd)\n', (1290, 1295), False, 'import sys\n')] |
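# A hypothetical settings module (e.g. my_es_settings.py) that satisfies the checks in
# Settings.validate() above; all names and values are illustrative assumptions. Point
# AJELASTIC_SETTINGS_MODULE (or DJANGO_SETTINGS_MODULE) at it before importing the lazy
# `settings` proxy.
ES_HOST = "http://localhost:9200"
ES_ENV = "dev"
ES_INDICES = {
    "tickets": {
        "name": "tickets",                       # mandatory
        "doc_type": "ticket",                    # mandatory
        "mapping_path": "mappings/ticket.json",  # optional, allowed key
    },
}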
from math import floor
from decimal import *
from time import time
# get primes the standard way (sieve)
# find the first ~500,000 primes in about 1sec
class Primes:
def __init__(self, n):
self.size = n + 1
self.primes = []
self.bs = [True] * self.size
self.bs[0] = False
self.bs[1] = False
for i in range(2, self.size):
if (self.bs[i]):
for j in range(i * i, self.size, i):
self.bs[j] = False
self.primes.append(i)
def isPrime(self, N):
if (N < self.size):
return self.bs[N]
for p in self.primes:
if (N % p == 0):
return False
return True
# get initial constant
def getF(p):
total = Decimal(0)
mult = Decimal(1)
for i in range(len(p.primes)):
prime = p.primes[i]
total += Decimal(prime - 1) / mult
mult *= Decimal(prime)
if i % 100 == 0:
print("Getting F:", round(100.0 * i / len(p.primes), 4), end="%\r")
print("Done!")
return total
# get next constant: floored = primes
def next(f):
return floor(f) * (f - floor(f) + 1)
# show time in minutes
def formatTime(t):
return round(t / 60, 4)
if __name__ == '__main__':
getcontext().prec = int(1e4) # number of decimal digits
startTime = time()
p = Primes(int(1e7)) # get primes(standard way)
primeTime = time()
f = getF(p) # get constant
fTime = time()
good = True
for i in range(len(p.primes)): # testing primes
prime = int(floor(f))
if prime != p.primes[i]:
good = False # failed
print("Correct:", 100.0 * i / len(p.primes), "%")
print(i, "primes found", len(p.primes), "total primes")
break
f = next(f)
compTime = time()
if good:
print("PASS ALL")
print("Gen Time:", formatTime(primeTime - startTime), "min")
print("Get F:", formatTime(fTime - primeTime), "min")
print("Compare Time", formatTime(compTime - fTime), "min")
# record:20,448 primes found using the constant (~hour)
# sieve record: 664,579 (~3 seconds)
| [
"time.time",
"math.floor"
] | [((1365, 1371), 'time.time', 'time', ([], {}), '()\n', (1369, 1371), False, 'from time import time\n'), ((1442, 1448), 'time.time', 'time', ([], {}), '()\n', (1446, 1448), False, 'from time import time\n'), ((1494, 1500), 'time.time', 'time', ([], {}), '()\n', (1498, 1500), False, 'from time import time\n'), ((1851, 1857), 'time.time', 'time', ([], {}), '()\n', (1855, 1857), False, 'from time import time\n'), ((1156, 1164), 'math.floor', 'floor', (['f'], {}), '(f)\n', (1161, 1164), False, 'from math import floor\n'), ((1590, 1598), 'math.floor', 'floor', (['f'], {}), '(f)\n', (1595, 1598), False, 'from math import floor\n'), ((1172, 1180), 'math.floor', 'floor', (['f'], {}), '(f)\n', (1177, 1180), False, 'from math import floor\n')] |
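# A small, hedged sanity sketch of the recurrence above: with only the primes below 100
# and 50 digits of precision, floor(f) should still reproduce the first few primes before
# truncation error takes over (the bounds chosen here are assumptions).
getcontext().prec = 50
p_small = Primes(100)
f_small = getF(p_small)
recovered = []
for _ in range(5):
    recovered.append(int(floor(f_small)))
    f_small = next(f_small)  # module-level next(), not the builtin
print(recovered)  # expected: [2, 3, 5, 7, 11]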
from domain.Problem.database.problem_repository import ProblemRepository
from domain.Problem.usecase.problem_interactor import ProblemInteracor
from infrastructure.database.postgres.sqlhandler import SqlHandler
class ProblemController:
def __init__(self, sqlhandler: SqlHandler, fastapi):
self.interactor = ProblemInteracor(ProblemRepository(sqlhandler))
self.HTTPException = fastapi.HTTPException
async def problems(self, contest_id: str):
problems = []
for problem in self.interactor.problems(contest_id):
problems.append(problem.as_json())
resp = {"problems": problems}
return resp
async def problem(self, contest_id: str, problem_id: str):
problem = self.interactor.problem(problem_id)
if problem is None:
raise self.HTTPException(
status_code=404, detail="problem not found"
)
res_data = problem.as_json()
resp = {"problem": res_data}
return resp
| [
"domain.Problem.database.problem_repository.ProblemRepository"
] | [((338, 367), 'domain.Problem.database.problem_repository.ProblemRepository', 'ProblemRepository', (['sqlhandler'], {}), '(sqlhandler)\n', (355, 367), False, 'from domain.Problem.database.problem_repository import ProblemRepository\n')] |
#!/usr/bin/env python3
"""
Finds CloudWatch metric alarms that are pulling from a non-existent source,
e.g. an alarm that's based on the size of a now-deleted queue.
This script doesn't delete the alarm, it just warns you that the alarm
is never going to change in its current configuration. You might want
to delete the alarm, or point it at a different source.
I've implemented checks for the Alarm sources that I use most often,
but CloudWatch Alarms can pull from other sources. The script should be
extensible to other sources/namespaces if that's useful to you.
From https://alexwlchan.net/2021/08/finding-misconfigured-or-dangling-cloudwatch-alarms/
"""
import functools
import boto3
from botocore.exceptions import ClientError
def get_metric_alarm_descriptions(sess):
resource = sess.resource("cloudwatch")
client = sess.client("cloudwatch")
for alarm in resource.alarms.all():
resp = client.describe_alarms(AlarmNames=[alarm.name])
yield from resp["MetricAlarms"]
# Note: often you'll have more than one alarm that reads from
# a given source (e.g. one alarm for table usage high and another
# for table usage low).
#
# There's no point doing repeated, successive checks for the existence
# of a table/queue/function/whatever, so cache the results here.
@functools.cache
def dynamodb_table_exists(sess, *, table_name):
"""
Returns True if this DynamoDB table exists, False otherwise.
"""
client = sess.client("dynamodb")
try:
client.describe_table(TableName=table_name)
return True
except ClientError as err: # pragma: no cover
if err.response["Error"]["Code"] == "ResourceNotFoundException":
return False
else:
raise
@functools.cache
def sqs_queue_exists(sess, *, queue_name):
"""
Returns True if this SQS queue exists, False otherwise.
"""
client = sess.client("sqs")
try:
client.get_queue_url(QueueName=queue_name)
return True
except ClientError as err: # pragma: no cover
if err.response["Error"]["Code"] == "AWS.SimpleQueueService.NonExistentQueue":
return False
else:
raise
@functools.cache
def lambda_function_exists(sess, *, function_name):
"""
Returns True if this Lambda function exists, False otherwise.
"""
client = sess.client("lambda")
try:
client.get_function(FunctionName=function_name)
return True
except ClientError as err: # pragma: no cover
if err.response["Error"]["Code"] == "ResourceNotFoundException":
return False
else:
raise
if __name__ == "__main__":
sess = boto3.Session()
for alarm in get_metric_alarm_descriptions(sess):
dimensions = {dim["Name"]: dim["Value"] for dim in alarm["Dimensions"]}
# Is this alarm based on a non-existent table?
if alarm.get("Namespace") == "AWS/DynamoDB":
table_name = dimensions["TableName"]
if not dynamodb_table_exists(sess, table_name=table_name):
print(
f"!!! Alarm {alarm['AlarmArn']} is based on non-existent table {table_name}"
)
continue
# Is this alarm based on a non-existent queue?
if alarm.get("Namespace") == "AWS/SQS":
queue_name = dimensions["QueueName"]
if not sqs_queue_exists(sess, queue_name=queue_name):
print(
f"!!! Alarm {alarm['AlarmArn']} is based on non-existent SQS queue {queue_name}"
)
continue
# Is this alarm based on a non-existent Lambda function?
if alarm.get("Namespace") == "AWS/Lambda":
function_name = dimensions["FunctionName"]
if not lambda_function_exists(sess, function_name=function_name):
print(
f"!!! Alarm {alarm['AlarmArn']} is based on non-existent Lambda function {function_name}"
)
continue
| [
"boto3.Session"
] | [((2693, 2708), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (2706, 2708), False, 'import boto3\n')] |
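# A hedged sketch of extending the script above to one more namespace (AWS/S3). The
# dimension name "BucketName" and the "404" error code for a missing bucket are
# assumptions about the AWS APIs; verify before relying on them.
@functools.cache
def s3_bucket_exists(sess, *, bucket_name):
    """
    Returns True if this S3 bucket exists, False otherwise.
    """
    client = sess.client("s3")
    try:
        client.head_bucket(Bucket=bucket_name)
        return True
    except ClientError as err:
        if err.response["Error"]["Code"] == "404":
            return False
        else:
            raise

# ...and inside the __main__ loop, mirror the existing checks:
#     if alarm.get("Namespace") == "AWS/S3":
#         bucket_name = dimensions["BucketName"]
#         if not s3_bucket_exists(sess, bucket_name=bucket_name):
#             print(f"!!! Alarm {alarm['AlarmArn']} is based on non-existent bucket {bucket_name}")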
# coding=utf-8
from flask import Blueprint
from ..models import Permission
main = Blueprint('main',__name__)
from . import views, errors
@main.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
| [
"flask.Blueprint"
] | [((82, 109), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {}), "('main', __name__)\n", (91, 109), False, 'from flask import Blueprint\n')] |
# Objective: download and cache data from SteamSpy
import json
import pathlib
import steamspypi
def download_steam_spy_data(json_filename="steamspy.json", genre=None):
# Data folder
data_path = "data/"
# Reference of the following line: https://stackoverflow.com/a/14364249
pathlib.Path(data_path).mkdir(parents=True, exist_ok=True)
data_filename = data_path + json_filename
try:
with open(data_filename, 'r', encoding="utf8") as in_json_file:
data = json.load(in_json_file)
except FileNotFoundError:
print("Downloading and caching data from SteamSpy")
if genre is None:
data = steamspypi.load()
else:
data_request = dict()
data_request['request'] = 'genre'
data_request['genre'] = genre
data = steamspypi.download(data_request)
steamspypi.print_data(data, data_filename)
return data
def get_appid_by_keyword(keyword):
import time
json_filename_suffixe = "_steamspy.json"
# Get current day as yyyymmdd format
date_format = "%Y%m%d"
current_date = time.strftime(date_format)
# Database filename
json_filename = current_date + json_filename_suffixe
# Download data which meta-data includes this keyword as genre
data_genre = download_steam_spy_data("genre_" + keyword + "_" + json_filename, keyword)
app_ids = set(data_genre.keys())
return app_ids
def get_appid_by_keyword_list_to_include(keyword_list):
app_ids = None # This variable will be initialized during the first iteration.
is_first_iteration = True
for keyword in keyword_list:
current_app_ids = get_appid_by_keyword(keyword)
if len(current_app_ids) == 0:
print("The keyword " + keyword + " does not return any appID.")
if is_first_iteration:
app_ids = current_app_ids
is_first_iteration = False
else:
# Intersection of appIDs so that the result are appIDs which correspond to every keyword
app_ids = app_ids.intersection(current_app_ids)
return app_ids
def get_appid_by_keyword_list_to_exclude(keyword_list):
app_ids = set() # This is the true initialization of this variable.
for keyword in keyword_list:
current_app_ids = get_appid_by_keyword(keyword)
if len(current_app_ids) == 0:
print("The keyword " + keyword + " does not return any appID.")
# Union of appIDs so that the result are appIDs which correspond to at least one keyword
app_ids = app_ids.union(current_app_ids)
return app_ids
if __name__ == "__main__":
steamspypi.load()
| [
"pathlib.Path",
"steamspypi.print_data",
"time.strftime",
"steamspypi.download",
"steamspypi.load",
"json.load"
] | [((1125, 1151), 'time.strftime', 'time.strftime', (['date_format'], {}), '(date_format)\n', (1138, 1151), False, 'import time\n'), ((2665, 2682), 'steamspypi.load', 'steamspypi.load', ([], {}), '()\n', (2680, 2682), False, 'import steamspypi\n'), ((294, 317), 'pathlib.Path', 'pathlib.Path', (['data_path'], {}), '(data_path)\n', (306, 317), False, 'import pathlib\n'), ((501, 524), 'json.load', 'json.load', (['in_json_file'], {}), '(in_json_file)\n', (510, 524), False, 'import json\n'), ((878, 920), 'steamspypi.print_data', 'steamspypi.print_data', (['data', 'data_filename'], {}), '(data, data_filename)\n', (899, 920), False, 'import steamspypi\n'), ((661, 678), 'steamspypi.load', 'steamspypi.load', ([], {}), '()\n', (676, 678), False, 'import steamspypi\n'), ((835, 868), 'steamspypi.download', 'steamspypi.download', (['data_request'], {}), '(data_request)\n', (854, 868), False, 'import steamspypi\n')] |
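# A hedged usage sketch combining the helpers above; the genre keywords are illustrative
# and must match SteamSpy's genre labels to return anything.
wanted = get_appid_by_keyword_list_to_include(["Indie", "Rogue-like"])
unwanted = get_appid_by_keyword_list_to_exclude(["Free to Play"])
app_ids = wanted - unwanted
print(len(app_ids), "appIDs match the include list but not the exclude list")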
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""Main file for performing Metallic Integration steps with existing commcell .
This file has all the classes related to Metallic Integration Operations.
Metallic: Class for representing all the metallic integration steps
Metallic:
__init__(commcell_object) -- initialize the Metallic class
instance for the commcell
_metallic_commcell_object() -- returns the metallic commcell object
metallic_subscribe() -- linking on metallic side
_cv_metallic_subscribe() -- linking on commvault side
is_metallic_registered() -- returns boolean value
true - if metallic is subscribed for a user
false - if metallic is not subscribed for a user
metallic_completed_solutions() -- returns all the completed solutions on the linked company of metallic
metallic_unsubscribe() -- unlinking on metallic side
_cv_metallic_unsubscribe() -- unlinking on commvault side
_get_eligible_metallic_commcells() -- gets the eligible metallic commcells for the logged in user
"""
from past.builtins import basestring
from .exception import SDKException
from .organization import Organization
from .security.user import User
class Metallic(object):
"""Class for representing Metallic related operations."""
def __init__(self, commcell_object):
"""Intializes object of the Metallic class.
Args:
commcell_object (object) -instance of the commcell class
Returns:
object - instance of the Metallic class
"""
self._commcell_object = commcell_object
self._update_response_ = self._commcell_object._update_response_
self._metallic_details = None
self._metallic_web_url = None
self._metallic_obj = None
def _metallic_commcell_object(self, cloud_webconsole_hostname, cloud_username, cloud_password):
"""Gets the metallic commcell object.
Args:
cloud_webconsole_hostname (str) -- hostname of the cloud
cloud_username (str) -- username of the cloud
cloud_password (str) -- password of the cloud
Raises:
SDKException:
if inputs are not valid
if failed to create the object
if response is empty
if response is not success
"""
if not (isinstance(cloud_webconsole_hostname, basestring) and
isinstance(cloud_username, basestring) and
isinstance(cloud_password, basestring)):
raise SDKException('Metallic', '101')
from cvpysdk.commcell import Commcell
metallic_cell = self._get_eligible_metallic_commcells(cloud_username, cloud_webconsole_hostname)
if (len(metallic_cell)) > 0:
cloud_webconsole_hostname = metallic_cell[0]
self._metallic_obj = Commcell(cloud_webconsole_hostname, cloud_username, cloud_password)
def metallic_subscribe(self, cloud_webconsole_hostname, cloud_username, cloud_password, msp_company_name=None):
"""Adds a new Monitoring Policy to the Commcell.
Args:
cloud_webconsole_hostname (str) -- hostname of the cloud
cloud_username (str) -- username of the cloud
cloud_password (str) -- password of the cloud
msp_company_name (str or object) -- name of the company or company object
default: None
Raises:
SDKException:
if metallic is already subscribed
if inputs are not valid
if failed to subscribe to metallic
if response is empty
if response is not success
"""
if not (isinstance(cloud_webconsole_hostname, basestring) and
isinstance(cloud_username, basestring) and
isinstance(cloud_password, basestring)):
raise SDKException('Metallic', '101')
if msp_company_name and not (isinstance(msp_company_name, basestring)):
raise SDKException('Metallic', '101')
self._metallic_commcell_object(cloud_webconsole_hostname, cloud_username, cloud_password)
if msp_company_name and not isinstance(msp_company_name, Organization):
msp_company_name = msp_company_name.lower()
msp_company_obj = self._commcell_object.organizations.get(msp_company_name)
request = {
"thirdpartyAppReq": {
"opType": 1,
"clientThirdPartyApps": [
{
"isCloudApp": False,
"appName": self._commcell_object.commserv_guid,
"appDisplayName": self._commcell_object.commserv_name,
"flags": 0,
"isCloudServiceSubscription": True,
"appType": 3,
"isEnabled": True,
"props": {
"nameValues": [
{
"name": "RedirectUrl",
"value": self._commcell_object.commserv_metadata['commserv_redirect_url']
},
{
"name": "SP Certificate Data",
"value": self._commcell_object.commserv_metadata['commserv_certificate']
},
{
"name": "CommcellId",
"value": str(self._commcell_object.commcell_id)
},
{
"name": "Enable Sso Redirect",
"value": "1"
}
]
}
}
]
}
}
if msp_company_name:
test_dict = {
'subscriberCompany': {
'GUID': self._commcell_object.organizations.all_organizations_props[msp_company_name]['GUID'],
'providerDomainName': msp_company_obj.organization_name
}
}
request.update(test_dict)
flag, response = self._metallic_obj._cvpysdk_object.make_request(
'POST', self._metallic_obj._services['METALLIC_LINKING'], request)
if flag:
if response.json():
error_code = response.json()['error']['errorCode']
self._metallic_details = {}
if (error_code == 2 or error_code == 0) and 'cloudServiceDetails' in response.json():
self._metallic_details = response.json()['cloudServiceDetails']
if error_code < 0:
error_string = response.json()['errorMessage']
raise SDKException(
'Metallic',
'102',
'Failed to create TPA\nError: "{0}"'.format(
error_string
)
)
else:
raise SDKException('Response', '102')
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
if msp_company_name:
self._cv_metallic_subscribe(msp_company_name)
else:
self._cv_metallic_subscribe()
def _cv_metallic_subscribe(self, msp_company_name=None):
"""Subscribing on on-prim or msp side.
Args:
msp_company_name (str) -- name of the company or company object
default: None
Raises:
SDKException:
if inputs are not valid
if failed to subscribe on the on-prem or MSP side
if response is empty
if response is not success
"""
if msp_company_name and not (isinstance(msp_company_name, basestring)):
raise SDKException('Metallic', '101')
if msp_company_name and not isinstance(msp_company_name, Organization):
msp_company_obj = self._commcell_object.organizations.get(msp_company_name)
request = {
"opType": 3,
"cloudServiceDetails": self._metallic_details
}
if msp_company_name:
request['subscriberCompany'] = {'providerId': int(msp_company_obj.organization_id)}
flag, response = self._commcell_object._cvpysdk_object.make_request(
'POST', self._commcell_object._services['CV_METALLIC_LINKING'], request)
if flag:
if response and response.json():
error_code = response.json().get('error', {}).get('errorCode')
if error_code != 0:
error_string = response.json()['error']['errorString']
raise SDKException(
'Metallic',
'102',
'Failed to update linking details on onprim/msp: "{0}"'.format(
error_string
)
)
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
def is_metallic_registered(self):
"""This function says whether metallic is registered for a user or not.
Args:
username (str) -- name of the user to which we need to check if metallic is registered
Returns:
Boolean -- True if metallic is returned in response
False if metallic is not returned in response
Raises:
SDKException:
if response is empty
if response is not success
"""
flag, response = self._commcell_object._cvpysdk_object.make_request(
'GET', self._commcell_object._services['METALLIC_REGISTERED']
)
if flag:
if response.json() and 'cloudServices' in response.json():
if response.json().get('cloudServices', [])[0].get('cloudService', {}).get('redirectUrl', {}):
self._metallic_web_url = \
response.json().get('cloudServices', [])[0].get('cloudService', {}).get('redirectUrl', {})
return True
else:
return False
else:
raise SDKException('Response', '102')
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
def metallic_completed_solutions(self):
"""This function returns the completed solutions for metallic.
Returns:
dict of completed solutions
Raises:
SDKException:
if response is empty
if response is not success
"""
flag, response = self._commcell_object._cvpysdk_object.make_request(
'GET', self._commcell_object._services['METALLIC_COMPLETED_SETUPS']
)
if flag:
if response.json() and 'completedSetupsDetails' in response.json():
completed_solns = response.json()['completedSetupsDetails'][0]['completedSetups']
return completed_solns
else:
raise SDKException('Metallic', '102', 'No metallic solutions are configured')
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
def metallic_unsubscribe(self):
"""This function is for unsubscribing metallic
Raises:
SDKException:
if failed to unsubcribe on metallic
if response is empty
if response is not success
"""
saml_token_for_user = self._commcell_object.get_saml_token()
user_obj = User(self._commcell_object, self._commcell_object.commcell_username)
company_name = user_obj.user_company_name
request = {
"cloudServiceDetails": {
"cloudService": {
"redirectUrl": self._metallic_web_url if self.is_metallic_registered() else None,
"appName": self._commcell_object.commserv_guid
}
}
}
if company_name:
test_dict = {
'subscriberCompany': {
'GUID': self._commcell_object.organizations.all_organizations_props[company_name]['GUID'],
'providerId': self._commcell_object.organizations.all_organizations[company_name],
'providerDomainName': company_name
}
}
request.update(test_dict)
url1 = self._metallic_web_url + "/api/CloudService/Unsubscribe"
flag, response = self._commcell_object._cvpysdk_object.make_request(
method='POST',
url=url1,
payload=request,
headers={'Authtoken': saml_token_for_user,
'Accept': 'application/json'}
)
if flag:
if response.json() and 'cloudServiceDetails' in response.json():
error_code = response.json()['error']['errorCode']
error_message = response.json()['error']['errorString']
if not error_code == 0:
raise SDKException('Metallic', '102', error_message)
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
self._cv_metallic_unsubscribe(user_obj)
def _cv_metallic_unsubscribe(self, user):
"""This function says whether metallic is registered for a user or not.
Args:
user (str or object) -- username or user object who has rights to unsubscribe on on-prim or msp side
Returns:
Boolean -- True if metallic is returned in response
False if metallic is not returned in response
Raises:
SDKException:
if failed to unsubscribe on on-prim or msp side
if response is empty
if response is not success
"""
if not isinstance(user, User):
user = self._commcell_object.users.get(self._commcell_object.commcell_username)
company_name = user.user_company_name
request = {
"opType": 4,
"cloudServiceDetails": {
"cloudService": {
"redirectUrl": self._metallic_web_url if self.is_metallic_registered() else None
}
}
}
if company_name:
request['subscriberCompany'] = \
{'providerId': self._commcell_object.organizations.all_organizations[company_name]}
flag, response = self._commcell_object._cvpysdk_object.make_request(
'POST', self._commcell_object._services['CV_METALLIC_LINKING'], request)
if flag:
if response.json():
error_code = response.json().get('error', {}).get('errorCode')
error_message = response.json().get('error', {}).get('errorString')
if not error_code == 0:
raise SDKException('Metallic', '102', error_message)
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
def _get_eligible_metallic_commcells(self, login_name_or_email=None, cloud_webconsole_hostname=None):
"""
Gets the redirect metallic commcells based on login_name or email provided
Args:
login_name_or_email (str) -- Login name or email of the user
default: current logged in user
cloud_webconsole_hostname (str) -- cloud webconsole hostname
default: None
Raises:
if the response is empty
if there is no response
Returns:
list_of_metallic_commcells (list) -- list of metallic commcells
"""
login_name_or_email = login_name_or_email.lower()
url1 = r'http://{0}/webconsole/api/CloudService/Routing?username={1}'.format(
cloud_webconsole_hostname, login_name_or_email)
flag, response = self._commcell_object._cvpysdk_object.make_request(method='GET', url=url1)
if flag:
if response.json() and 'cloudServiceCommcells' in response.json():
cloud_commcell_list = []
for ser_comm in response.json()['cloudServiceCommcells']:
cloud_commcell_list.append(ser_comm['url'])
return cloud_commcell_list
else:
return []
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
@property
def cloud_hostname(self):
""" Returns cloudhostname"""
return self._cloud_hostname
@cloud_hostname.setter
def cloud_hostname(self, value):
""" Sets cloud hostname """
self._cloud_hostname = value | [
"cvpysdk.commcell.Commcell"
] | [((3827, 3894), 'cvpysdk.commcell.Commcell', 'Commcell', (['cloud_webconsole_hostname', 'cloud_username', 'cloud_password'], {}), '(cloud_webconsole_hostname, cloud_username, cloud_password)\n', (3835, 3894), False, 'from cvpysdk.commcell import Commcell\n')] |
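# A hedged usage sketch for the Metallic class above. Hostnames, credentials and the
# company name are placeholders; Commcell comes from cvpysdk, as in the import used by
# _metallic_commcell_object().
from cvpysdk.commcell import Commcell

commcell = Commcell('onprem-webconsole.example.com', 'admin', '<password>')
metallic = Metallic(commcell)
metallic.metallic_subscribe('metallic-webconsole.example.com', '<cloud-user>', '<cloud-password>',
                            msp_company_name='ExampleCompany')
if metallic.is_metallic_registered():
    print(metallic.metallic_completed_solutions())
metallic.metallic_unsubscribe()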
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os.path import join
from fabric.context_managers import cd
from fabric.decorators import task
from fabric.operations import local
from fabric.tasks import execute
from . import dist
from . import req
from . import run
from . import src
from ._settings import PROJECT_DIR
@task
def clean():
"""Полная очистка от рабочих файлов."""
execute(dist.clean)
execute(src.clean)
with cd(PROJECT_DIR):
for path in ('.eggs', 'dist'):
path = join(PROJECT_DIR, path)
local('rm -f -r -d "{}"'.format(path))
local('git gc --quiet')
local('rm -f -r -d "{}"'.format('.tox'))
local('rm -f .coverage')
| [
"fabric.operations.local",
"fabric.tasks.execute",
"os.path.join",
"fabric.context_managers.cd"
] | [((481, 500), 'fabric.tasks.execute', 'execute', (['dist.clean'], {}), '(dist.clean)\n', (488, 500), False, 'from fabric.tasks import execute\n'), ((505, 523), 'fabric.tasks.execute', 'execute', (['src.clean'], {}), '(src.clean)\n', (512, 523), False, 'from fabric.tasks import execute\n'), ((534, 549), 'fabric.context_managers.cd', 'cd', (['PROJECT_DIR'], {}), '(PROJECT_DIR)\n', (536, 549), False, 'from fabric.context_managers import cd\n'), ((693, 716), 'fabric.operations.local', 'local', (['"""git gc --quiet"""'], {}), "('git gc --quiet')\n", (698, 716), False, 'from fabric.operations import local\n'), ((775, 799), 'fabric.operations.local', 'local', (['"""rm -f .coverage"""'], {}), "('rm -f .coverage')\n", (780, 799), False, 'from fabric.operations import local\n'), ((609, 632), 'os.path.join', 'join', (['PROJECT_DIR', 'path'], {}), '(PROJECT_DIR, path)\n', (613, 632), False, 'from os.path import join\n')] |
from graph import Graph
class Polygon:
def __init__(self, points):
self.points = points
def area(self):
""" Calculating area via Shoelace Algorithm """
ans = 0
n = len(self.points)
for i in xrange(n):
curr_point = self.points[i]
next_point = self.points[(i+1)%n]
ans += (curr_point.x * next_point.y)
ans -= (curr_point.y * next_point.x)
ans /= 2
return ans
def size(self):
return len(self.points)
def isConvex(self):
return all([self.isConvexPoint(i) for i in xrange(self.size())])
def isVertex(self, p):
return p in self.points
def isConvexPoint(self, i):
n = self.size()
i %= n
prev_point = self.points[(i-1)%n]
curr_point = self.points[i]
next_point = self.points[(i+1)%n]
p = Polygon([prev_point, curr_point, next_point])
return p.area() > 0
def isEar(self, i):
n = self.size()
i %= n
prev_point = self.points[(i-1)%n]
curr_point = self.points[i]
next_point = self.points[(i+1)%n]
p = Polygon([prev_point, curr_point, next_point])
f = p.area() > 0
for v in xrange(n):
if v == i or v == (i+1)%n or v == (i-1)%n:
continue
f = f and (not p.isInTriangle(self.points[v]))
return f
def isTriangle(self):
return self.size() == 3
def isInTriangle(self, p):
if not self.isTriangle(): return False
d1 = Polygon([p, self.points[0], self.points[1]]).area() < 0
d2 = Polygon([p, self.points[1], self.points[2]]).area() < 0
d3 = Polygon([p, self.points[2], self.points[0]]).area() < 0
return (d1 == d2) and (d2 == d3)
def triangulatedGraph(self):
if self.size() < 3: return False
g = Graph()
org_idx = {}
poly = Polygon(self.points[:])
for i in xrange(poly.size()):
org_idx[poly.points[i]] = i
while poly.size() >= 3:
for i in xrange(poly.size()):
if not poly.isEar(i): continue
n = poly.size()
# pprev_point = poly.points[(i-2)%n]
prev_point = poly.points[(i-1)%n]
curr_point = poly.points[i]
next_point = poly.points[(i+1)%n]
# nnext_point = poly.points[(i+2)%n]
g.add_edge((org_idx[prev_point], org_idx[curr_point]))
g.add_edge((org_idx[curr_point], org_idx[next_point]))
g.add_edge((org_idx[next_point], org_idx[prev_point]))
poly.points.remove(curr_point)
# if prev_point.collinear(pprev_point, next_point):
# poly.points.remove(prev_point)
# if next_point.collinear(nnext_point, prev_point):
# poly.points.remove(next_point)
break
return g
def artGallery(self):
g = self.triangulatedGraph()
c = g.threeColourize()
cs = dict([(i, c.count(i)) for i in set(c)])
color = min(cs, key=cs.get)
p = []
for i in xrange(self.size()):
if c[i] == color:
p.append(self.points[i])
return p
def __str__(self):
s = "Polygon["
n = self.size()
for i in xrange(n-1):
s += "%s, " % self.points[i]
if n: s += "%s]" % self.points[n-1]
return s
| [
"graph.Graph"
] | [((1593, 1600), 'graph.Graph', 'Graph', ([], {}), '()\n', (1598, 1600), False, 'from graph import Graph\n')] |
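# A hedged usage sketch for Polygon above. It assumes a simple Point type exposing .x and .y
# (the real project presumably has its own), float coordinates to avoid Python 2 integer
# division in area(), and counter-clockwise vertex order.
class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

square = Polygon([Point(0.0, 0.0), Point(1.0, 0.0), Point(1.0, 1.0), Point(0.0, 1.0)])
print(square.area())     # 1.0
print(square.isConvex())  # True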
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import render
from aurora_application.models import IMG
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import confusion_matrix
import h5py
import numpy as np
import pandas as pd
import numpy.random as rand
# import example_feat_extract
# efe = example_feat_extract
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
#
# import argparse
# import numpy as np
# import time
# from datetime import datetime
# import os
# # import sys
# te/aurora_application/feature_extractor/feature_extractor.py"
# sys.path.append("/Users/xcliang/Documents/GitHub/project-21-aurora_borealis_classification/website/aurora_application/feature_extractor")
# extractor =
# import utils
# from feature_extractor.feature_extractor import FeatureExtractor
# Create your views here.
def index(request):
# template = loader.get_template('templates/aurora_main.html')
return render(request, 'aurora_main.html')
def home(request):
return render(request, 'aurora_main.html')
def contact(request):
return render(request, "contact.html")
def upload(request):
# if request.method == 'POST':
# img = IMG(img_url=request.FILES.get('img'))
# img.save()
return render(request, 'upload.html')
# def show(request):
# new_img = IMG(img=request.FILES.get('img'))
# new_img.save()
# content = {
# 'aaa': new_img,
# }
# return render(request, 'show.html', content)
def show(request):
new_img = IMG(img=request.FILES.get('img'))
new_img.save()
# efe.get_features()
y_pred = train()
category = get_category(y_pred)
content = {
'category': category,
}
return render(request, 'show.html', content)
def train():
base_dir = "/Users/xcliang/Documents/Github/project-21-aurora_borealis_classification/oath_v1.1/"
# read classifications
df = pd.read_csv(base_dir + "classifications/classifications.csv", skiprows=18)
ndata = len(df["picNum"])
f = h5py.File(base_dir + "features/auroral_feat.h5", "r")
features = f["Logits"].value
f.close()
alpha = 0.03
avgscore = np.zeros(5)
idxs = np.loadtxt(base_dir + "classifications/train_test_split.csv", delimiter=",").astype(int)
cnt = 0
for idx in idxs:
ntrain = int(np.round(0.7 * ndata))
idx_train = idx[0:ntrain]
idx_test = idx[ntrain:]
# use 'class2' here to train machine on two classes
# only, aurora and non-aurora (instead of 'class6')
X_train = features[idx_train, :]
y_train = df["class6"][idx_train]
X_test = features[idx_test, :]
y_test = df["class6"][idx_test]
clf = RidgeClassifier(random_state=10 * cnt, normalize=False, alpha=alpha)
clf.fit(X_train, y_train)
avgscore[cnt] = clf.score(X_test, y_test)
cnt += 1
y_pred = clf.predict(X_test[:10])
return(y_pred[3])
# data_dir = "/Users/xcliang/Documents/GitHub/project-21-aurora_borealis_classification/website/media/"
# trained_classifier = clf
#
# f = h5py.File(data_dir + "features/auroral_feat.h5", "r")
# features_test = f["Logits"].value
# f.close()
#
# pred = trained_classifier.predict(features_test)
# print(pred)
def get_category(y_pred):
d = {1 : 'Arc',
2 : 'Diffuse',
3 : 'Discrete',
4 : 'Cloudy',
5 : 'Moon',
6 : 'Clear /Noaurora'}
category = d[y_pred]
return(category)
| [
"django.shortcuts.render",
"sklearn.linear_model.RidgeClassifier",
"pandas.read_csv",
"h5py.File",
"numpy.zeros",
"numpy.loadtxt",
"numpy.round"
] | [((1048, 1083), 'django.shortcuts.render', 'render', (['request', '"""aurora_main.html"""'], {}), "(request, 'aurora_main.html')\n", (1054, 1083), False, 'from django.shortcuts import render\n'), ((1115, 1150), 'django.shortcuts.render', 'render', (['request', '"""aurora_main.html"""'], {}), "(request, 'aurora_main.html')\n", (1121, 1150), False, 'from django.shortcuts import render\n'), ((1185, 1216), 'django.shortcuts.render', 'render', (['request', '"""contact.html"""'], {}), "(request, 'contact.html')\n", (1191, 1216), False, 'from django.shortcuts import render\n'), ((1360, 1390), 'django.shortcuts.render', 'render', (['request', '"""upload.html"""'], {}), "(request, 'upload.html')\n", (1366, 1390), False, 'from django.shortcuts import render\n'), ((1827, 1864), 'django.shortcuts.render', 'render', (['request', '"""show.html"""', 'content'], {}), "(request, 'show.html', content)\n", (1833, 1864), False, 'from django.shortcuts import render\n'), ((2018, 2092), 'pandas.read_csv', 'pd.read_csv', (["(base_dir + 'classifications/classifications.csv')"], {'skiprows': '(18)'}), "(base_dir + 'classifications/classifications.csv', skiprows=18)\n", (2029, 2092), True, 'import pandas as pd\n'), ((2132, 2185), 'h5py.File', 'h5py.File', (["(base_dir + 'features/auroral_feat.h5')", '"""r"""'], {}), "(base_dir + 'features/auroral_feat.h5', 'r')\n", (2141, 2185), False, 'import h5py\n'), ((2266, 2277), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2274, 2277), True, 'import numpy as np\n'), ((2820, 2888), 'sklearn.linear_model.RidgeClassifier', 'RidgeClassifier', ([], {'random_state': '(10 * cnt)', 'normalize': '(False)', 'alpha': 'alpha'}), '(random_state=10 * cnt, normalize=False, alpha=alpha)\n', (2835, 2888), False, 'from sklearn.linear_model import RidgeClassifier\n'), ((2290, 2366), 'numpy.loadtxt', 'np.loadtxt', (["(base_dir + 'classifications/train_test_split.csv')"], {'delimiter': '""","""'}), "(base_dir + 'classifications/train_test_split.csv', delimiter=',')\n", (2300, 2366), True, 'import numpy as np\n'), ((2433, 2454), 'numpy.round', 'np.round', (['(0.7 * ndata)'], {}), '(0.7 * ndata)\n', (2441, 2454), True, 'import numpy as np\n')] |
import socket
import predictor
import os, sys
service = ('localhost', 4444)
data=''
while True:
try:
if __name__ == "__main__":
print('loading model')
# load model config
# load model entity
pdt = predictor.Predictor(sys.argv[1])
# load infer batch (tied to cpu)
# create a socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind
#sock.settimeout(15)
sock.bind(service)
# socket, start to listen
sock.listen(64)
while True:
# receive data  # print('reading data')
print('listening')
con, meat = sock.accept()
data=''
input_texts = []
while True:
buff=con.recv(4096)
if buff:
#print('\n',buff[-4:], '\n')
if buff[-4:] == b'\x02\x02\x02\x02':
data+=buff[:-4].decode("utf-8", "ignore")
break
data+=buff.decode("utf-8", "ignore")
#con.sendall(buff)
else:
break
tag=False
data=data.split('\n')
if len(data) < 0:
con.close()
continue
elif len(data) == 1:
data = [data[0], data[0]]
tag=True
for line in data:
input_texts.append(line.strip("\n"))
print('predicting')
predict_label_namez=pdt.predict_batch(input_texts)
if tag:
con.sendall([predict_label_namez[0]].__str__().encode('utf-8'))
else:
con.sendall(predict_label_namez.__str__().encode('utf-8'))
con.close()
except Exception as E:
print('socket server:', data, E, 'restarting')
| [
"predictor.Predictor",
"socket.socket"
] | [((263, 295), 'predictor.Predictor', 'predictor.Predictor', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (282, 295), False, 'import predictor\n'), ((392, 441), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (405, 441), False, 'import socket\n')] |
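# A hedged client-side sketch for the protocol implemented above: send UTF-8 text terminated
# by the b'\x02\x02\x02\x02' marker, then read the predicted labels until the server closes
# the connection. Host and port mirror the `service` tuple above.
import socket

client = socket.create_connection(('localhost', 4444))
client.sendall('some text to classify'.encode('utf-8') + b'\x02\x02\x02\x02')
chunks = []
while True:
    buf = client.recv(4096)
    if not buf:
        break
    chunks.append(buf)
client.close()
print(b''.join(chunks).decode('utf-8'))  # str()-formatted list of predicted label names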
from unittest import TestCase
from Family_Tree.person import Person
from Family_Tree.family_tree import FamilyTree
from geektrust import Geektrust
from Family_Tree import constants
class TestIntegration(TestCase):
def setUp(self):
self.geektrust_app = Geektrust()
def test_invalid_input_throws_exception(self):
# check if exception is thrown for invalid input
self.assertRaises(ValueError, self.geektrust_app.run, './tests/test_files/test_translate_exception.txt')
def test_run_for_add_member(self):
# check if run adds member from input file
added_person = self.geektrust_app.get_names(self.geektrust_app.run('./tests/test_files/test_add_member.txt'))
self.assertEqual(added_person, ['TEST'])
# check if adding person with same name throws exception
self.assertRaises(ValueError, self.geektrust_app.run, './tests/test_files/test_add_member.txt')
def test_run_for_add_spouse(self):
# check if run adds spouse from input file
results = self.geektrust_app.run('./tests/test_files/test_add_spouse.txt')
self.assertEqual(results[1:], ["SPOUSE_ADDITION_FAILED", "SPOUSE_ADDITION_FAILED", "SPOUSE_ADDITION_SUCCEEDED"])
def test_run_for_add_child(self):
# check if run adds child from input file
results = self.geektrust_app.run('./tests/test_files/test_add_child.txt')
self.assertEqual(results[3:], ['PERSON_NOT_FOUND', 'CHILD_ADDITION_FAILED', 'CHILD_ADDITION_FAILED', 'CHILD_ADDITION_FAILED', 'CHILD_ADDITION_SUCCEEDED'])
def test_run_for_get_relationships(self):
# set up family tree
self.geektrust_app.run('./initiation.txt')
# check for all relationship types
results = self.geektrust_app.run('./tests/test_files/test_get_relationships.txt')
formatted_results = self.geektrust_app.format_result(results)
self.assertEqual(formatted_results, ['Vyas', 'Ahit', 'Satya', 'Vyas', 'Krpi', '<NAME>', 'Ahit', '<NAME>', 'PERSON_NOT_FOUND'])
| [
"geektrust.Geektrust"
] | [((277, 288), 'geektrust.Geektrust', 'Geektrust', ([], {}), '()\n', (286, 288), False, 'from geektrust import Geektrust\n')] |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: <EMAIL>
# Maintained By: <EMAIL>
from ggrc.app import app
from ggrc.login import login_required
from flask import render_template
from ggrc import settings
def init_mockup_views():
# Do not load mockup views in production
if settings.PRODUCTION:
return
@app.route("/mockups/request")
@login_required
def mockup_request():
return render_template("mockups/request.haml")
@app.route("/mockups/assessor")
@login_required
def mockup_assessor():
return render_template("mockups/assessor.haml")
| [
"flask.render_template",
"ggrc.app.app.route"
] | [((441, 470), 'ggrc.app.app.route', 'app.route', (['"""/mockups/request"""'], {}), "('/mockups/request')\n", (450, 470), False, 'from ggrc.app import app\n'), ((568, 598), 'ggrc.app.app.route', 'app.route', (['"""/mockups/assessor"""'], {}), "('/mockups/assessor')\n", (577, 598), False, 'from ggrc.app import app\n'), ((524, 563), 'flask.render_template', 'render_template', (['"""mockups/request.haml"""'], {}), "('mockups/request.haml')\n", (539, 563), False, 'from flask import render_template\n'), ((653, 693), 'flask.render_template', 'render_template', (['"""mockups/assessor.haml"""'], {}), "('mockups/assessor.haml')\n", (668, 693), False, 'from flask import render_template\n')] |
from pathlib import Path
import json
def check_json(result):
assert(result)
assert( 'frameStats' in result )
assert( 'movie' in result )
assert( 'contents' in result )
def check_json_file(path):
p = Path(path)
assert( p.is_file() )
with open( p ) as f:
result = json.load(f)
check_json(result)
| [
"json.load",
"pathlib.Path"
] | [((226, 236), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (230, 236), False, 'from pathlib import Path\n'), ((307, 319), 'json.load', 'json.load', (['f'], {}), '(f)\n', (316, 319), False, 'import json\n')] |
from flask import Flask, request # import the request object and the Flask class from flask
app = Flask(__name__) #app is now a flask application
@app.route('/', methods=['GET','POST'])
def login():
if request.method == 'POST':
return 'FORM POSTED'
else:
return 'Login Page Loaded'
'''
By default the route decorator only responds to GET HTTP requests,
so a GET request will work whether or not you specify the methods argument.
However, to handle a POST request you must explicitly tell the application
by defining the methods argument of the route decorator and adding 'POST' to it.
''' | [
"flask.Flask"
] | [((100, 115), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (105, 115), False, 'from flask import Flask, requests\n')] |
from datetime import date
from typing import Dict, List, Union
from belvo.resources.base import Resource
class TaxReturns(Resource):
endpoint = "/api/tax-returns/"
def create(
self,
link: str,
year_from: str,
year_to: str,
*,
attach_pdf: bool = False,
encryption_key: str = None,
save_data: bool = True,
raise_exception: bool = False,
**kwargs: Dict,
) -> Union[List[Dict], Dict]:
year_to = year_to or str(date.today().year)
data = {
"link": link,
"year_from": year_from,
"year_to": year_to,
"attach_pdf": attach_pdf,
"save_data": save_data,
}
if encryption_key:
data.update(encryption_key=encryption_key)
return self.session.post(
self.endpoint, data=data, raise_exception=raise_exception, **kwargs
)
def resume(
self,
session: str,
token: str,
*,
link: str = None,
raise_exception: bool = False,
**kwargs: Dict,
) -> Dict:
raise NotImplementedError()
| [
"datetime.date.today"
] | [((511, 523), 'datetime.date.today', 'date.today', ([], {}), '()\n', (521, 523), False, 'from datetime import date\n')] |
import os
APPNAME: str ="pilot"
DEBUG: str | None =os.getenv("DEBUG")
TEST: str | None =os.getenv("TEST") | [
"os.getenv"
] | [((51, 69), 'os.getenv', 'os.getenv', (['"""DEBUG"""'], {}), "('DEBUG')\n", (60, 69), False, 'import os\n'), ((88, 105), 'os.getenv', 'os.getenv', (['"""TEST"""'], {}), "('TEST')\n", (97, 105), False, 'import os\n')] |
import sys
sys.path.append('..')
from prep_data import read_in_presplit_data
from math import isnan, inf
base_data_filename = '../data/binary_'
percentile_of_perplexities_to_keep = 85
percentile_of_perplexities_to_keep = percentile_of_perplexities_to_keep / 100
train_file = base_data_filename + 'train-withperplexities.csv'
dev_file = base_data_filename + 'dev-withperplexities.csv'
test_file = base_data_filename + 'test-withperplexities.csv'
label_key_filename = base_data_filename + 'classes.txt'
train_df, dev_df, test_df, num_labels = read_in_presplit_data(train_file, dev_file, test_file, label_key_filename)
full_list_of_perplexities = train_df['perplexity'].tolist() + \
dev_df['perplexity'].tolist() + \
test_df['perplexity'].tolist()
full_list_of_perplexities = [float(val) for val in full_list_of_perplexities]
full_list_of_perplexities = sorted(full_list_of_perplexities, key=lambda x: inf if isnan(x) else x)
num_nans = 0
for val in full_list_of_perplexities:
if isnan(val):
num_nans += 1
print('Num NaNs: ' + str(num_nans))
print('Quick check that sorting worked:')
print('\tIndices:')
print('\t' + str(int(len(full_list_of_perplexities) * 0)))
print('\t' + str(int(len(full_list_of_perplexities) * .25)))
print('\t' + str(int(len(full_list_of_perplexities) * .5)))
print('\t' + str(int(len(full_list_of_perplexities) * .75)))
print('\t' + str(int(len(full_list_of_perplexities) * 1) - 1))
print()
print('\tValues:')
print('\t' + str(full_list_of_perplexities[int(len(full_list_of_perplexities) * 0)]))
print('\t' + str(full_list_of_perplexities[int(len(full_list_of_perplexities) * .25)]))
print('\t' + str(full_list_of_perplexities[int(len(full_list_of_perplexities) * .5)]))
print('\t' + str(full_list_of_perplexities[int(len(full_list_of_perplexities) * .75)]))
print('\t' + str(full_list_of_perplexities[int(len(full_list_of_perplexities) * 1) - 1]))
print()
for i, val in enumerate(full_list_of_perplexities[:-1]):
if not isnan(val) and not isnan(full_list_of_perplexities[i + 1]):
assert val <= full_list_of_perplexities[i + 1], str(i) + ', ' + str(val) + ', ' + \
str(full_list_of_perplexities[i + 1])
cutoff_ind = int(percentile_of_perplexities_to_keep * len(full_list_of_perplexities))
print('Cutoff ind is ' + str(cutoff_ind) + ' out of ' + str(len(full_list_of_perplexities)))
print(full_list_of_perplexities[cutoff_ind])
print(full_list_of_perplexities[cutoff_ind + 1])
cutoff_val = (full_list_of_perplexities[cutoff_ind] + full_list_of_perplexities[cutoff_ind + 1]) / 2
print('Cutoff perplexity val (must be below this) is ' + str(cutoff_val))
num_perplexities_below = 0
num_perplexities_above = 0
for perplexity in full_list_of_perplexities:
if perplexity < cutoff_val:
num_perplexities_below += 1
else:
num_perplexities_above += 1
print('There are ' + str(num_perplexities_below) + ' perplexities below cutoff val and ' +
str(num_perplexities_above) + ' perplexities above it')
def subset_df(df):
return df[df['perplexity'] < cutoff_val]
train_df = subset_df(train_df)
dev_df = subset_df(dev_df)
test_df = subset_df(test_df)
def get_new_filename(old_filename):
num_tag = str(percentile_of_perplexities_to_keep)
    if percentile_of_perplexities_to_keep % 1 == 0:
num_tag = '100'
else:
num_tag = num_tag[num_tag.index('.') + 1:]
if len(num_tag) == 1:
num_tag += '0'
return old_filename[:old_filename.rfind('.')] + '-' + num_tag + 'percentile' + \
old_filename[old_filename.rfind('.'):]
train_df.to_csv(get_new_filename(train_file), index=False)
dev_df.to_csv(get_new_filename(dev_file), index=False)
test_df.to_csv(get_new_filename(test_file), index=False)
print('Wrote subset data csv files.')
| [
"sys.path.append",
"prep_data.read_in_presplit_data",
"math.isnan"
] | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((545, 619), 'prep_data.read_in_presplit_data', 'read_in_presplit_data', (['train_file', 'dev_file', 'test_file', 'label_key_filename'], {}), '(train_file, dev_file, test_file, label_key_filename)\n', (566, 619), False, 'from prep_data import read_in_presplit_data\n'), ((1043, 1053), 'math.isnan', 'isnan', (['val'], {}), '(val)\n', (1048, 1053), False, 'from math import isnan, inf\n'), ((2022, 2032), 'math.isnan', 'isnan', (['val'], {}), '(val)\n', (2027, 2032), False, 'from math import isnan, inf\n'), ((2041, 2080), 'math.isnan', 'isnan', (['full_list_of_perplexities[i + 1]'], {}), '(full_list_of_perplexities[i + 1])\n', (2046, 2080), False, 'from math import isnan, inf\n'), ((968, 976), 'math.isnan', 'isnan', (['x'], {}), '(x)\n', (973, 976), False, 'from math import isnan, inf\n')] |
#!/usr/bin/env python
# MIT License
# Copyright (c) 2019 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
import sys
import argparse
parser = argparse.ArgumentParser(description='Write installed package list from dpkg to the JSON format')
parser.add_argument('--output-path', required=True,
help='what filepath to store the created JSON in')
args = vars(parser.parse_args())
lines = os.popen('dpkg -l | grep "^ii"').read().split('\n')
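# dpkg output is column-aligned: scan for the first line that splits into exactly five
# fields (state, name, version, architecture, one-word description) and record each
# column's character offset so every row can be sliced at the same positions.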
i = 0
while len([l for l in lines[i].split(' ') if l]) != 5:
i += 1
offsets = [lines[i].index(l) for l in lines[i].split(' ') if len(l)]
pkgs = {}
for line in lines:
parsed = []
for i in range(len(offsets)):
if len(offsets) == i + 1:
parsed.append(line[offsets[i]:].strip())
else:
parsed.append(line[offsets[i]:offsets[i + 1]].strip())
if len(parsed[1]) > 0:
pkgs.update({parsed[1]:{'State':parsed[0], 'Version':parsed[2], 'Architecture':parsed[3],'Description':parsed[4]}})
text_file = open(args["output_path"], "w")
text_file.write("%s" % json.dumps(pkgs))
text_file.close()
| [
"os.popen",
"json.dumps",
"argparse.ArgumentParser"
] | [((1180, 1281), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Write installed package list from dpkg to the JSON format"""'}), "(description=\n 'Write installed package list from dpkg to the JSON format')\n", (1203, 1281), False, 'import argparse\n'), ((2103, 2119), 'json.dumps', 'json.dumps', (['pkgs'], {}), '(pkgs)\n', (2113, 2119), False, 'import json\n'), ((1443, 1475), 'os.popen', 'os.popen', (['"""dpkg -l | grep "^ii\\""""'], {}), '(\'dpkg -l | grep "^ii"\')\n', (1451, 1475), False, 'import os\n')] |
import numpy as np
import tagnews
class Test_GeoCoder():
@classmethod
def setup_method(cls):
cls.model = tagnews.GeoCoder()
def test_extract_geostrings(self):
self.model.extract_geostrings(
('This is example article text with a location of'
' 55th and Woodlawn where something happened.')
)
def test_extract_geostring_probs(self):
article = ('This is example article text with a location of'
' 55th and Woodlawn where something happened.')
words, probs = self.model.extract_geostring_probs(article)
max_prob = probs.max()
max_word = words[np.argmax(probs)]
geostrings = self.model.extract_geostrings(article,
prob_thresh=max_prob-0.001)
assert max_word in [word for geostring in geostrings for word in geostring]
def test_extract_geostring_probs_word_not_in_glove(self):
"""
Regression test for issue #105.
"""
article = '___1234567890nonexistent0987654321___'
words, probs = self.model.extract_geostring_probs(article)
def test_lat_longs_from_geostring_lists(self):
geostring_lists = [['5500', 'S', 'Woodlawn'], ['100', 'N.', 'Wacker'], ['thigh']]
lat_longs, scores, num_found = self.model.lat_longs_from_geostring_lists(
geostring_lists, sleep_secs=0.5
)
assert scores[2] < scores[0]
assert scores[2] < scores[1]
assert len(lat_longs) == len(geostring_lists) == len(scores) == len(num_found)
| [
"numpy.argmax",
"tagnews.GeoCoder"
] | [((124, 142), 'tagnews.GeoCoder', 'tagnews.GeoCoder', ([], {}), '()\n', (140, 142), False, 'import tagnews\n'), ((660, 676), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (669, 676), True, 'import numpy as np\n')] |
import math
def rescore_chinese_entities(entity_list):
non_other_list = []
other_list = []
for entity in entity_list:
if "type" in entity:
entity_type = entity["type"]
else:
entity_type = entity["entity_type"]
if "chinese_name" not in entity:
entity["chinese_name"] = entity["name"]
if entity_type != "other":
non_other_list.append({
"id": entity["id"],
"chinese_name": entity["chinese_name"],
"entity_name": entity["entity_name"],
"entity_type": entity["entity_type"]
})
else:
other_list.append({
"id": entity["id"],
"chinese_name": entity["chinese_name"],
"entity_name": entity["entity_name"],
"entity_type": entity["entity_type"]
})
new_list = []
for entity in non_other_list:
hit_count = 0
for other in other_list:
if entity["chinese_name"]:
if entity["chinese_name"] in other["chinese_name"]:
hit_count += 1
else:
if entity["entity_name"] in other["entity_name"]:
hit_count += 1
id = int(entity["id"][1:])
# id_score = (20 - len(entity["id"])) * 1.5
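        # The id_score below rewards smaller numeric ids: 1.5 * (24 - log(id)) is
        # larger for low ids and is added to the mention count to form the weight.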
id_score = (24 - math.log(id)) * 1.5
entity["weight"] = hit_count + id_score
if entity["id"][0] == 'A':
entity["weight"] -= 100000
new_list.append(entity)
new_list = sorted(new_list, key=lambda e: e["weight"], reverse=True)
if len(new_list) > 20:
return new_list
else:
return new_list + other_list
| [
"math.log"
] | [((1386, 1398), 'math.log', 'math.log', (['id'], {}), '(id)\n', (1394, 1398), False, 'import math\n')] |
import os
# needs win32all to work on Windows
if os.name == 'nt':
import win32con, win32file, pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0 # the default
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
__overlapped = pywintypes.OVERLAPPED()
def lock(file, flags = LOCK_EX | LOCK_NB):
hfile = win32file._get_osfhandle(file.fileno())
win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped)
def unlock(file):
hfile = win32file._get_osfhandle(file.fileno())
win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped)
elif os.name == 'posix':
import fcntl
from fcntl import LOCK_EX, LOCK_SH, LOCK_NB
def lock(file, flags=LOCK_EX | LOCK_NB):
fcntl.flock(file.fileno(), flags)
def unlock(file):
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
raise RuntimeError("FileLock only support for nt and posix platforms")
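if __name__ == "__main__":
    # Illustrative usage sketch: take an exclusive, non-blocking lock on a scratch
    # file, write to it, then release. "filelock_demo.tmp" is an arbitrary example path.
    with open("filelock_demo.tmp", "w") as handle:
        try:
            lock(handle)  # defaults to LOCK_EX | LOCK_NB on both platforms
            handle.write("locked\n")
        finally:
            unlock(handle)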
| [
"win32file.UnlockFileEx",
"win32file.LockFileEx",
"pywintypes.OVERLAPPED"
] | [((240, 263), 'pywintypes.OVERLAPPED', 'pywintypes.OVERLAPPED', ([], {}), '()\n', (261, 263), False, 'import win32con, win32file, pywintypes\n'), ((361, 424), 'win32file.LockFileEx', 'win32file.LockFileEx', (['hfile', 'flags', '(0)', '(4294901760)', '__overlapped'], {}), '(hfile, flags, 0, 4294901760, __overlapped)\n', (381, 424), False, 'import win32con, win32file, pywintypes\n'), ((497, 555), 'win32file.UnlockFileEx', 'win32file.UnlockFileEx', (['hfile', '(0)', '(4294901760)', '__overlapped'], {}), '(hfile, 0, 4294901760, __overlapped)\n', (519, 555), False, 'import win32con, win32file, pywintypes\n')] |
"""Documentation about justatest2"""
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1.0"
| [
"logging.NullHandler",
"logging.getLogger"
] | [((93, 114), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (112, 114), False, 'import logging\n'), ((54, 81), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (71, 81), False, 'import logging\n')] |
from rest_framework import serializers
from django.contrib.auth import get_user_model
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = (
'id',
'email',
'mobile_phone',
'last_name',
'first_name',
'patronymic',
'birthday',
'is_superuser',
'is_active'
)
| [
"django.contrib.auth.get_user_model"
] | [((171, 187), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (185, 187), False, 'from django.contrib.auth import get_user_model\n')] |
import unittest.mock as mock
import Test.ID2TAttackTest as Test
sha_default = 'c61cb8ce03e6b8b19132ec6a47adcfb02c4dba4234926653df5443d33b08f33b'
sha_dest_mac_only = 'c42a1775db981a139abd42d031273805cbebd2316b0d8c097217c12193fb8a70'
sha_multiple_victims_macos = 'b9a9f423d4154bc38723214124ad74dfdd07a39753563d21f5b453a8c069914a'
sha_one_victim_linux = '3bb17444446334cf4feee9dd7cbeabd17acbb5ef48525fb3963591f30c37d17a'
sha_port_shuffle = '08bdecc68fa1a2d1b0dd9802d7d025d42d90b9184d1fb6e1bcab234fac7db1b4'
sha_smb2 = '315bc052fd045f8738021062e8b5f77a33c649adfed490d3c9da94c97ba32f95'
sha_ip_src_shuffle = '1d699ca109c62000b77b53002f1087ebf5ccc2c2dead1dbc5c18b5f6311273d0'
sha_victim_range_winxp_hosting = 'bd624da4e3b7a3f06b8154ed9d6274d498b589aaaa11c2d0dc207a80ab7205b9'
# TODO: improve coverage
class UnitTestSMBScan(Test.ID2TAttackTest):
def test_smbscan_default(self):
with mock.patch("ID2TLib.Utility.get_rnd_os", return_value="win7"):
self.checksum_test([['SMBScanAttack']], sha_default)
def test_smbscan_one_victim_linux(self):
with mock.patch("ID2TLib.Utility.get_rnd_os", return_value="linux"):
self.checksum_test([['SMBScanAttack', 'ip.src=192.168.178.1', 'ip.dst=192.168.178.10']],
sha_one_victim_linux)
def test_smbscan_victim_range_winxp_hosting(self):
with mock.patch("ID2TLib.Utility.get_rnd_os", return_value="winxp"):
self.checksum_test([['SMBScanAttack', 'ip.src=192.168.178.1', 'ip.dst=192.168.178.5-192.168.178.10',
'hosting.ip=192.168.178.5']], sha_victim_range_winxp_hosting)
def test_smbscan_multiple_victims_macos(self):
with mock.patch("ID2TLib.Utility.get_rnd_os", return_value="macos"):
self.checksum_test([['SMBScanAttack', 'ip.src=192.168.178.1',
'ip.dst=192.168.178.10,192.168.178.15,192.168.178.20',
'hosting.ip=192.168.178.15,192.168.178.20']], sha_multiple_victims_macos)
def test_smbscan_invalid_smb_version(self):
with self.assertRaises(SystemExit):
self.checksum_test([['SMBScanAttack', 'protocol.version=42']], 'somehash')
def test_smbscan_invalid_smb_platform(self):
with self.assertRaises(SystemExit):
self.checksum_test([['SMBScanAttack', 'hosting.version=1337']], 'somehash')
def test_smbscan_port_shuffle(self):
with mock.patch("ID2TLib.Utility.get_rnd_os", return_value="win7"):
self.checksum_test([['SMBScanAttack', 'ip.src=192.168.178.1', 'ip.dst=192.168.178.5-192.168.178.10',
'hosting.ip=192.168.178.5', 'port.src.shuffle=false']], sha_port_shuffle)
def test_smbscan_dest_mac_only(self):
with mock.patch("ID2TLib.Utility.get_rnd_os", return_value="win7"):
self.checksum_test([['SMBScanAttack', 'ip.src=192.168.178.1', 'mac.dst=00:0C:29:9C:70:64']],
sha_dest_mac_only)
def test_smbscan_src_ip_shuffle(self):
with mock.patch("ID2TLib.Utility.get_rnd_os", return_value="win7"):
self.checksum_test([['SMBScanAttack', 'ip.src=192.168.178.1', 'ip.dst=192.168.178.5-192.168.178.10',
'hosting.ip=192.168.178.5', 'ip.src.shuffle=True']], sha_ip_src_shuffle)
def test_smbscan_smb2(self):
with mock.patch("ID2TLib.Utility.get_rnd_os", return_value="linux"):
self.checksum_test([['SMBScanAttack', 'ip.src=192.168.178.1', 'ip.dst=192.168.178.5-192.168.178.10',
'hosting.ip=192.168.178.5', 'protocol.version=2.1', 'hosting.version=2.1']], sha_smb2)
def test_smbscan_order(self):
self.order_test([['SMBScanAttack']])
| [
"unittest.mock.patch"
] | [((893, 954), 'unittest.mock.patch', 'mock.patch', (['"""ID2TLib.Utility.get_rnd_os"""'], {'return_value': '"""win7"""'}), "('ID2TLib.Utility.get_rnd_os', return_value='win7')\n", (903, 954), True, 'import unittest.mock as mock\n'), ((1080, 1142), 'unittest.mock.patch', 'mock.patch', (['"""ID2TLib.Utility.get_rnd_os"""'], {'return_value': '"""linux"""'}), "('ID2TLib.Utility.get_rnd_os', return_value='linux')\n", (1090, 1142), True, 'import unittest.mock as mock\n'), ((1367, 1429), 'unittest.mock.patch', 'mock.patch', (['"""ID2TLib.Utility.get_rnd_os"""'], {'return_value': '"""winxp"""'}), "('ID2TLib.Utility.get_rnd_os', return_value='winxp')\n", (1377, 1429), True, 'import unittest.mock as mock\n'), ((1704, 1766), 'unittest.mock.patch', 'mock.patch', (['"""ID2TLib.Utility.get_rnd_os"""'], {'return_value': '"""macos"""'}), "('ID2TLib.Utility.get_rnd_os', return_value='macos')\n", (1714, 1766), True, 'import unittest.mock as mock\n'), ((2454, 2515), 'unittest.mock.patch', 'mock.patch', (['"""ID2TLib.Utility.get_rnd_os"""'], {'return_value': '"""win7"""'}), "('ID2TLib.Utility.get_rnd_os', return_value='win7')\n", (2464, 2515), True, 'import unittest.mock as mock\n'), ((2793, 2854), 'unittest.mock.patch', 'mock.patch', (['"""ID2TLib.Utility.get_rnd_os"""'], {'return_value': '"""win7"""'}), "('ID2TLib.Utility.get_rnd_os', return_value='win7')\n", (2803, 2854), True, 'import unittest.mock as mock\n'), ((3068, 3129), 'unittest.mock.patch', 'mock.patch', (['"""ID2TLib.Utility.get_rnd_os"""'], {'return_value': '"""win7"""'}), "('ID2TLib.Utility.get_rnd_os', return_value='win7')\n", (3078, 3129), True, 'import unittest.mock as mock\n'), ((3397, 3459), 'unittest.mock.patch', 'mock.patch', (['"""ID2TLib.Utility.get_rnd_os"""'], {'return_value': '"""linux"""'}), "('ID2TLib.Utility.get_rnd_os', return_value='linux')\n", (3407, 3459), True, 'import unittest.mock as mock\n')] |
import unittest
import pystac
import pystac_io
import pystac_io.s3
class TestPyStacIoContextManager(unittest.TestCase):
def test_context_manager(self):
from pystac_io.s3 import s3_read_text_method
self.assertEqual(
pystac.STAC_IO.read_text_method, pystac.STAC_IO.default_read_text_method
)
with pystac_io.register("s3"):
self.assertEqual(pystac.STAC_IO.read_text_method, s3_read_text_method)
self.assertEqual(
pystac.STAC_IO.read_text_method, pystac.STAC_IO.default_read_text_method
)
def test_func_call(self):
from pystac_io.s3 import s3_read_text_method
self.assertEqual(
pystac.STAC_IO.read_text_method, pystac.STAC_IO.default_read_text_method
)
pystac_io.register("s3")
self.assertEqual(pystac.STAC_IO.read_text_method, s3_read_text_method)
pystac_io.unregister()
self.assertEqual(
pystac.STAC_IO.read_text_method, pystac.STAC_IO.default_read_text_method
)
| [
"pystac_io.register",
"pystac_io.unregister"
] | [((792, 816), 'pystac_io.register', 'pystac_io.register', (['"""s3"""'], {}), "('s3')\n", (810, 816), False, 'import pystac_io\n'), ((905, 927), 'pystac_io.unregister', 'pystac_io.unregister', ([], {}), '()\n', (925, 927), False, 'import pystac_io\n'), ((347, 371), 'pystac_io.register', 'pystac_io.register', (['"""s3"""'], {}), "('s3')\n", (365, 371), False, 'import pystac_io\n')] |
"""
Miscellaneous utility functions.
If several functions here develop a theme, consider reorganising them into a module.
"""
import subprocess as sp
import numpy
import itertools
import hashlib
import json
import types
from datetime import date
from scipy.stats import beta, gamma
from scipy.optimize import minimize
from summer.model import find_name_components
def get_data_hash(*args):
"""
Get a hash of a bunch of JSON serializable data.
Returns first 8 characters of the MD5 hash of the data.
Eg. args of ("foo", {"a": 1}, [1, 2, 3]) --> "34d333dw"
"""
data_str = ""
for arg in args:
try:
data_str += json.dumps(arg)
except TypeError:
continue # Fail silently :(
hash_str = hashlib.md5(data_str.encode()).hexdigest()
return hash_str[:8]
def merge_dicts(src: dict, dest: dict):
"""
Merge src dict into dest dict.
"""
for key, value in src.items():
if isinstance(value, dict):
# get node or create one
node = dest.setdefault(key, {})
merge_dicts(value, node)
else:
dest[key] = value
return dest
def get_git_hash():
"""
Return the current commit hash, or an empty string.
"""
return run_command("git rev-parse HEAD").strip()
def get_git_branch():
"""
Return the current git branch, or an empty string
"""
return run_command("git rev-parse --abbrev-ref HEAD").strip()
def run_command(cmds):
"""
    Run a process and return the stdout.
"""
try:
result = sp.run(cmds, shell=True, check=True, stdout=sp.PIPE, encoding="utf-8")
return result.stdout
except sp.CalledProcessError:
return ""
def return_function_of_function(inner_function, outer_function):
"""
Returns a chained function from two functions
"""
return lambda value: outer_function(inner_function(value))
def step_function_maker(start_time, end_time, value):
def my_function(time):
if start_time <= time <= end_time:
y = value
else:
y = 0.0
return y
return my_function
def progressive_step_function_maker(start_time, end_time, average_value, scaling_time_fraction=0.2):
"""
Make a step_function with linear increasing and decreasing slopes to simulate more progressive changes
:param average_value: targeted average value (auc)
:param scaling_time_fraction: fraction of (end_time - start_time) used for scaling up the value (classic step
function obtained with scaling_time_fraction=0, triangle function obtained with scaling_time_fraction=.5)
:return: function
"""
assert scaling_time_fraction <= 0.5, "scaling_time_fraction must be <=.5"
def my_function(time):
if time <= start_time or time >= end_time:
y = 0
else:
total_time = end_time - start_time
plateau_height = average_value / (1.0 - scaling_time_fraction)
slope = plateau_height / (scaling_time_fraction * total_time)
intercept_left = -slope * start_time
intercept_right = slope * end_time
if (
start_time + scaling_time_fraction * total_time
<= time
<= end_time - scaling_time_fraction * total_time
):
y = plateau_height
elif time < start_time + scaling_time_fraction * total_time:
y = slope * time + intercept_left
else:
y = -slope * time + intercept_right
return y
return my_function
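# Worked example for progressive_step_function_maker (illustrative comment only):
# with start_time=0, end_time=10, average_value=4 and scaling_time_fraction=0.2, the
# plateau height is 4 / (1 - 0.2) = 5; the value ramps from 0 to 5 over t in [0, 2],
# stays at 5 for t in [2, 8] and ramps back down over [8, 10], so the area under the
# curve (plateau 5 * 6 plus two ramp triangles of area 5 each) is 40, i.e. an average
# of 4 over the 10 time units, as requested.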
def change_parameter_unit(parameter_dict, multiplier):
"""
    used to adapt the latency parameters from the earlier functions according to whether they are needed by year
    rather than by day
:param parameter_dict: dict
dictionary whose values need to be adjusted
:param multiplier: float
multiplier
:return: dict
dictionary with values multiplied by the multiplier argument
"""
return {
param_key: param_value * multiplier for param_key, param_value in parameter_dict.items()
}
def add_w_to_param_names(parameter_dict):
"""
add a "W" string to the end of the parameter name to indicate that we should over-write up the chain
:param parameter_dict: dict
the dictionary before the adjustments
:return: dict
same dictionary but with the "W" string added to each of the keys
"""
return {str(age_group) + "W": value for age_group, value in parameter_dict.items()}
def find_stratum_index_from_string(compartment, stratification, remove_stratification_name=True):
"""
finds the stratum which the compartment (or parameter) name falls in when provided with the compartment name and the
name of the stratification of interest
for example, if the compartment name was infectiousXhiv_positiveXdiabetes_none and the stratification of interest
    provided through the stratification argument was hiv, then 'positive' would be returned (or 'hiv_positive' if
    remove_stratification_name were set to False)
:param compartment: str
name of the compartment or parameter to be interrogated
:param stratification: str
the stratification of interest
:param remove_stratification_name: bool
whether to remove the stratification name and its trailing _ from the string to return
:return: str
the name of the stratum within which the compartment falls
"""
stratum_name = [
name
for n_name, name in enumerate(find_name_components(compartment))
if stratification in name
][0]
return (
stratum_name[stratum_name.find("_") + 1 :] if remove_stratification_name else stratum_name
)
def find_first_list_element_above(list, value):
"""
Simple method to return the index of the first element of a list that is greater than a specified value.
Args:
list: List of floats
value: The value that the element must be greater than
"""
return next(x[0] for x in enumerate(list) if x[1] > value)
def get_integration_times(start_year: int, end_year: int, time_step: int):
"""
Get a list of timesteps from start_year to end_year, spaced by time_step.
"""
n_iter = int(round((end_year - start_year) / time_step)) + 1
return numpy.linspace(start_year, end_year, n_iter).tolist()
def element_wise_list_summation(list_1, list_2):
"""
Element-wise summation of two lists of the same length.
"""
return [value_1 + value_2 for value_1, value_2 in zip(list_1, list_2)]
def repeat_list_elements(repetitions, list_to_repeat):
return list(
itertools.chain.from_iterable(
itertools.repeat(i_element, repetitions) for i_element in list_to_repeat
)
)
def repeat_list_elements_average_last_two(raw_props):
"""
Repeat 5-year age-specific proportions, but with 75+s taking the average of the last two groups.
"""
repeated_props = repeat_list_elements(2, raw_props[:-1])
repeated_props[-1] = sum(raw_props[-2:]) / 2.0
return repeated_props
def find_series_compartment_parameter(proportion_to_split, n_compartments, original_parameter):
return (1.0 - (1.0 - proportion_to_split) ** (1.0 / n_compartments)) * original_parameter
def find_rates_and_complements_from_ifr(cfrs, n_compartment_repeats, overall_rates):
"""
Given a list of proportions (CFRs) to be applied to a set of n compartments in series with equal flow rates through
them, work out the death rates and their complements
"""
death_rates = [
find_series_compartment_parameter(i_cfr, n_compartment_repeats, i_rate)
for i_cfr, i_rate in zip(cfrs, overall_rates)
]
complements = [
i_overall_rate - i_death_rate
for i_overall_rate, i_death_rate in zip(overall_rates, death_rates)
]
return death_rates, complements
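# Worked example for the two helpers above (illustrative comment only): splitting an
# overall proportion of 0.75 across 2 sequential compartments gives
# (1 - (1 - 0.75) ** (1 / 2)) = 0.5 of the original parameter for each compartment,
# and find_rates_and_complements_from_ifr pairs each such death rate with its
# complement, i.e. the overall rate minus the death rate.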
def find_first_index_reaching_cumulative_sum(a_list, threshold):
"""
Returns the index at which the cumulative sum of a list has reached a given value
:param a_list: list object containing floats or integers
:param threshold: a float or integer
:return: an index (integer)
"""
cumsum_list = numpy.cumsum(a_list).tolist()
if cumsum_list[-1] < threshold:
raise ValueError("The cumulative sum of the entire list is smaller than the threshold")
return next(i for i, val in enumerate(cumsum_list) if val >= threshold)
def get_date_from_tuple(date_as_tuple):
return date(date_as_tuple[0], date_as_tuple[1], date_as_tuple[2])
def get_date_from_string(date_as_string):
return date(int(date_as_string[:4]), int(date_as_string[4: 6]), int(date_as_string[6:]))
def find_relative_date_from_string_or_tuple(requested_date, base_date=(2019, 12, 31)):
requested_date = get_date_from_string(requested_date) if \
type(requested_date) == str else \
get_date_from_tuple(requested_date)
difference = requested_date - get_date_from_tuple(base_date)
return difference.days
def normalise_sequence(input_sequence):
"""
Normalise a list or tuple to produce a tuple with values representing the proportion of each to the total of the
input sequence.
"""
return (i_value / sum(input_sequence) for i_value in input_sequence)
def convert_list_contents_to_int(input_list):
return [int(i_element) for i_element in input_list]
def element_wise_list_division(numerator, denominator):
"""
Simple function to find the quotients of two lists.
"""
return [num / den for num, den in zip(numerator, denominator)]
def find_distribution_params_from_mean_and_ci(distribution, mean, ci, ci_width=.95):
"""
Work out the parameters of a given distribution based on a desired mean and CI
:param distribution: string
either 'gamma' or 'beta' for now
:param mean: float
the desired mean value
:param ci: list or tuple of length 2
the lower and upper bounds of the CI
:param ci_width:
the width of the desired CI
:return:
a dictionary with the parameters
"""
assert len(ci) == 2 and ci[1] > ci[0] and 0. < ci_width < 1.
percentile_low = (1. - ci_width) / 2.
percentile_up = 1. - percentile_low
if distribution == 'beta':
assert 0. < ci[0] < 1. and 0. < ci[1] < 1. and 0. < mean < 1.
def distance_to_minimise(a):
b = a * (1. - mean) / mean
vals = beta.ppf([percentile_low, percentile_up], a, b)
dist = sum([(ci[i] - vals[i])**2 for i in range(2)])
return dist
sol = minimize(distance_to_minimise, [1.], bounds=[(0., None)])
best_a = sol.x
best_b = best_a * (1. - mean) / mean
params = {'a': best_a, 'b': best_b}
elif distribution == 'gamma':
assert ci[0] > 0 and ci[1] > 0 and mean > 0.
def distance_to_minimise(scale):
shape = mean / scale
vals = gamma.ppf([percentile_low, percentile_up], shape, 0, scale)
dist = sum([(ci[i] - vals[i])**2 for i in range(2)])
return dist
sol = minimize(distance_to_minimise, [1.], bounds=[(1.e-11, None)])
best_scale = sol.x
best_shape = mean / best_scale
params = {'shape': best_shape, 'scale': best_scale}
else:
raise ValueError(distribution + " distribution is not supported for the moment")
return params
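# Illustrative usage (comment only): to fit a beta distribution with mean 0.3 and a
# central 95% interval of roughly (0.2, 0.4), one could call
#     find_distribution_params_from_mean_and_ci('beta', 0.3, [0.2, 0.4])
# and read the fitted parameters from the returned dict ('a' and 'b' for 'beta',
# 'shape' and 'scale' for 'gamma').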
def copy_function(f, name=None):
'''
return a function with same code, globals, defaults, closure, and
name (or provide a new name)
'''
fn = types.FunctionType(f.__code__, f.__globals__, name or f.__name__,
f.__defaults__, f.__closure__)
# in case f was given attrs (note this dict is a shallow copy):
fn.__dict__.update(f.__dict__)
return fn
| [
"subprocess.run",
"scipy.optimize.minimize",
"json.dumps",
"scipy.stats.gamma.ppf",
"numpy.linspace",
"summer.model.find_name_components",
"datetime.date",
"numpy.cumsum",
"types.FunctionType",
"scipy.stats.beta.ppf",
"itertools.repeat"
] | [((8501, 8559), 'datetime.date', 'date', (['date_as_tuple[0]', 'date_as_tuple[1]', 'date_as_tuple[2]'], {}), '(date_as_tuple[0], date_as_tuple[1], date_as_tuple[2])\n', (8505, 8559), False, 'from datetime import date\n'), ((11590, 11691), 'types.FunctionType', 'types.FunctionType', (['f.__code__', 'f.__globals__', '(name or f.__name__)', 'f.__defaults__', 'f.__closure__'], {}), '(f.__code__, f.__globals__, name or f.__name__, f.\n __defaults__, f.__closure__)\n', (11608, 11691), False, 'import types\n'), ((1581, 1651), 'subprocess.run', 'sp.run', (['cmds'], {'shell': '(True)', 'check': '(True)', 'stdout': 'sp.PIPE', 'encoding': '"""utf-8"""'}), "(cmds, shell=True, check=True, stdout=sp.PIPE, encoding='utf-8')\n", (1587, 1651), True, 'import subprocess as sp\n'), ((10604, 10663), 'scipy.optimize.minimize', 'minimize', (['distance_to_minimise', '[1.0]'], {'bounds': '[(0.0, None)]'}), '(distance_to_minimise, [1.0], bounds=[(0.0, None)])\n', (10612, 10663), False, 'from scipy.optimize import minimize\n'), ((658, 673), 'json.dumps', 'json.dumps', (['arg'], {}), '(arg)\n', (668, 673), False, 'import json\n'), ((6298, 6342), 'numpy.linspace', 'numpy.linspace', (['start_year', 'end_year', 'n_iter'], {}), '(start_year, end_year, n_iter)\n', (6312, 6342), False, 'import numpy\n'), ((8208, 8228), 'numpy.cumsum', 'numpy.cumsum', (['a_list'], {}), '(a_list)\n', (8220, 8228), False, 'import numpy\n'), ((10452, 10499), 'scipy.stats.beta.ppf', 'beta.ppf', (['[percentile_low, percentile_up]', 'a', 'b'], {}), '([percentile_low, percentile_up], a, b)\n', (10460, 10499), False, 'from scipy.stats import beta, gamma\n'), ((11120, 11181), 'scipy.optimize.minimize', 'minimize', (['distance_to_minimise', '[1.0]'], {'bounds': '[(1e-11, None)]'}), '(distance_to_minimise, [1.0], bounds=[(1e-11, None)])\n', (11128, 11181), False, 'from scipy.optimize import minimize\n'), ((6679, 6719), 'itertools.repeat', 'itertools.repeat', (['i_element', 'repetitions'], {}), '(i_element, repetitions)\n', (6695, 6719), False, 'import itertools\n'), ((10956, 11015), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['[percentile_low, percentile_up]', 'shape', '(0)', 'scale'], {}), '([percentile_low, percentile_up], shape, 0, scale)\n', (10965, 11015), False, 'from scipy.stats import beta, gamma\n'), ((5514, 5547), 'summer.model.find_name_components', 'find_name_components', (['compartment'], {}), '(compartment)\n', (5534, 5547), False, 'from summer.model import find_name_components\n')] |
import sqlalchemy as SA
import time
from optparse import OptionParser
try:
import simplejson as json
_hush_pyflakes = [json]
del _hush_pyflakes
except ImportError:
import json
from test.discovery_failure_test import BrokenImportTestCase
from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range
from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults
from testify.test_result import TestResult
from testify.test_runner import TestRunner
class DummyTestCase(TestCase):
__test__ = False
def test_pass(self):
pass
def test_fail(self):
assert False
class SQLReporterBaseTestCase(TestCase):
__test__ = False
@setup_teardown
def make_reporter(self):
"""Make self.reporter, a SQLReporter that runs on an empty in-memory SQLite database."""
parser = OptionParser()
add_command_line_options(parser)
(options, args) = parser.parse_args([
'--reporting-db-url', 'sqlite://',
'--sql-reporting-frequency', '0.05',
'--build-info', json.dumps({
'buildbot' : 1,
'buildnumber' : 1,
'branch' : 'a_branch_name',
'revision' : 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef',
'buildname' : 'a_build_name'
})
])
create_engine_opts = {
'poolclass' : SA.pool.StaticPool,
'connect_args' : {'check_same_thread' : False}
}
self.reporter = SQLReporter(options, create_engine_opts=create_engine_opts)
yield
# no teardown.
class SQLReporterTestCase(SQLReporterBaseTestCase):
def test_integration(self):
"""Run a runner with self.reporter as a test reporter, and verify a bunch of stuff."""
runner = TestRunner(DummyTestCase, test_reporters=[self.reporter])
conn = self.reporter.conn
# We're creating a new in-memory database in make_reporter, so we don't need to worry about rows from previous tests.
(build,) = list(conn.execute(Builds.select()))
assert_equal(build['buildname'], 'a_build_name')
assert_equal(build['branch'], 'a_branch_name')
assert_equal(build['revision'], 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef')
# Method count should be None until we discover (which is part of running)
assert_equal(build['method_count'], None)
# End time should be None until we run.
assert_equal(build['end_time'], None)
assert runner.run()
# Now that we've run the tests, get the build row again and check to see that things are updated.
(updated_build,) = list(conn.execute(Builds.select()))
for key in updated_build.keys():
if key not in ('end_time', 'run_time', 'method_count'):
assert_equal(build[key], updated_build[key])
assert_gt(updated_build['run_time'], 0)
assert_in_range(updated_build['end_time'], 0, time.time())
assert_equal(updated_build['method_count'], 2)
# The discovery_failure column should exist and be False.
assert 'discovery_failure' in build
assert_equal(build['discovery_failure'], False)
# Check that we have one failure and one pass, and that they're the right tests.
test_results = list(conn.execute(SA.select(
columns=TestResults.columns + Tests.columns,
from_obj=TestResults.join(Tests, TestResults.c.test == Tests.c.id)
)))
assert_equal(len(test_results), 2)
(passed_test,) = [r for r in test_results if not r['failure']]
(failed_test,) = [r for r in test_results if r['failure']]
assert_equal(passed_test['method_name'], 'test_pass')
assert_equal(failed_test['method_name'], 'test_fail')
def test_update_counts(self):
"""Tell our SQLReporter to update its counts, and check that it does."""
conn = self.reporter.conn
(build,) = list(conn.execute(Builds.select()))
assert_equal(build['method_count'], None)
self.reporter.test_counts(3, 50)
(updated_build,) = list(conn.execute(Builds.select()))
assert_equal(updated_build['method_count'], 50)
def test_previous_run(self):
"""Insert a test result with two previous runs, and make sure it works properly."""
conn = self.reporter.conn
test_case = DummyTestCase()
results = [TestResult(test_case.test_pass) for _ in xrange(3)]
previous_run = None
for result in results:
if previous_run:
result.start(previous_run=previous_run.to_dict())
else:
result.start()
result.end_in_success()
previous_run = result
self.reporter.test_complete(results[-1].to_dict())
assert self.reporter.report() # Make sure all results are inserted.
test_results = list(conn.execute(SA.select(
columns=TestResults.columns + Tests.columns,
from_obj=TestResults.join(Tests, TestResults.c.test == Tests.c.id)
)))
assert_equal(len(test_results), 3)
for result in test_results:
assert_equal(result['method_name'], 'test_pass')
class SQLReporterDiscoveryFailureTestCase(SQLReporterBaseTestCase, BrokenImportTestCase):
def test_sql_reporter_sets_discovery_failure_flag(self):
runner = TestRunner(self.broken_import_module, test_reporters=[self.reporter])
runner.run()
conn = self.reporter.conn
(build,) = list(conn.execute(Builds.select()))
assert_equal(build['discovery_failure'], True)
# vim: set ts=4 sts=4 sw=4 et:
| [
"testify.assert_equal",
"testify.plugins.sql_reporter.add_command_line_options",
"testify.test_result.TestResult",
"testify.plugins.sql_reporter.Builds.select",
"testify.plugins.sql_reporter.SQLReporter",
"testify.test_runner.TestRunner",
"json.dumps",
"optparse.OptionParser",
"testify.plugins.sql_reporter.TestResults.join",
"testify.assert_gt",
"time.time"
] | [((901, 915), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (913, 915), False, 'from optparse import OptionParser\n'), ((924, 956), 'testify.plugins.sql_reporter.add_command_line_options', 'add_command_line_options', (['parser'], {}), '(parser)\n', (948, 956), False, 'from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults\n'), ((1566, 1625), 'testify.plugins.sql_reporter.SQLReporter', 'SQLReporter', (['options'], {'create_engine_opts': 'create_engine_opts'}), '(options, create_engine_opts=create_engine_opts)\n', (1577, 1625), False, 'from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults\n'), ((1863, 1920), 'testify.test_runner.TestRunner', 'TestRunner', (['DummyTestCase'], {'test_reporters': '[self.reporter]'}), '(DummyTestCase, test_reporters=[self.reporter])\n', (1873, 1920), False, 'from testify.test_runner import TestRunner\n'), ((2146, 2194), 'testify.assert_equal', 'assert_equal', (["build['buildname']", '"""a_build_name"""'], {}), "(build['buildname'], 'a_build_name')\n", (2158, 2194), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((2203, 2249), 'testify.assert_equal', 'assert_equal', (["build['branch']", '"""a_branch_name"""'], {}), "(build['branch'], 'a_branch_name')\n", (2215, 2249), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((2258, 2333), 'testify.assert_equal', 'assert_equal', (["build['revision']", '"""deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"""'], {}), "(build['revision'], 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef')\n", (2270, 2333), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((2426, 2467), 'testify.assert_equal', 'assert_equal', (["build['method_count']", 'None'], {}), "(build['method_count'], None)\n", (2438, 2467), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((2524, 2561), 'testify.assert_equal', 'assert_equal', (["build['end_time']", 'None'], {}), "(build['end_time'], None)\n", (2536, 2561), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((2941, 2980), 'testify.assert_gt', 'assert_gt', (["updated_build['run_time']", '(0)'], {}), "(updated_build['run_time'], 0)\n", (2950, 2980), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((3056, 3102), 'testify.assert_equal', 'assert_equal', (["updated_build['method_count']", '(2)'], {}), "(updated_build['method_count'], 2)\n", (3068, 3102), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((3222, 3269), 'testify.assert_equal', 'assert_equal', (["build['discovery_failure']", '(False)'], {}), "(build['discovery_failure'], False)\n", (3234, 3269), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((3751, 3804), 'testify.assert_equal', 'assert_equal', (["passed_test['method_name']", '"""test_pass"""'], {}), "(passed_test['method_name'], 'test_pass')\n", (3763, 3804), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((3813, 3866), 'testify.assert_equal', 'assert_equal', (["failed_test['method_name']", '"""test_fail"""'], {}), "(failed_test['method_name'], 'test_fail')\n", (3825, 3866), False, 'from testify import TestCase, setup_teardown, 
assert_equal, assert_gt, assert_in_range\n'), ((4083, 4124), 'testify.assert_equal', 'assert_equal', (["build['method_count']", 'None'], {}), "(build['method_count'], None)\n", (4095, 4124), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((4239, 4286), 'testify.assert_equal', 'assert_equal', (["updated_build['method_count']", '(50)'], {}), "(updated_build['method_count'], 50)\n", (4251, 4286), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((5480, 5549), 'testify.test_runner.TestRunner', 'TestRunner', (['self.broken_import_module'], {'test_reporters': '[self.reporter]'}), '(self.broken_import_module, test_reporters=[self.reporter])\n', (5490, 5549), False, 'from testify.test_runner import TestRunner\n'), ((5670, 5716), 'testify.assert_equal', 'assert_equal', (["build['discovery_failure']", '(True)'], {}), "(build['discovery_failure'], True)\n", (5682, 5716), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((3035, 3046), 'time.time', 'time.time', ([], {}), '()\n', (3044, 3046), False, 'import time\n'), ((4503, 4534), 'testify.test_result.TestResult', 'TestResult', (['test_case.test_pass'], {}), '(test_case.test_pass)\n', (4513, 4534), False, 'from testify.test_result import TestResult\n'), ((5261, 5309), 'testify.assert_equal', 'assert_equal', (["result['method_name']", '"""test_pass"""'], {}), "(result['method_name'], 'test_pass')\n", (5273, 5309), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((1127, 1292), 'json.dumps', 'json.dumps', (["{'buildbot': 1, 'buildnumber': 1, 'branch': 'a_branch_name', 'revision':\n 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef', 'buildname': 'a_build_name'}"], {}), "({'buildbot': 1, 'buildnumber': 1, 'branch': 'a_branch_name',\n 'revision': 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef', 'buildname':\n 'a_build_name'})\n", (1137, 1292), False, 'import json\n'), ((2119, 2134), 'testify.plugins.sql_reporter.Builds.select', 'Builds.select', ([], {}), '()\n', (2132, 2134), False, 'from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults\n'), ((2743, 2758), 'testify.plugins.sql_reporter.Builds.select', 'Builds.select', ([], {}), '()\n', (2756, 2758), False, 'from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults\n'), ((2887, 2931), 'testify.assert_equal', 'assert_equal', (['build[key]', 'updated_build[key]'], {}), '(build[key], updated_build[key])\n', (2899, 2931), False, 'from testify import TestCase, setup_teardown, assert_equal, assert_gt, assert_in_range\n'), ((4056, 4071), 'testify.plugins.sql_reporter.Builds.select', 'Builds.select', ([], {}), '()\n', (4069, 4071), False, 'from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults\n'), ((4212, 4227), 'testify.plugins.sql_reporter.Builds.select', 'Builds.select', ([], {}), '()\n', (4225, 4227), False, 'from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults\n'), ((5643, 5658), 'testify.plugins.sql_reporter.Builds.select', 'Builds.select', ([], {}), '()\n', (5656, 5658), False, 'from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults\n'), ((3490, 3547), 'testify.plugins.sql_reporter.TestResults.join', 'TestResults.join', (['Tests', '(TestResults.c.test == 
Tests.c.id)'], {}), '(Tests, TestResults.c.test == Tests.c.id)\n', (3506, 3547), False, 'from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults\n'), ((5098, 5155), 'testify.plugins.sql_reporter.TestResults.join', 'TestResults.join', (['Tests', '(TestResults.c.test == Tests.c.id)'], {}), '(Tests, TestResults.c.test == Tests.c.id)\n', (5114, 5155), False, 'from testify.plugins.sql_reporter import SQLReporter, add_command_line_options, Tests, Builds, TestResults\n')] |
# coding=utf-8
# Development server
from slideatlas import create_app
app = create_app()
app.run(host='0.0.0.0', port=8080)
| [
"slideatlas.create_app"
] | [((77, 89), 'slideatlas.create_app', 'create_app', ([], {}), '()\n', (87, 89), False, 'from slideatlas import create_app\n')] |
# Copyright 2019 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystonemiddleware.auth_token import _path_matches
from keystonemiddleware.tests.unit import utils
class TestAccessRules(utils.BaseTestCase):
def test_path_matches(self):
good_matches = [
('/v2/servers', '/v2/servers'),
('/v2/servers/123', '/v2/servers/{server_id}'),
('/v2/servers/123/', '/v2/servers/{server_id}/'),
('/v2/servers/123', '/v2/servers/*'),
('/v2/servers/123/', '/v2/servers/*/'),
('/v2/servers/123', '/v2/servers/**'),
('/v2/servers/123/', '/v2/servers/**'),
('/v2/servers/123/456', '/v2/servers/**'),
('/v2/servers', '**'),
('/v2/servers/', '**'),
('/v2/servers/123', '**'),
('/v2/servers/123/456', '**'),
('/v2/servers/123/volume/456', '**'),
('/v2/servers/123/456', '/v2/*/*/*'),
('/v2/123/servers/466', '/v2/{project_id}/servers/{server_id}'),
]
for (request, pattern) in good_matches:
self.assertIsNotNone(_path_matches(request, pattern))
bad_matches = [
('/v2/servers/someuuid', '/v2/servers'),
('/v2/servers//', '/v2/servers/{server_id}'),
('/v2/servers/123/', '/v2/servers/{server_id}'),
('/v2/servers/123/456', '/v2/servers/{server_id}'),
('/v2/servers/123/456', '/v2/servers/*'),
('/v2/servers', 'v2/servers'),
('/v2/servers/123/456/789', '/v2/*/*/*'),
('/v2/servers/123/', '/v2/*/*/*'),
('/v2/servers/', '/v2/servers/{server_id}'),
('/v2/servers', '/v2/servers/{server_id}'),
]
for (request, pattern) in bad_matches:
self.assertIsNone(_path_matches(request, pattern))
| [
"keystonemiddleware.auth_token._path_matches"
] | [((1629, 1660), 'keystonemiddleware.auth_token._path_matches', '_path_matches', (['request', 'pattern'], {}), '(request, pattern)\n', (1642, 1660), False, 'from keystonemiddleware.auth_token import _path_matches\n'), ((2320, 2351), 'keystonemiddleware.auth_token._path_matches', '_path_matches', (['request', 'pattern'], {}), '(request, pattern)\n', (2333, 2351), False, 'from keystonemiddleware.auth_token import _path_matches\n')] |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
"""
Train a network on multiple GPUs using multiprocessing.
"""
from itertools import cycle, islice
import math
import torch
from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau
from fairseq import nccl, utils
from fairseq.multiprocessing_event_loop import MultiprocessingEventLoop, Future
from fairseq.optim.nag import NAG
from fairseq.optim.adam import Adam
class MultiprocessingTrainer(MultiprocessingEventLoop):
"""Main class for multi-GPU training.
Each GPU has a full copy of the model and is assigned to its own Python
process. Gradients are accumulated with all-reduce and all model replicas
are updated synchronously after each batch.
The methods in this class are divided into synchronous functions, which
prepare and dispatch the input to each process, and asynchronous functions
(prefixed with `_async_`), which run on each process in parallel.
"""
OPTIMIZERS = ['adagrad', 'adam', 'nag', 'sgd']
def __init__(self, args, model, criterion, device_ids=None,
multiprocessing_method='spawn'):
if device_ids is None:
device_ids = tuple(range(torch.cuda.device_count()))
super().__init__(device_ids, multiprocessing_method)
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
model = model.share_memory()
nccl_uid = nccl.get_unique_id()
self.criterion = criterion
Future.gen_list([
self.call_async(rank, '_async_init', args=args, model=model,
criterion=criterion, nccl_uid=nccl_uid)
for rank in range(self.num_replicas)
])
self._grads_initialized = False
def _async_init(self, rank, device_id, args, model, criterion, nccl_uid):
"""Initialize child processes."""
self.args = args
# set CUDA device
torch.cuda.set_device(device_id)
# initialize NCCL
nccl.initialize(self.num_replicas, nccl_uid, device_id)
# copy model and criterion to current device
self.model = model.cuda()
self.criterion = criterion.cuda()
# initialize optimizer and LR scheduler
self.args.lr = list(map(float, self.args.lr.split(',')))
self.optimizer = self._build_optimizer()
self.lr_scheduler = self._build_lr_scheduler()
self.loss = None
self._max_bsz_seen = 0
def _build_optimizer(self):
# When resuming training from a checkpoint, we load the old optimizer
# state that includes things like learning rate, momentum factor, etc.
# We use this dictionary to override values stored in the checkpoint,
# e.g., we might prefer the values specified on the command line.
self._override_optim_state = {}
if self.args.optimizer == 'adagrad':
self._override_optim_state = {
'lr': self.args.lr[0],
'weight_decay': self.args.weight_decay,
}
return torch.optim.Adagrad(self.model.parameters(), **self._override_optim_state)
elif self.args.optimizer == 'adam':
self._override_optim_state = {
'lr': self.args.lr[0],
'betas': eval(self.args.adam_betas),
'weight_decay': self.args.weight_decay,
}
return Adam(self.model.parameters(), **self._override_optim_state)
elif self.args.optimizer == 'nag':
self._override_optim_state = {
'lr': self.args.lr[0],
'momentum': self.args.momentum,
'weight_decay': self.args.weight_decay,
}
return NAG(self.model.parameters(), **self._override_optim_state)
elif self.args.optimizer == 'sgd':
self._override_optim_state = {
'lr': self.args.lr[0],
'momentum': self.args.momentum,
'weight_decay': self.args.weight_decay,
}
return torch.optim.SGD(self.model.parameters(), **self._override_optim_state)
else:
raise ValueError('Unknown optimizer: {}'.format(self.args.optimizer))
def _build_lr_scheduler(self):
if len(self.args.lr) > 1 or self.args.force_anneal > 0:
lrs = self.args.lr
def anneal(e):
if e < self.args.force_anneal:
# use fixed LR schedule
next_lr = lrs[min(e, len(lrs) - 1)]
else:
next_lr = lrs[-1] * self.args.lrshrink ** (e + 1 - self.args.force_anneal)
return next_lr / lrs[0] # correct for scaling from LambdaLR
lr_scheduler = LambdaLR(self.optimizer, anneal)
lr_scheduler.best = None
else:
# decay the LR by a factor every time the validation loss plateaus
lr_scheduler = ReduceLROnPlateau(self.optimizer, patience=0,
factor=self.args.lrshrink)
return lr_scheduler
def get_model(self):
"""Get one of the model replicas."""
# just return the first model, since all replicas are the same
return self.call_async(0, '_async_get_model').gen()
def _async_get_model(self, rank, device_id):
return self.model
def save_checkpoint(self, filename, extra_state):
"""Save a checkpoint for the current model."""
self.call_async(0, '_async_save_checkpoint', filename=filename, extra_state=extra_state).gen()
def _async_save_checkpoint(self, rank, device_id, filename, extra_state):
utils.save_state(filename, self.args, self.model, self.criterion, self.optimizer,
self.lr_scheduler, self._optim_history, extra_state)
def load_checkpoint(self, filename):
"""Load a checkpoint into the model replicas in each process."""
results = Future.gen_list([
self.call_async(rank, '_async_load_checkpoint', filename=filename)
for rank in range(self.num_replicas)
])
extra_state = results[0]
return extra_state
def _async_load_checkpoint(self, rank, device_id, filename):
extra_state, self._optim_history, last_optim_state = utils.load_model_state(
filename, self.model, cuda_device=device_id)
if last_optim_state is not None:
# rebuild optimizer after loading model, since params may have changed
self.optimizer = self._build_optimizer()
self.lr_scheduler = self._build_lr_scheduler()
# only load optimizer and lr_scheduler if they match the checkpoint
last_optim = self._optim_history[-1]
if last_optim['criterion_name'] == self.criterion.__class__.__name__:
self.optimizer.load_state_dict(last_optim_state)
self.lr_scheduler.best = last_optim['best_loss']
# override learning rate, momentum, etc. with latest values
for group in self.optimizer.param_groups:
group.update(self._override_optim_state)
return extra_state
def set_seed(self, seed):
Future.gen_list([
self.call_async(rank, '_async_set_seed', seed=seed)
for rank in range(self.num_replicas)
])
def _async_set_seed(self, rank, device_id, seed):
torch.manual_seed(seed)
def train_step(self, samples):
"""Do forward, backward and gradient step in parallel."""
# PyTorch initializes gradient buffers lazily, so the first
# train step needs to send non-empty samples to all replicas
replace_empty_samples = False
if not self._grads_initialized:
replace_empty_samples = True
self._grads_initialized = True
# scatter sample across GPUs
self._scatter_samples(samples, replace_empty_samples=replace_empty_samples)
# forward pass
sample_sizes, logging_outputs, ooms_fwd = Future.gen_tuple_list([
self.call_async(rank, '_async_forward')
for rank in range(self.num_replicas)
])
# backward pass, all-reduce gradients and take an optimization step
grad_denom = self.criterion.__class__.grad_denom(sample_sizes)
grad_norms, ooms_bwd = Future.gen_tuple_list([
self.call_async(rank, '_async_backward_and_opt', grad_denom=grad_denom)
for rank in range(self.num_replicas)
])
# aggregate logging output
logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
logging_output['gnorm'] = grad_norms[0] # log the gradient norm
logging_output['oom'] = sum(ooms_fwd) + sum(ooms_bwd)
return logging_output
def _async_forward(self, rank, device_id, eval=False):
if eval:
self.model.eval()
else:
self.model.train()
self.optimizer.zero_grad()
with utils.maybe_no_grad(eval):
sample_size, logging_output, oom = 0, {}, False
if self._sample is not None:
try:
# calculate loss and sample size
self.loss, sample_size, logging_output = self.criterion(self.model, self._sample)
except RuntimeError as e:
if not eval and 'out of memory' in str(e):
print('| WARNING: ran out of memory on GPU #{}, skipping batch'.format(device_id))
oom = True
self.loss = None
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
raise e
return sample_size, logging_output, oom
def _async_backward_and_opt(self, rank, device_id, grad_denom):
oom = False
if self.loss is not None:
try:
# backward pass
self.loss.backward()
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory on GPU #{}, skipping batch'.format(device_id))
oom = True
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
self.optimizer.zero_grad()
else:
raise e
# all-reduce grads and rescale by grad_denom
self._all_reduce_and_rescale_grads(grad_denom)
# clip grads
if self.args.clip_norm > 0:
grad_norm = torch.nn.utils.clip_grad_norm(self.model.parameters(), self.args.clip_norm)
else:
grad_norm = math.sqrt(sum([p.grad.data.norm()**2 for p in self.model.parameters()]))
# take an optimization step
self.optimizer.step()
# reset loss
self.loss = None
return grad_norm, oom
def _all_reduce_and_rescale_grads(self, grad_denom, buffer_size=10485760):
"""All-reduce and rescale gradients in chunks of the specified size."""
grads = [p.grad.data for p in self.model.parameters() if p.requires_grad]
buffer_t = grads[0].new(math.ceil(buffer_size / grads[0].element_size())).zero_()
buffer = []
def all_reduce_buffer():
# copy grads into buffer_t
offset = 0
for g in buffer:
numel = g.numel()
buffer_t[offset:offset+numel].copy_(g.view(-1))
offset += numel
# all-reduce and rescale
nccl.all_reduce(buffer_t[:offset])
buffer_t.div_(grad_denom)
# copy all-reduced buffer back into grads
offset = 0
for g in buffer:
numel = g.numel()
g.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
filled = 0
for g in grads:
sz = g.numel() * g.element_size()
if sz > buffer_size:
# grad is bigger than buffer, all-reduce and rescale directly
nccl.all_reduce(g)
g.div_(grad_denom)
elif filled + sz > buffer_size:
# buffer is full, all-reduce and replace buffer with grad
all_reduce_buffer()
buffer = [g]
filled = sz
else:
# add grad to buffer
buffer.append(g)
filled += sz
if len(buffer) > 0:
all_reduce_buffer()
def valid_step(self, samples):
"""Do forward pass in parallel."""
# scatter sample across GPUs
self._scatter_samples(samples, volatile=True)
# forward pass
_sample_sizes, logging_outputs, ooms_fwd = Future.gen_tuple_list([
self.call_async(rank, '_async_forward', eval=True)
for rank in range(self.num_replicas)
])
assert sum(ooms_fwd) == 0
# aggregate logging output
logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
return logging_output
def get_lr(self):
"""Get the current learning rate."""
return self.call_async(0, '_async_get_lr').gen()
def _async_get_lr(self, rank, device_id):
return self.optimizer.param_groups[0]['lr']
def lr_step(self, val_loss=None, epoch=None):
"""Adjust the learning rate depending on the validation loss."""
lr = Future.gen_list([
self.call_async(rank, '_async_lr_step', val_loss=val_loss, epoch=epoch)
for rank in range(self.num_replicas)
])
return lr[0]
def _async_lr_step(self, rank, device_id, epoch, val_loss):
# update the learning rate
if self.args.force_anneal > 0:
self.lr_scheduler.step(epoch)
else:
self.lr_scheduler.step(val_loss, epoch)
return self.optimizer.param_groups[0]['lr']
def _scatter_samples(self, samples, volatile=False, replace_empty_samples=False):
"""Split and distribute a sample across GPUs."""
if not replace_empty_samples:
# pad with None until its size is equal to the number of replicas
samples = samples + [None]*(self.num_replicas - len(samples))
else:
# pad by cycling through the given samples
samples = list(islice(cycle(samples), self.num_replicas))
Future.gen_list([
self.call_async(rank, '_async_prepare_sample', sample=samples[rank], volatile=volatile)
for rank in range(self.num_replicas)
])
def _async_prepare_sample(self, rank, device_id, sample, volatile):
if sample is None:
self._sample = None
else:
if hasattr(torch.cuda, 'empty_cache'):
# clear the caching allocator if this is the largest sample we've seen
if sample['target'].size(0) > self._max_bsz_seen:
self._max_bsz_seen = sample['target'].size(0)
torch.cuda.empty_cache()
self._sample = utils.make_variable(sample, volatile=volatile, cuda_device=device_id)
| [
"torch.manual_seed",
"fairseq.nccl.all_reduce",
"itertools.cycle",
"fairseq.nccl.get_unique_id",
"torch.optim.lr_scheduler.LambdaLR",
"fairseq.utils.save_state",
"fairseq.nccl.initialize",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"fairseq.utils.load_model_state",
"torch.cuda.device_count",
"fairseq.utils.maybe_no_grad",
"torch.cuda.is_available",
"fairseq.utils.make_variable",
"torch.cuda.set_device",
"torch.cuda.empty_cache"
] | [((1696, 1716), 'fairseq.nccl.get_unique_id', 'nccl.get_unique_id', ([], {}), '()\n', (1714, 1716), False, 'from fairseq import nccl, utils\n'), ((2202, 2234), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device_id'], {}), '(device_id)\n', (2223, 2234), False, 'import torch\n'), ((2270, 2325), 'fairseq.nccl.initialize', 'nccl.initialize', (['self.num_replicas', 'nccl_uid', 'device_id'], {}), '(self.num_replicas, nccl_uid, device_id)\n', (2285, 2325), False, 'from fairseq import nccl, utils\n'), ((5925, 6064), 'fairseq.utils.save_state', 'utils.save_state', (['filename', 'self.args', 'self.model', 'self.criterion', 'self.optimizer', 'self.lr_scheduler', 'self._optim_history', 'extra_state'], {}), '(filename, self.args, self.model, self.criterion, self.\n optimizer, self.lr_scheduler, self._optim_history, extra_state)\n', (5941, 6064), False, 'from fairseq import nccl, utils\n'), ((6562, 6629), 'fairseq.utils.load_model_state', 'utils.load_model_state', (['filename', 'self.model'], {'cuda_device': 'device_id'}), '(filename, self.model, cuda_device=device_id)\n', (6584, 6629), False, 'from fairseq import nccl, utils\n'), ((7666, 7689), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (7683, 7689), False, 'import torch\n'), ((1539, 1564), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1562, 1564), False, 'import torch\n'), ((5011, 5043), 'torch.optim.lr_scheduler.LambdaLR', 'LambdaLR', (['self.optimizer', 'anneal'], {}), '(self.optimizer, anneal)\n', (5019, 5043), False, 'from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau\n'), ((5201, 5273), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['self.optimizer'], {'patience': '(0)', 'factor': 'self.args.lrshrink'}), '(self.optimizer, patience=0, factor=self.args.lrshrink)\n', (5218, 5273), False, 'from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau\n'), ((9270, 9295), 'fairseq.utils.maybe_no_grad', 'utils.maybe_no_grad', (['eval'], {}), '(eval)\n', (9289, 9295), False, 'from fairseq import nccl, utils\n'), ((11896, 11930), 'fairseq.nccl.all_reduce', 'nccl.all_reduce', (['buffer_t[:offset]'], {}), '(buffer_t[:offset])\n', (11911, 11930), False, 'from fairseq import nccl, utils\n'), ((15442, 15511), 'fairseq.utils.make_variable', 'utils.make_variable', (['sample'], {'volatile': 'volatile', 'cuda_device': 'device_id'}), '(sample, volatile=volatile, cuda_device=device_id)\n', (15461, 15511), False, 'from fairseq import nccl, utils\n'), ((12422, 12440), 'fairseq.nccl.all_reduce', 'nccl.all_reduce', (['g'], {}), '(g)\n', (12437, 12440), False, 'from fairseq import nccl, utils\n'), ((1434, 1459), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1457, 1459), False, 'import torch\n'), ((14730, 14744), 'itertools.cycle', 'cycle', (['samples'], {}), '(samples)\n', (14735, 14744), False, 'from itertools import cycle, islice\n'), ((15389, 15413), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (15411, 15413), False, 'import torch\n'), ((10595, 10619), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (10617, 10619), False, 'import torch\n'), ((9953, 9977), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (9975, 9977), False, 'import torch\n')] |
'''
Date: 12-17-2018
Problem description:
===================
This problem was asked by Google.
Given an array of integers and a number k, where 1 <= k <= length of the array,
compute the maximum values of each subarray of length k.
For example, given array = [10, 5, 2, 7, 8, 7] and k = 3,
we should get: [10, 7, 8, 8], since:
10 = max(10, 5, 2)
7 = max(5, 2, 7)
8 = max(2, 7, 8)
8 = max(7, 8, 7)
Do this in O(n) time and O(k) space. You can modify the input array in-place
and you do not need to store the results.
You can simply print them out as you compute them.
Algorithm:
=========
Input: list of integers and int k
Output: list of integers (the max of each length-k subarray)
Pseudo code:
1. Check edge cases
2. Slide a window of length k across the list and take the max of each window with the built-in max()
'''
import time
#
# brute force
#
def maxValInArray(arr, k):
''' check edge case '''
assert k >= 1
assert k <= len(arr)
if k == 1:
#print( arr )
return arr
else:
subarr = list()
listofmax = list()
while (len(arr) >= k):
x = 0
for x in range(x, (k+x)):
subarr.append(arr[x])
#listofmax.append(sorted(subarr, reverse=True)[:1])
listofmax.append(max(subarr))
subarr = []
arr.pop(0)
#print (listofmax)
return listofmax
#
# O(n) time and O(k) space
#
def maxValsList(arr, k):
''' check edge case '''
assert k >= 1
assert k <= len(arr)
if k == 1:
#print( arr )
return arr
else:
retarr = list()
while len(arr) >= k:
#print(max([a[i] for i in range(k)]))
retarr.append(max([arr[i] for i in range(k)]))
arr.pop(0)
return retarr
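#
# added sketch: true O(n) time and O(k) space using a deque of candidate indices
# (maxValsList above still rescans every window and pops from the front of the
# list, so it is not strictly O(n))
#
def maxValsDeque(arr, k):
    ''' check edge case '''
    assert k >= 1
    assert k <= len(arr)
    from collections import deque
    dq = deque()        # indices of candidates; their values decrease front-to-back
    result = list()
    for i, val in enumerate(arr):
        # drop indices that have slid out of the current window
        while dq and dq[0] <= i - k:
            dq.popleft()
        # drop smaller values; they can never be a window maximum again
        while dq and arr[dq[-1]] <= val:
            dq.pop()
        dq.append(i)
        if i >= k - 1:
            result.append(arr[dq[0]])
    return result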
def test_code():
A = [10, 5, 2, 7, 8, 7]
K = 3
assert maxValInArray(A, K) == [10, 7, 8, 8]
A = [10, 5, 2, 7, 8, 7]
assert maxValsList(A, K) == [10, 7, 8, 8]
A = [10, 5, 2, 7, 8, 7]
K = 1
assert maxValsList(A, K) == [10, 5, 2, 7, 8, 7]
if __name__ == "__main__":
A = [10, 5, 2, 7, 8, 7]
K = 3
print ("Original array: {}".format(A))
starttime = time.time()
print( maxValInArray(A, K))
endtime = time.time()
print("Elapsed time in brute force methob: {} secs".format(endtime - starttime))
A = [10, 5, 2, 7, 8, 7]
starttime = time.time()
print( maxValsList(A, K))
endtime = time.time()
print("Elapsed time in O(n) method: {} secs".format(endtime - starttime))
'''
Run-time output:
===============
$ python codechallenge-06.py
Original array: [10, 5, 2, 7, 8, 7]
[10, 7, 8, 8]
Elapsed time in brute force method: 0.000123023986816 secs
[10, 7, 8, 8]
Elapsed time in O(n) method: 0.000108003616333 secs
$ pytest codechallenge-06.py
========================================= test session starts ==========================================
platform linux2 -- Python 2.7.13, pytest-3.6.3, py-1.5.4, pluggy-0.6.0
rootdir: /home/markn/devel/py-src/DailyCodeChallenge, inifile:
collected 1 item
codechallenge-06.py . [100%]
======================================= 1 passed in 0.06 seconds =======================================
'''
| [
"time.time"
] | [((1929, 1940), 'time.time', 'time.time', ([], {}), '()\n', (1938, 1940), False, 'import time\n'), ((1982, 1993), 'time.time', 'time.time', ([], {}), '()\n', (1991, 1993), False, 'import time\n'), ((2115, 2126), 'time.time', 'time.time', ([], {}), '()\n', (2124, 2126), False, 'import time\n'), ((2165, 2176), 'time.time', 'time.time', ([], {}), '()\n', (2174, 2176), False, 'import time\n')] |
from pypy.interpreter.error import OperationError
from pypy.tool.pytest.appsupport import AppExceptionInfo
import py
import pypy
conftestpath = py.path.local(pypy.__file__).dirpath("conftest.py")
pytest_plugins = "pytester"
def test_appexecinfo(space):
try:
space.appexec([], "(): raise ValueError")
except OperationError as e:
appex = AppExceptionInfo(space, e)
else:
py.test.fail("did not raise!")
assert appex.exconly().find('ValueError') != -1
assert appex.exconly(tryshort=True).find('ValueError') != -1
assert appex.errisinstance(ValueError)
assert not appex.errisinstance(RuntimeError)
class A:
pass
assert not appex.errisinstance(A)
# this is used by test_wrapped_function_with_different_name below
def inc(self, x):
return x+1
class AppTestWithWrappedInterplevelAttributes:
def setup_class(cls):
space = cls.space
cls.w_some1 = space.wrap(42)
def setup_method(self, meth):
self.w_some2 = self.space.wrap(23)
def test_values_arrive(self):
assert self.some1 == 42
assert self.some2 == 23
def test_values_arrive2(self):
assert self.some1 == 42
def w_compute(self, x):
return x + 2
def test_equal(self):
assert self.compute(3) == 5
w_inc = inc
def test_wrapped_function_with_different_name(self):
assert self.inc(41) == 42
def test_app_test_blow(testdir):
conftestpath.copy(testdir.tmpdir)
sorter = testdir.inline_runsource("""class AppTestBlow:
def test_one(self): exec('blow')
""")
reports = sorter.getreports("pytest_runtest_logreport")
setup, ev, teardown = reports
assert ev.failed
assert setup.passed
assert teardown.passed
assert 'NameError' in ev.longrepr.reprcrash.message
assert 'blow' in ev.longrepr.reprcrash.message
| [
"py.path.local",
"pypy.tool.pytest.appsupport.AppExceptionInfo",
"py.test.fail"
] | [((144, 172), 'py.path.local', 'py.path.local', (['pypy.__file__'], {}), '(pypy.__file__)\n', (157, 172), False, 'import py\n'), ((407, 437), 'py.test.fail', 'py.test.fail', (['"""did not raise!"""'], {}), "('did not raise!')\n", (419, 437), False, 'import py\n'), ((362, 388), 'pypy.tool.pytest.appsupport.AppExceptionInfo', 'AppExceptionInfo', (['space', 'e'], {}), '(space, e)\n', (378, 388), False, 'from pypy.tool.pytest.appsupport import AppExceptionInfo\n')] |
import os
from asyncio import get_event_loop
from congratulations_app.startup_utils import (
get_module_names_path,
load_plugins,
read_module_names,
)
from fastapi_integration.app import AppFactory
from galo_ioc import FactoryContainerImpl, get_factory
from uvicorn import Config, Server
def main() -> None:
module_names_path = get_module_names_path()
module_names = read_module_names(module_names_path)
with FactoryContainerImpl():
loop = get_event_loop()
load_plugins(module_names)
app_factory = get_factory(AppFactory)
app = app_factory()
port = int(os.getenv("SERVER_PORT", "8080"))
config = Config(app=app, port=port, loop=loop)
server = Server(config)
loop.run_until_complete(server.serve())
if __name__ == "__main__":
main()
| [
"galo_ioc.FactoryContainerImpl",
"congratulations_app.startup_utils.load_plugins",
"os.getenv",
"congratulations_app.startup_utils.read_module_names",
"congratulations_app.startup_utils.get_module_names_path",
"galo_ioc.get_factory",
"uvicorn.Server",
"uvicorn.Config",
"asyncio.get_event_loop"
] | [((347, 370), 'congratulations_app.startup_utils.get_module_names_path', 'get_module_names_path', ([], {}), '()\n', (368, 370), False, 'from congratulations_app.startup_utils import get_module_names_path, load_plugins, read_module_names\n'), ((390, 426), 'congratulations_app.startup_utils.read_module_names', 'read_module_names', (['module_names_path'], {}), '(module_names_path)\n', (407, 426), False, 'from congratulations_app.startup_utils import get_module_names_path, load_plugins, read_module_names\n'), ((436, 458), 'galo_ioc.FactoryContainerImpl', 'FactoryContainerImpl', ([], {}), '()\n', (456, 458), False, 'from galo_ioc import FactoryContainerImpl, get_factory\n'), ((475, 491), 'asyncio.get_event_loop', 'get_event_loop', ([], {}), '()\n', (489, 491), False, 'from asyncio import get_event_loop\n'), ((500, 526), 'congratulations_app.startup_utils.load_plugins', 'load_plugins', (['module_names'], {}), '(module_names)\n', (512, 526), False, 'from congratulations_app.startup_utils import get_module_names_path, load_plugins, read_module_names\n'), ((549, 572), 'galo_ioc.get_factory', 'get_factory', (['AppFactory'], {}), '(AppFactory)\n', (560, 572), False, 'from galo_ioc import FactoryContainerImpl, get_factory\n'), ((672, 709), 'uvicorn.Config', 'Config', ([], {'app': 'app', 'port': 'port', 'loop': 'loop'}), '(app=app, port=port, loop=loop)\n', (678, 709), False, 'from uvicorn import Config, Server\n'), ((727, 741), 'uvicorn.Server', 'Server', (['config'], {}), '(config)\n', (733, 741), False, 'from uvicorn import Config, Server\n'), ((620, 652), 'os.getenv', 'os.getenv', (['"""SERVER_PORT"""', '"""8080"""'], {}), "('SERVER_PORT', '8080')\n", (629, 652), False, 'import os\n')] |
"""Proximal Policy Optimization (clip objective)."""
import os
import ray
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from torch.distributions import kl_divergence
from torch.nn.utils.rnn import pad_sequence
from copy import deepcopy
from time import time
class Buffer:
def __init__(self, discount=0.99):
self.discount = discount
self.clear()
def __len__(self):
return len(self.states)
def clear(self):
self.states = []
self.actions = []
self.rewards = []
self.values = []
self.returns = []
self.advantages = []
self.ep_returns = []
self.ep_lens = []
self.size = 0
self.traj_idx = [0]
self.buffer_ready = False
def push(self, state, action, reward, value, done=False):
self.states += [state]
self.actions += [action]
self.rewards += [reward]
self.values += [value]
self.size += 1
def end_trajectory(self, terminal_value=0):
self.traj_idx += [self.size]
rewards = self.rewards[self.traj_idx[-2]:self.traj_idx[-1]]
returns = []
R = terminal_value
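    # walk the just-finished trajectory backwards, accumulating discounted
    # returns; terminal_value bootstraps episodes cut off before a true terminal state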
for reward in reversed(rewards):
R = self.discount * R + reward
returns.insert(0, R)
self.returns += returns
self.ep_returns += [np.sum(rewards)]
self.ep_lens += [len(rewards)]
def _finish_buffer(self, mirror):
with torch.no_grad():
self.states = torch.Tensor(self.states)
self.actions = torch.Tensor(self.actions)
self.rewards = torch.Tensor(self.rewards)
self.returns = torch.Tensor(self.returns)
self.values = torch.Tensor(self.values)
if mirror is not None:
start = time()
squished = np.prod(list(self.states.size())[:-1])
state_dim = self.states.size()[-1]
state_squished = self.states.view(squished, state_dim).numpy()
self.mirror_states = torch.from_numpy(mirror(state_squished)).view(self.states.size())
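      # advantages = empirical returns minus the critic's value baseline,
      # normalized to roughly zero mean / unit variance for a more stable update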
a = self.returns - self.values
a = (a - a.mean()) / (a.std() + 1e-4)
self.advantages = a
self.buffer_ready = True
def sample(self, batch_size=64, recurrent=False, mirror=None):
if not self.buffer_ready:
self._finish_buffer(mirror)
if recurrent:
random_indices = SubsetRandomSampler(range(len(self.traj_idx)-1))
sampler = BatchSampler(random_indices, batch_size, drop_last=True)
for traj_indices in sampler:
states = [self.states[self.traj_idx[i]:self.traj_idx[i+1]] for i in traj_indices]
actions = [self.actions[self.traj_idx[i]:self.traj_idx[i+1]] for i in traj_indices]
returns = [self.returns[self.traj_idx[i]:self.traj_idx[i+1]] for i in traj_indices]
advantages = [self.advantages[self.traj_idx[i]:self.traj_idx[i+1]] for i in traj_indices]
traj_mask = [torch.ones_like(r) for r in returns]
states = pad_sequence(states, batch_first=False)
actions = pad_sequence(actions, batch_first=False)
returns = pad_sequence(returns, batch_first=False)
advantages = pad_sequence(advantages, batch_first=False)
traj_mask = pad_sequence(traj_mask, batch_first=False)
if mirror is None:
yield states, actions, returns, advantages, traj_mask
else:
mirror_states = [self.mirror_states[self.traj_idx[i]:self.traj_idx[i+1]] for i in traj_indices]
mirror_states = pad_sequence(mirror_states, batch_first=False)
yield states, mirror_states, actions, returns, advantages, traj_mask
else:
random_indices = SubsetRandomSampler(range(self.size))
sampler = BatchSampler(random_indices, batch_size, drop_last=True)
for i, idxs in enumerate(sampler):
states = self.states[idxs]
actions = self.actions[idxs]
returns = self.returns[idxs]
advantages = self.advantages[idxs]
if mirror is None:
yield states, actions, returns, advantages, 1
else:
mirror_states = self.mirror_states[idxs]
yield states, mirror_states, actions, returns, advantages, 1
def merge_buffers(buffers):
memory = Buffer()
for b in buffers:
offset = len(memory)
memory.states += b.states
memory.actions += b.actions
memory.rewards += b.rewards
memory.values += b.values
memory.returns += b.returns
memory.ep_returns += b.ep_returns
memory.ep_lens += b.ep_lens
memory.traj_idx += [offset + i for i in b.traj_idx[1:]]
memory.size += b.size
return memory
@ray.remote
class PPO_Worker:
def __init__(self, actor, critic, env_fn, gamma):
self.gamma = gamma
self.actor = deepcopy(actor)
self.critic = deepcopy(critic)
self.env = env_fn()
if hasattr(self.env, 'dynamics_randomization'):
self.dynamics_randomization = self.env.dynamics_randomization
else:
self.dynamics_randomization = False
def sync_policy(self, new_actor_params, new_critic_params, input_norm=None):
for p, new_p in zip(self.actor.parameters(), new_actor_params):
p.data.copy_(new_p)
for p, new_p in zip(self.critic.parameters(), new_critic_params):
p.data.copy_(new_p)
if input_norm is not None:
self.actor.welford_state_mean, self.actor.welford_state_mean_diff, self.actor.welford_state_n = input_norm
self.critic.copy_normalizer_stats(self.actor)
def collect_experience(self, max_traj_len, min_steps):
torch.set_num_threads(1)
with torch.no_grad():
start = time()
num_steps = 0
memory = Buffer(self.gamma)
actor = self.actor
critic = self.critic
while num_steps < min_steps:
self.env.dynamics_randomization = self.dynamics_randomization
state = torch.Tensor(self.env.reset())
done = False
value = 0
traj_len = 0
if hasattr(actor, 'init_hidden_state'):
actor.init_hidden_state()
if hasattr(critic, 'init_hidden_state'):
critic.init_hidden_state()
while not done and traj_len < max_traj_len:
state = torch.Tensor(state)
action = actor(state, deterministic=False)
value = critic(state)
next_state, reward, done, _ = self.env.step(action.numpy())
reward = np.array([reward])
memory.push(state.numpy(), action.numpy(), reward, value.numpy())
state = next_state
traj_len += 1
num_steps += 1
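        # if the episode was cut off by max_traj_len rather than done, bootstrap
        # the remaining return with the critic's value estimate of the final state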
value = (not done) * critic(torch.Tensor(state)).numpy()
memory.end_trajectory(terminal_value=value)
return memory
def evaluate(self, trajs=1, max_traj_len=400):
torch.set_num_threads(1)
with torch.no_grad():
ep_returns = []
for traj in range(trajs):
self.env.dynamics_randomization = False
state = torch.Tensor(self.env.reset())
done = False
traj_len = 0
ep_return = 0
if hasattr(self.actor, 'init_hidden_state'):
self.actor.init_hidden_state()
while not done and traj_len < max_traj_len:
action = self.actor(state, deterministic=True)
next_state, reward, done, _ = self.env.step(action.numpy())
state = torch.Tensor(next_state)
ep_return += reward
traj_len += 1
ep_returns += [ep_return]
return np.mean(ep_returns)
class PPO:
def __init__(self, actor, critic, env_fn, args):
self.actor = actor
self.old_actor = deepcopy(actor)
self.critic = critic
if actor.is_recurrent or critic.is_recurrent:
self.recurrent = True
else:
self.recurrent = False
self.actor_optim = optim.Adam(self.actor.parameters(), lr=args.a_lr, eps=args.eps)
self.critic_optim = optim.Adam(self.critic.parameters(), lr=args.c_lr, eps=args.eps)
self.env_fn = env_fn
self.discount = args.discount
self.entropy_coeff = args.entropy_coeff
self.grad_clip = args.grad_clip
self.sparsity = args.sparsity
self.mirror = args.mirror
self.env = env_fn()
if not ray.is_initialized():
if args.redis is not None:
ray.init(redis_address=args.redis)
else:
ray.init(num_cpus=args.workers)
self.workers = [PPO_Worker.remote(actor, critic, env_fn, args.discount) for _ in range(args.workers)]
def update_policy(self, states, actions, returns, advantages, mask, mirror_states=None):
# get old action distribution and log probabilities
with torch.no_grad():
old_pdf = self.old_actor.pdf(states)
old_log_probs = old_pdf.log_prob(actions).sum(-1, keepdim=True)
# if we are using sparsity constraint, set the internal boolean for saving memory to true
if self.sparsity > 0:
self.actor.calculate_norm = True
# get new action distribution and log probabilities
pdf = self.actor.pdf(states)
log_probs = pdf.log_prob(actions).sum(-1, keepdim=True)
if self.sparsity > 0:
self.actor.calculate_norm = False
latent_norm = self.actor.get_latent_norm()
else:
latent_norm = torch.zeros(1)
sparsity_loss = self.sparsity * latent_norm
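    # PPO clipped surrogate: the exponentiated log-prob difference is the
    # importance ratio pi_new/pi_old; taking the min of the unclipped (CPI) and
    # clipped terms (fixed clip range 0.2 via clamp(0.8, 1.2)) bounds how far a
    # single update can move the policy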
ratio = ((log_probs - old_log_probs)).exp()
cpi_loss = ratio * advantages * mask
clip_loss = ratio.clamp(0.8, 1.2) * advantages * mask
actor_loss = -torch.min(cpi_loss, clip_loss).mean()
critic_loss = 0.5 * ((returns - self.critic(states)) * mask).pow(2).mean()
entropy_penalty = -(self.entropy_coeff * pdf.entropy() * mask).mean()
if mirror_states is not None:
mirror_time = time()
with torch.no_grad():
action_fn = self.env.mirror_action
squished = np.prod(list(states.size())[:-1])
action_dim = actions.size()[-1]
action_squished = self.actor(mirror_states).view(squished, action_dim).numpy()
mirrored_actions = torch.from_numpy(action_fn(action_squished)).view(actions.size())
unmirrored_actions = pdf.mean
mirror_loss = self.mirror * 4 * (unmirrored_actions - mirrored_actions).pow(2).mean()
#print("{:3.2f}s to calculate mirror loss".format(time() - mirror_time))
else:
mirror_loss = torch.zeros(1)
self.actor_optim.zero_grad()
self.critic_optim.zero_grad()
(actor_loss + entropy_penalty + mirror_loss + sparsity_loss).backward()
critic_loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic.parameters(), max_norm=self.grad_clip)
torch.nn.utils.clip_grad_norm_(self.actor.parameters(), max_norm=self.grad_clip)
self.actor_optim.step()
self.critic_optim.step()
with torch.no_grad():
return kl_divergence(pdf, old_pdf).mean().numpy(), ((actor_loss + entropy_penalty).item(), critic_loss.item(), mirror_loss.item(), latent_norm.item())
def do_iteration(self, num_steps, max_traj_len, epochs, kl_thresh=0.02, verbose=True, batch_size=64, mirror=False):
self.old_actor.load_state_dict(self.actor.state_dict())
start = time()
actor_param_id = ray.put(list(self.actor.parameters()))
critic_param_id = ray.put(list(self.critic.parameters()))
norm_id = ray.put([self.actor.welford_state_mean, self.actor.welford_state_mean_diff, self.actor.welford_state_n])
steps = max(num_steps // len(self.workers), max_traj_len)
for w in self.workers:
w.sync_policy.remote(actor_param_id, critic_param_id, input_norm=norm_id)
if verbose:
print("\t{:5.4f}s to copy policy params to workers.".format(time() - start))
eval_reward = np.mean(ray.get([w.evaluate.remote(trajs=1, max_traj_len=max_traj_len) for w in self.workers]))
start = time()
buffers = ray.get([w.collect_experience.remote(max_traj_len, steps) for w in self.workers])
memory = merge_buffers(buffers)
total_steps = len(memory)
elapsed = time() - start
if verbose:
print("\t{:3.2f}s to collect {:6n} timesteps | {:3.2}k/s.".format(elapsed, total_steps, (total_steps/1000)/elapsed))
sample_rate = (total_steps/1000)/elapsed
if self.mirror > 0:
state_fn = self.env.mirror_state
else:
state_fn = None
done = False
a_loss = []
c_loss = []
m_loss = []
s_loss = []
kls = []
update_time = time()
torch.set_num_threads(4)
for epoch in range(epochs):
epoch_start = time()
for batch in memory.sample(batch_size=batch_size, recurrent=self.recurrent, mirror=state_fn):
if state_fn is not None:
states, mirror_states, actions, returns, advantages, mask = batch
else:
mirror_states = None
states, actions, returns, advantages, mask = batch
kl, losses = self.update_policy(states, actions, returns, advantages, mask, mirror_states=mirror_states)
kls += [kl]
a_loss += [losses[0]]
c_loss += [losses[1]]
m_loss += [losses[2]]
s_loss += [losses[3]]
if max(kls) > kl_thresh:
done = True
print("\t\tbatch had kl of {} (threshold {}), stopping optimization early.".format(max(kls), kl_thresh))
break
if verbose:
print("\t\tepoch {:2d} in {:3.2f}s, kl {:6.5f}, actor loss {:6.3f}, critic loss {:6.3f}".format(epoch+1, time() - epoch_start, np.mean(kls), np.mean(a_loss), np.mean(c_loss)))
if done:
break
update_time = time() - update_time
if verbose:
print("\t{:3.2f}s to update policy.".format(update_time))
return eval_reward, np.mean(kls), np.mean(a_loss), np.mean(c_loss), np.mean(m_loss), np.mean(s_loss), len(memory), (sample_rate, update_time)
def run_experiment(args):
from util.env import env_factory, train_normalizer
from util.log import create_logger
from policies.critic import FF_V, LSTM_V, GRU_V
from policies.actor import FF_Stochastic_Actor, LSTM_Stochastic_Actor, GRU_Stochastic_Actor, QBN_GRU_Stochastic_Actor
import locale, os
locale.setlocale(locale.LC_ALL, '')
# wrapper function for creating parallelized envs
env_fn = env_factory(args.env)
obs_dim = env_fn().observation_space.shape[0]
action_dim = env_fn().action_space.shape[0]
# Set seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
std = torch.ones(action_dim)*args.std
layers = [int(x) for x in args.layers.split(',')]
if args.arch.lower() == 'lstm':
policy = LSTM_Stochastic_Actor(obs_dim, action_dim, env_name=args.env, fixed_std=std, bounded=False, layers=layers)
critic = LSTM_V(obs_dim, layers=layers)
elif args.arch.lower() == 'gru':
policy = GRU_Stochastic_Actor(obs_dim, action_dim, env_name=args.env, fixed_std=std, bounded=False, layers=layers)
critic = GRU_V(obs_dim, layers=layers)
elif args.arch.lower() == 'qbngru':
policy = QBN_GRU_Stochastic_Actor(obs_dim, action_dim, env_name=args.env, fixed_std=std, bounded=False, layers=layers)
critic = GRU_V(obs_dim, layers=layers)
elif args.arch.lower() == 'ff':
policy = FF_Stochastic_Actor(obs_dim, action_dim, env_name=args.env, fixed_std=std, bounded=False, layers=layers)
critic = FF_V(obs_dim, layers=layers)
else:
raise RuntimeError
policy.legacy = False
env = env_fn()
print("Collecting normalization statistics with {} states...".format(args.prenormalize_steps))
train_normalizer(policy, args.prenormalize_steps, max_traj_len=args.traj_len, noise=1)
critic.copy_normalizer_stats(policy)
policy.train(0)
critic.train(0)
algo = PPO(policy, critic, env_fn, args)
# create a tensorboard logging object
if not args.nolog:
logger = create_logger(args)
else:
logger = None
if args.save_actor is None and logger is not None:
args.save_actor = os.path.join(logger.dir, 'actor.pt')
if args.save_critic is None and logger is not None:
args.save_critic = os.path.join(logger.dir, 'critic.pt')
print()
print("Proximal Policy Optimization:")
print("\tseed: {}".format(args.seed))
print("\tenv: {}".format(args.env))
print("\ttimesteps: {:n}".format(int(args.timesteps)))
print("\titeration steps: {:n}".format(int(args.num_steps)))
print("\tprenormalize steps: {}".format(int(args.prenormalize_steps)))
print("\ttraj_len: {}".format(args.traj_len))
print("\tdiscount: {}".format(args.discount))
print("\tactor_lr: {}".format(args.a_lr))
print("\tcritic_lr: {}".format(args.c_lr))
print("\tadam eps: {}".format(args.eps))
print("\tentropy coeff: {}".format(args.entropy_coeff))
print("\tgrad clip: {}".format(args.grad_clip))
print("\tbatch size: {}".format(args.batch_size))
print("\tepochs: {}".format(args.epochs))
print("\tworkers: {}".format(args.workers))
print()
itr = 0
timesteps = 0
best_reward = None
while timesteps < args.timesteps:
eval_reward, kl, a_loss, c_loss, m_loss, s_loss, steps, (times) = algo.do_iteration(args.num_steps, args.traj_len, args.epochs, batch_size=args.batch_size, kl_thresh=args.kl, mirror=args.mirror)
timesteps += steps
print("iter {:4d} | return: {:5.2f} | KL {:5.4f} | ".format(itr, eval_reward, kl, timesteps), end='')
if m_loss != 0:
print("mirror {:6.5f} | ".format(m_loss), end='')
if s_loss != 0:
print("sparsity {:6.5f} | ".format(s_loss), end='')
print("timesteps {:n}".format(timesteps))
if best_reward is None or eval_reward > best_reward:
print("\t(best policy so far! saving to {})".format(args.save_actor))
best_reward = eval_reward
if args.save_actor is not None:
torch.save(algo.actor, args.save_actor)
if args.save_critic is not None:
torch.save(algo.critic, args.save_critic)
if logger is not None:
logger.add_scalar(args.env + '/kl', kl, timesteps)
logger.add_scalar(args.env + '/return', eval_reward, timesteps)
logger.add_scalar(args.env + '/actor loss', a_loss, timesteps)
logger.add_scalar(args.env + '/critic loss', c_loss, timesteps)
logger.add_scalar(args.env + '/mirror loss', m_loss, timesteps)
logger.add_scalar(args.env + '/sparsity loss', s_loss, timesteps)
logger.add_scalar(args.env + '/sample rate', times[0], timesteps)
logger.add_scalar(args.env + '/update time', times[1], timesteps)
itr += 1
print("Finished ({} of {}).".format(timesteps, args.timesteps))
| [
"ray.is_initialized",
"torch.nn.utils.rnn.pad_sequence",
"torch.utils.data.sampler.BatchSampler",
"torch.min",
"numpy.array",
"copy.deepcopy",
"ray.init",
"numpy.mean",
"util.env.env_factory",
"policies.critic.LSTM_V",
"torch.set_num_threads",
"policies.actor.FF_Stochastic_Actor",
"torch.distributions.kl_divergence",
"numpy.random.seed",
"util.env.train_normalizer",
"policies.actor.QBN_GRU_Stochastic_Actor",
"policies.critic.FF_V",
"torch.ones_like",
"locale.setlocale",
"policies.critic.GRU_V",
"torch.Tensor",
"torch.save",
"util.log.create_logger",
"time.time",
"policies.actor.GRU_Stochastic_Actor",
"torch.manual_seed",
"policies.actor.LSTM_Stochastic_Actor",
"os.path.join",
"numpy.sum",
"ray.put",
"torch.no_grad",
"torch.zeros",
"torch.ones"
] | [((14321, 14356), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '""""""'], {}), "(locale.LC_ALL, '')\n", (14337, 14356), False, 'import locale, os\n'), ((14421, 14442), 'util.env.env_factory', 'env_factory', (['args.env'], {}), '(args.env)\n', (14432, 14442), False, 'from util.env import env_factory, train_normalizer\n'), ((14554, 14582), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (14571, 14582), False, 'import torch\n'), ((14585, 14610), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (14599, 14610), True, 'import numpy as np\n'), ((15671, 15762), 'util.env.train_normalizer', 'train_normalizer', (['policy', 'args.prenormalize_steps'], {'max_traj_len': 'args.traj_len', 'noise': '(1)'}), '(policy, args.prenormalize_steps, max_traj_len=args.\n traj_len, noise=1)\n', (15687, 15762), False, 'from util.env import env_factory, train_normalizer\n'), ((4819, 4834), 'copy.deepcopy', 'deepcopy', (['actor'], {}), '(actor)\n', (4827, 4834), False, 'from copy import deepcopy\n'), ((4853, 4869), 'copy.deepcopy', 'deepcopy', (['critic'], {}), '(critic)\n', (4861, 4869), False, 'from copy import deepcopy\n'), ((5600, 5624), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (5621, 5624), False, 'import torch\n'), ((6820, 6844), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (6841, 6844), False, 'import torch\n'), ((7505, 7524), 'numpy.mean', 'np.mean', (['ep_returns'], {}), '(ep_returns)\n', (7512, 7524), True, 'import numpy as np\n'), ((7639, 7654), 'copy.deepcopy', 'deepcopy', (['actor'], {}), '(actor)\n', (7647, 7654), False, 'from copy import deepcopy\n'), ((11296, 11302), 'time.time', 'time', ([], {}), '()\n', (11300, 11302), False, 'from time import time\n'), ((11446, 11554), 'ray.put', 'ray.put', (['[self.actor.welford_state_mean, self.actor.welford_state_mean_diff, self.\n actor.welford_state_n]'], {}), '([self.actor.welford_state_mean, self.actor.welford_state_mean_diff,\n self.actor.welford_state_n])\n', (11453, 11554), False, 'import ray\n'), ((11963, 11969), 'time.time', 'time', ([], {}), '()\n', (11967, 11969), False, 'from time import time\n'), ((12594, 12600), 'time.time', 'time', ([], {}), '()\n', (12598, 12600), False, 'from time import time\n'), ((12607, 12631), 'torch.set_num_threads', 'torch.set_num_threads', (['(4)'], {}), '(4)\n', (12628, 12631), False, 'import torch\n'), ((14620, 14642), 'torch.ones', 'torch.ones', (['action_dim'], {}), '(action_dim)\n', (14630, 14642), False, 'import torch\n'), ((14753, 14863), 'policies.actor.LSTM_Stochastic_Actor', 'LSTM_Stochastic_Actor', (['obs_dim', 'action_dim'], {'env_name': 'args.env', 'fixed_std': 'std', 'bounded': '(False)', 'layers': 'layers'}), '(obs_dim, action_dim, env_name=args.env, fixed_std=std,\n bounded=False, layers=layers)\n', (14774, 14863), False, 'from policies.actor import FF_Stochastic_Actor, LSTM_Stochastic_Actor, GRU_Stochastic_Actor, QBN_GRU_Stochastic_Actor\n'), ((14873, 14903), 'policies.critic.LSTM_V', 'LSTM_V', (['obs_dim'], {'layers': 'layers'}), '(obs_dim, layers=layers)\n', (14879, 14903), False, 'from policies.critic import FF_V, LSTM_V, GRU_V\n'), ((15954, 15973), 'util.log.create_logger', 'create_logger', (['args'], {}), '(args)\n', (15967, 15973), False, 'from util.log import create_logger\n'), ((16076, 16112), 'os.path.join', 'os.path.join', (['logger.dir', '"""actor.pt"""'], {}), "(logger.dir, 'actor.pt')\n", (16088, 16112), False, 'import locale, os\n'), ((16191, 16228), 
'os.path.join', 'os.path.join', (['logger.dir', '"""critic.pt"""'], {}), "(logger.dir, 'critic.pt')\n", (16203, 16228), False, 'import locale, os\n'), ((1379, 1394), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', (1385, 1394), True, 'import numpy as np\n'), ((1480, 1495), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1493, 1495), False, 'import torch\n'), ((1518, 1543), 'torch.Tensor', 'torch.Tensor', (['self.states'], {}), '(self.states)\n', (1530, 1543), False, 'import torch\n'), ((1565, 1591), 'torch.Tensor', 'torch.Tensor', (['self.actions'], {}), '(self.actions)\n', (1577, 1591), False, 'import torch\n'), ((1613, 1639), 'torch.Tensor', 'torch.Tensor', (['self.rewards'], {}), '(self.rewards)\n', (1625, 1639), False, 'import torch\n'), ((1661, 1687), 'torch.Tensor', 'torch.Tensor', (['self.returns'], {}), '(self.returns)\n', (1673, 1687), False, 'import torch\n'), ((1709, 1734), 'torch.Tensor', 'torch.Tensor', (['self.values'], {}), '(self.values)\n', (1721, 1734), False, 'import torch\n'), ((2455, 2511), 'torch.utils.data.sampler.BatchSampler', 'BatchSampler', (['random_indices', 'batch_size'], {'drop_last': '(True)'}), '(random_indices, batch_size, drop_last=True)\n', (2467, 2511), False, 'from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler\n'), ((3777, 3833), 'torch.utils.data.sampler.BatchSampler', 'BatchSampler', (['random_indices', 'batch_size'], {'drop_last': '(True)'}), '(random_indices, batch_size, drop_last=True)\n', (3789, 3833), False, 'from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler\n'), ((5634, 5649), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5647, 5649), False, 'import torch\n'), ((5665, 5671), 'time.time', 'time', ([], {}), '()\n', (5669, 5671), False, 'from time import time\n'), ((6854, 6869), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6867, 6869), False, 'import torch\n'), ((8285, 8305), 'ray.is_initialized', 'ray.is_initialized', ([], {}), '()\n', (8303, 8305), False, 'import ray\n'), ((8716, 8731), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8729, 8731), False, 'import torch\n'), ((9340, 9354), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (9351, 9354), False, 'import torch\n'), ((9844, 9850), 'time.time', 'time', ([], {}), '()\n', (9848, 9850), False, 'from time import time\n'), ((10478, 10492), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (10489, 10492), False, 'import torch\n'), ((10922, 10937), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10935, 10937), False, 'import torch\n'), ((12155, 12161), 'time.time', 'time', ([], {}), '()\n', (12159, 12161), False, 'from time import time\n'), ((12688, 12694), 'time.time', 'time', ([], {}), '()\n', (12692, 12694), False, 'from time import time\n'), ((13755, 13761), 'time.time', 'time', ([], {}), '()\n', (13759, 13761), False, 'from time import time\n'), ((13887, 13899), 'numpy.mean', 'np.mean', (['kls'], {}), '(kls)\n', (13894, 13899), True, 'import numpy as np\n'), ((13901, 13916), 'numpy.mean', 'np.mean', (['a_loss'], {}), '(a_loss)\n', (13908, 13916), True, 'import numpy as np\n'), ((13918, 13933), 'numpy.mean', 'np.mean', (['c_loss'], {}), '(c_loss)\n', (13925, 13933), True, 'import numpy as np\n'), ((13935, 13950), 'numpy.mean', 'np.mean', (['m_loss'], {}), '(m_loss)\n', (13942, 13950), True, 'import numpy as np\n'), ((13952, 13967), 'numpy.mean', 'np.mean', (['s_loss'], {}), '(s_loss)\n', (13959, 13967), True, 'import numpy as np\n'), ((14952, 15061), 'policies.actor.GRU_Stochastic_Actor', 
'GRU_Stochastic_Actor', (['obs_dim', 'action_dim'], {'env_name': 'args.env', 'fixed_std': 'std', 'bounded': '(False)', 'layers': 'layers'}), '(obs_dim, action_dim, env_name=args.env, fixed_std=std,\n bounded=False, layers=layers)\n', (14972, 15061), False, 'from policies.actor import FF_Stochastic_Actor, LSTM_Stochastic_Actor, GRU_Stochastic_Actor, QBN_GRU_Stochastic_Actor\n'), ((15071, 15100), 'policies.critic.GRU_V', 'GRU_V', (['obs_dim'], {'layers': 'layers'}), '(obs_dim, layers=layers)\n', (15076, 15100), False, 'from policies.critic import FF_V, LSTM_V, GRU_V\n'), ((1781, 1787), 'time.time', 'time', ([], {}), '()\n', (1785, 1787), False, 'from time import time\n'), ((3021, 3060), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['states'], {'batch_first': '(False)'}), '(states, batch_first=False)\n', (3033, 3060), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((3086, 3126), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['actions'], {'batch_first': '(False)'}), '(actions, batch_first=False)\n', (3098, 3126), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((3151, 3191), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['returns'], {'batch_first': '(False)'}), '(returns, batch_first=False)\n', (3163, 3191), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((3216, 3259), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['advantages'], {'batch_first': '(False)'}), '(advantages, batch_first=False)\n', (3228, 3259), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((3281, 3323), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['traj_mask'], {'batch_first': '(False)'}), '(traj_mask, batch_first=False)\n', (3293, 3323), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((8352, 8386), 'ray.init', 'ray.init', ([], {'redis_address': 'args.redis'}), '(redis_address=args.redis)\n', (8360, 8386), False, 'import ray\n'), ((8411, 8442), 'ray.init', 'ray.init', ([], {'num_cpus': 'args.workers'}), '(num_cpus=args.workers)\n', (8419, 8442), False, 'import ray\n'), ((9864, 9879), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9877, 9879), False, 'import torch\n'), ((15152, 15266), 'policies.actor.QBN_GRU_Stochastic_Actor', 'QBN_GRU_Stochastic_Actor', (['obs_dim', 'action_dim'], {'env_name': 'args.env', 'fixed_std': 'std', 'bounded': '(False)', 'layers': 'layers'}), '(obs_dim, action_dim, env_name=args.env, fixed_std=\n std, bounded=False, layers=layers)\n', (15176, 15266), False, 'from policies.actor import FF_Stochastic_Actor, LSTM_Stochastic_Actor, GRU_Stochastic_Actor, QBN_GRU_Stochastic_Actor\n'), ((15275, 15304), 'policies.critic.GRU_V', 'GRU_V', (['obs_dim'], {'layers': 'layers'}), '(obs_dim, layers=layers)\n', (15280, 15304), False, 'from policies.critic import FF_V, LSTM_V, GRU_V\n'), ((18002, 18041), 'torch.save', 'torch.save', (['algo.actor', 'args.save_actor'], {}), '(algo.actor, args.save_actor)\n', (18012, 18041), False, 'import torch\n'), ((18096, 18137), 'torch.save', 'torch.save', (['algo.critic', 'args.save_critic'], {}), '(algo.critic, args.save_critic)\n', (18106, 18137), False, 'import torch\n'), ((2962, 2980), 'torch.ones_like', 'torch.ones_like', (['r'], {}), '(r)\n', (2977, 2980), False, 'import torch\n'), ((3563, 3609), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['mirror_states'], {'batch_first': '(False)'}), '(mirror_states, batch_first=False)\n', (3575, 3609), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((6239, 6258), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (6251, 
6258), False, 'import torch\n'), ((6443, 6461), 'numpy.array', 'np.array', (['[reward]'], {}), '([reward])\n', (6451, 6461), True, 'import numpy as np\n'), ((7380, 7404), 'torch.Tensor', 'torch.Tensor', (['next_state'], {}), '(next_state)\n', (7392, 7404), False, 'import torch\n'), ((9588, 9618), 'torch.min', 'torch.min', (['cpi_loss', 'clip_loss'], {}), '(cpi_loss, clip_loss)\n', (9597, 9618), False, 'import torch\n'), ((15352, 15460), 'policies.actor.FF_Stochastic_Actor', 'FF_Stochastic_Actor', (['obs_dim', 'action_dim'], {'env_name': 'args.env', 'fixed_std': 'std', 'bounded': '(False)', 'layers': 'layers'}), '(obs_dim, action_dim, env_name=args.env, fixed_std=std,\n bounded=False, layers=layers)\n', (15371, 15460), False, 'from policies.actor import FF_Stochastic_Actor, LSTM_Stochastic_Actor, GRU_Stochastic_Actor, QBN_GRU_Stochastic_Actor\n'), ((15470, 15498), 'policies.critic.FF_V', 'FF_V', (['obs_dim'], {'layers': 'layers'}), '(obs_dim, layers=layers)\n', (15474, 15498), False, 'from policies.critic import FF_V, LSTM_V, GRU_V\n'), ((11814, 11820), 'time.time', 'time', ([], {}), '()\n', (11818, 11820), False, 'from time import time\n'), ((13651, 13663), 'numpy.mean', 'np.mean', (['kls'], {}), '(kls)\n', (13658, 13663), True, 'import numpy as np\n'), ((13665, 13680), 'numpy.mean', 'np.mean', (['a_loss'], {}), '(a_loss)\n', (13672, 13680), True, 'import numpy as np\n'), ((13682, 13697), 'numpy.mean', 'np.mean', (['c_loss'], {}), '(c_loss)\n', (13689, 13697), True, 'import numpy as np\n'), ((13629, 13635), 'time.time', 'time', ([], {}), '()\n', (13633, 13635), False, 'from time import time\n'), ((6664, 6683), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (6676, 6683), False, 'import torch\n'), ((10954, 10981), 'torch.distributions.kl_divergence', 'kl_divergence', (['pdf', 'old_pdf'], {}), '(pdf, old_pdf)\n', (10967, 10981), False, 'from torch.distributions import kl_divergence\n')] |
import decimal
from boto3.dynamodb.conditions import Key
from botocore.exceptions import ClientError
from . import dynamodb
TABLE_NAME = "linebot-happybirthdayleaderboard-leaderdoards"
INDEX_NAME = "group_id-amount-index"
def create_table():
try:
response = dynamodb.create_table(
TableName=TABLE_NAME,
KeySchema=[
{"AttributeName": "group_id", "KeyType": "HASH"},
{"AttributeName": "user_id", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "group_id", "AttributeType": "S"},
{"AttributeName": "user_id", "AttributeType": "S"},
],
GlobalSecondaryIndexes=[
{
"IndexName": "group_id-amount-index",
"KeySchema": [
{"AttributeName": "group_id", "KeyType": "HASH"},
{"AttributeName": "amount", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
}
],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
)
except ClientError:
raise
else:
return response
def get_table():
try:
response = dynamodb.Table(TABLE_NAME)
except ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
response = create_table()
else:
raise
return response
def update_amount(group_id: str, user_id: str):
table = get_table()
response = table.get_item(Key={"group_id": group_id, "user_id": user_id})
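    # create the row with a zero counter the first time this user appears in the
    # group, then increment atomically with an UpdateExpression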
if "Item" not in response:
table.put_item(
Item={
"group_id": group_id,
"user_id": user_id,
"amount": decimal.Decimal(0),
}
)
table.update_item(
Key={"group_id": group_id, "user_id": user_id},
UpdateExpression="set amount = amount + :val",
ExpressionAttributeValues={":val": decimal.Decimal(1)},
ReturnValues="UPDATED_NEW",
)
def get_list_of_amount(group_id: str):
table = get_table()
return table.query(
IndexName=INDEX_NAME,
KeyConditionExpression=Key("group_id").eq(group_id),
ScanIndexForward=False,
)
| [
"decimal.Decimal",
"boto3.dynamodb.conditions.Key"
] | [((2080, 2098), 'decimal.Decimal', 'decimal.Decimal', (['(1)'], {}), '(1)\n', (2095, 2098), False, 'import decimal\n'), ((1859, 1877), 'decimal.Decimal', 'decimal.Decimal', (['(0)'], {}), '(0)\n', (1874, 1877), False, 'import decimal\n'), ((2293, 2308), 'boto3.dynamodb.conditions.Key', 'Key', (['"""group_id"""'], {}), "('group_id')\n", (2296, 2308), False, 'from boto3.dynamodb.conditions import Key\n')] |
import cmath
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.signal as signal
import csv
from .Periodogram import Periodogram
sns.set()
class AveragedPeriodogram():
def __init__(self):
self.f = None
self.default_f = np.linspace(0, 0.5, 500)
self.x = None
self.P = None
self.L = None
self.K = None
self.per = Periodogram()
def estimate(self, x, f=None, L=None):
'''
        Estimates P_xx for a given signal sequence x by dividing it into segments.
        If the length of a segment is not given, it defaults to the length of x.
Args:
x (numpy array of doubles): Signal
            f (numpy array of doubles in range [0, 0.5]): Frequency
L (integer): The length of each segment
'''
# Init values.
if f is None:
self.f = self.default_f
else:
self.f = f
self.x = x
self.N = len(self.x)
self.P = np.zeros(len(self.f))
if L is None or L > self.N:
self.L = self.N
else:
self.L = L
self.K = self.N // self.L
        # Calculate P: average the periodograms of the K non-overlapping length-L segments (Bartlett's method).
for num_segment in range(self.K):
x_segment = x[num_segment*self.L : (num_segment+1)*self.L]
self.per.estimate(x_segment, self.f)
self.P = np.add(self.P, self.per['P'])
self.P = self.P / self.K
def plot(self):
'''
Plots estimated P.
'''
# Check if anything is estimated.
if self.f is None or self.P is None:
return
plt.figure()
plt.semilogy(self.f, self.P)
plt.title('Averaged periodogram estimation')
plt.xlabel('f [Hz]')
plt.ylabel('P')
plt.show()
def compare(self, x, L=None):
'''
        Compares with periodogram from scipy.signal by plotting them both.
Args:
x (numpy array of doubles): Signal
L (integer): The length of each segment
'''
f_per, P_per = signal.welch(x, scaling='spectrum', nperseg=L, noverlap=0, window='boxcar')
self.estimate(x, f_per, L)
# Plot them together.
plt.figure()
plt.semilogy(self.f, self.P, 'b', label='averaged periodogram')
plt.semilogy(f_per, P_per, 'r--', label='scipy.signal')
plt.legend()
        plt.title('Averaged periodogram comparison')
plt.xlabel('f [Hz]')
plt.ylabel('P')
plt.ylim(bottom=1e-5)
plt.show()
def __getitem__(self, key):
'''
        Returns the value for the given key, or None if the key is not allowed.
'''
if key == 'f':
return self.f
if key == 'P':
return self.P
if key == 'x':
return self.x
if key == 'L':
return self.L
return None | [
"matplotlib.pyplot.semilogy",
"seaborn.set",
"scipy.signal.welch",
"numpy.add",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((165, 174), 'seaborn.set', 'sns.set', ([], {}), '()\n', (172, 174), True, 'import seaborn as sns\n'), ((277, 301), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5)', '(500)'], {}), '(0, 0.5, 500)\n', (288, 301), True, 'import numpy as np\n'), ((1672, 1684), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1682, 1684), True, 'import matplotlib.pyplot as plt\n'), ((1693, 1721), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['self.f', 'self.P'], {}), '(self.f, self.P)\n', (1705, 1721), True, 'import matplotlib.pyplot as plt\n'), ((1730, 1774), 'matplotlib.pyplot.title', 'plt.title', (['"""Averaged periodogram estimation"""'], {}), "('Averaged periodogram estimation')\n", (1739, 1774), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1803), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""f [Hz]"""'], {}), "('f [Hz]')\n", (1793, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1812, 1827), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P"""'], {}), "('P')\n", (1822, 1827), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1844, 1846), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2209), 'scipy.signal.welch', 'signal.welch', (['x'], {'scaling': '"""spectrum"""', 'nperseg': 'L', 'noverlap': '(0)', 'window': '"""boxcar"""'}), "(x, scaling='spectrum', nperseg=L, noverlap=0, window='boxcar')\n", (2146, 2209), True, 'import scipy.signal as signal\n'), ((2295, 2307), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2305, 2307), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2379), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['self.f', 'self.P', '"""b"""'], {'label': '"""averaged periodogram"""'}), "(self.f, self.P, 'b', label='averaged periodogram')\n", (2328, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2388, 2443), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['f_per', 'P_per', '"""r--"""'], {'label': '"""scipy.signal"""'}), "(f_per, P_per, 'r--', label='scipy.signal')\n", (2400, 2443), True, 'import matplotlib.pyplot as plt\n'), ((2452, 2464), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2462, 2464), True, 'import matplotlib.pyplot as plt\n'), ((2473, 2518), 'matplotlib.pyplot.title', 'plt.title', (['"""Averaged periodogram comparation"""'], {}), "('Averaged periodogram comparation')\n", (2482, 2518), True, 'import matplotlib.pyplot as plt\n'), ((2527, 2547), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""f [Hz]"""'], {}), "('f [Hz]')\n", (2537, 2547), True, 'import matplotlib.pyplot as plt\n'), ((2556, 2571), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P"""'], {}), "('P')\n", (2566, 2571), True, 'import matplotlib.pyplot as plt\n'), ((2580, 2602), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(1e-05)'}), '(bottom=1e-05)\n', (2588, 2602), True, 'import matplotlib.pyplot as plt\n'), ((2610, 2620), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2618, 2620), True, 'import matplotlib.pyplot as plt\n'), ((1418, 1447), 'numpy.add', 'np.add', (['self.P', "self.per['P']"], {}), "(self.P, self.per['P'])\n", (1424, 1447), True, 'import numpy as np\n')] |
import random
import colorama
colorama.init()
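# draw six random alphanumeric characters; 5-digit links use only the first five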
d1 = random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
d2 = random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
d3 = random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
d4 = random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
d5 = random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
d6 = random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
print('-='*50)
qdd = int(input('Você quer um código de 5 ou 6 dígitos? ')) # number of digits in the generated code
if qdd == 5:
print(f'\033[32mLink da print gerada:\033[m prnt.sc/' + d1 + d2 + d3 + d4 + d5)
else:
print(f'\033[32mLink da print gerada:\033[m prnt.sc/' + d1 + d2 + d3 + d4 + d5 + d6)
print('-='*50) | [
"random.choice",
"colorama.init"
] | [((30, 45), 'colorama.init', 'colorama.init', ([], {}), '()\n', (43, 45), False, 'import colorama\n'), ((52, 131), 'random.choice', 'random.choice', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"""'], {}), "('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n", (65, 131), False, 'import random\n'), ((137, 216), 'random.choice', 'random.choice', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"""'], {}), "('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n", (150, 216), False, 'import random\n'), ((222, 301), 'random.choice', 'random.choice', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"""'], {}), "('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n", (235, 301), False, 'import random\n'), ((307, 386), 'random.choice', 'random.choice', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"""'], {}), "('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n", (320, 386), False, 'import random\n'), ((392, 471), 'random.choice', 'random.choice', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"""'], {}), "('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n", (405, 471), False, 'import random\n'), ((477, 556), 'random.choice', 'random.choice', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"""'], {}), "('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n", (490, 556), False, 'import random\n')] |
# Generated by Django 3.2.5 on 2021-07-02 15:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comments', '0008_auto_20210702_2042'),
]
operations = [
migrations.AlterField(
model_name='usercomment',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
]
| [
"django.db.models.AutoField"
] | [((338, 389), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (354, 389), False, 'from django.db import migrations, models\n')] |
import asyncio
loop = asyncio.get_event_loop()
async def hello():
print("Hello")
await asyncio.sleep(3)
print("World!")
if __name__ == '__main__':
loop.run_until_complete(hello())
| [
"asyncio.get_event_loop",
"asyncio.sleep"
] | [((24, 48), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (46, 48), False, 'import asyncio\n'), ((99, 115), 'asyncio.sleep', 'asyncio.sleep', (['(3)'], {}), '(3)\n', (112, 115), False, 'import asyncio\n')] |
import csv
def write_into_csv(info_list):
    with open('student_info.csv', 'a', newline='') as csv_file:
writer=csv.writer(csv_file)
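        # file position 0 means the file is still empty, so write the header row first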
if csv_file.tell() == 0:
writer.writerow(["Name", "Age", "Contact_num", "Email-id"])
writer.writerow(info_list)
condition=True
while(condition):
student_info=input("enter student info in the following format(Name Age Contact_num Email-id) :")
print("Entered student info:"+ student_info)
student_info_list=student_info.split(' ')
print("\nThe entered info :" + "\nName:"+ student_info_list[0] +"\nAge:"+student_info_list[1] +"\nContact_num:"+student_info_list[2] +"\nEmail-id:"+student_info_list[3])
check_info=input("Is the entered info correct?(yes/no):")
if check_info=="yes":
write_into_csv(student_info_list)
condition_check=input("do you want to enter info for another student(yes/no):")
if condition_check=="yes":
condition =True
elif condition_check=="no":
condition=False
elif check_info=="no":
print("please enter the correct info")
| [
"csv.writer"
] | [((130, 150), 'csv.writer', 'csv.writer', (['csv_file'], {}), '(csv_file)\n', (140, 150), False, 'import csv\n')] |
import argparse
import json
import shutil
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument("--python_path", type=str, required=True)
parser.add_argument("--display_name", type=str, required=True)
parser.add_argument("--kernel_dir", type=str, required=True)
args = parser.parse_args()
kernel_dir = Path.home() / "miniconda3/share/jupyter/kernels" / args.kernel_dir
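# mkdir with exist_ok=False fails if a kernel directory with this name already exists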
kernel_dir.mkdir(exist_ok=False)
kernel = json.dumps(
{
"argv": [
args.python_path,
"-m",
"ipykernel_launcher",
"-f",
"{connection_file}",
],
"display_name": args.display_name,
"language": "python",
}
)
with open(kernel_dir / "kernel.json", "wt") as fp:
fp.write(kernel)
for logo in ["logo-32x32.png", "logo-64x64.png"]:
shutil.copyfile(logo, kernel_dir / logo)
print("Kernel installed.")
print("Do not forget to run `pip install --upgrade ipykernel` in the new environment.")
| [
"shutil.copyfile",
"json.dumps",
"pathlib.Path.home",
"argparse.ArgumentParser"
] | [((77, 102), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (100, 102), False, 'import argparse\n'), ((440, 600), 'json.dumps', 'json.dumps', (["{'argv': [args.python_path, '-m', 'ipykernel_launcher', '-f',\n '{connection_file}'], 'display_name': args.display_name, 'language':\n 'python'}"], {}), "({'argv': [args.python_path, '-m', 'ipykernel_launcher', '-f',\n '{connection_file}'], 'display_name': args.display_name, 'language':\n 'python'})\n", (450, 600), False, 'import json\n'), ((829, 869), 'shutil.copyfile', 'shutil.copyfile', (['logo', '(kernel_dir / logo)'], {}), '(logo, kernel_dir / logo)\n', (844, 869), False, 'import shutil\n'), ((330, 341), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (339, 341), False, 'from pathlib import Path\n')] |
from django.db import models
# Create your models here.
class Car(models.Model):
owner_name = models.CharField(max_length=200)
car = models.CharField(max_length=200)
entry_number = models.IntegerField(default=0, unique=True)
def __str__(self):
return self.owner_name + "'s " + self.car
class Category(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Division(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Poll(models.Model):
car = models.ForeignKey(Car, on_delete=models.CASCADE)
division = models.ForeignKey(Division, on_delete=models.CASCADE)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
votes = models.IntegerField(default=0)
class Meta:
ordering = ('votes',)
def __str__(self):
return self.division.name + " " + self.category.name + " " + self.car.owner_name + "'s " + self.car.car
class Show_public(models.Model):
show = models.BooleanField(default=False)
def __str__(self):
return str(self.show) | [
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.IntegerField"
] | [((100, 132), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (116, 132), False, 'from django.db import models\n'), ((143, 175), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (159, 175), False, 'from django.db import models\n'), ((195, 238), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'unique': '(True)'}), '(default=0, unique=True)\n', (214, 238), False, 'from django.db import models\n'), ((356, 388), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (372, 388), False, 'from django.db import models\n'), ((485, 517), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (501, 517), False, 'from django.db import models\n'), ((604, 652), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Car'], {'on_delete': 'models.CASCADE'}), '(Car, on_delete=models.CASCADE)\n', (621, 652), False, 'from django.db import models\n'), ((668, 721), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Division'], {'on_delete': 'models.CASCADE'}), '(Division, on_delete=models.CASCADE)\n', (685, 721), False, 'from django.db import models\n'), ((737, 790), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Category'], {'on_delete': 'models.CASCADE'}), '(Category, on_delete=models.CASCADE)\n', (754, 790), False, 'from django.db import models\n'), ((803, 833), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (822, 833), False, 'from django.db import models\n'), ((1063, 1097), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1082, 1097), False, 'from django.db import models\n')] |
import telebot
from config import Config
from logs_parser import get_logs_paths, does_this_log_filepath_exists, LogsData, all_days_stats_telegram_format
config = Config()
bot = telebot.TeleBot(config["telegram_bot_token"], parse_mode=None)
@bot.message_handler(commands=['start', 'help'])
def support(message):
bot.send_message(message.chat.id, "List all available log files: /list\nNumber of plotted blocks per day: /stats")
def generate_receptions_markup():
markup = telebot.types.InlineKeyboardMarkup()
for shortened_path, full_path in get_logs_paths():
markup.row(telebot.types.InlineKeyboardButton(shortened_path, callback_data=full_path))
return markup
@bot.message_handler(commands=['list'])
def list_logs_files(message):
bot.send_message(message.chat.id, "Select log files you would like to observe",
reply_markup=generate_receptions_markup())
@bot.message_handler(commands=['stats'])
def stats(message):
bot.send_message(message.chat.id, "Number of plots created:\n" + all_days_stats_telegram_format())
@bot.callback_query_handler(func=lambda query: does_this_log_filepath_exists(query.data))
def send_stats(query):
bot.send_message(
query.message.json['chat']['id'],
"```\n" + f"{query.data}:\n" + LogsData(query.data).telegram_format() + "\n```",
parse_mode='MarkdownV2',
)
if __name__ == '__main__':
bot.polling(interval=config["polling_interval"])
| [
"config.Config",
"logs_parser.get_logs_paths",
"telebot.types.InlineKeyboardButton",
"logs_parser.LogsData",
"telebot.types.InlineKeyboardMarkup",
"telebot.TeleBot",
"logs_parser.all_days_stats_telegram_format",
"logs_parser.does_this_log_filepath_exists"
] | [((164, 172), 'config.Config', 'Config', ([], {}), '()\n', (170, 172), False, 'from config import Config\n'), ((180, 242), 'telebot.TeleBot', 'telebot.TeleBot', (["config['telegram_bot_token']"], {'parse_mode': 'None'}), "(config['telegram_bot_token'], parse_mode=None)\n", (195, 242), False, 'import telebot\n'), ((484, 520), 'telebot.types.InlineKeyboardMarkup', 'telebot.types.InlineKeyboardMarkup', ([], {}), '()\n', (518, 520), False, 'import telebot\n'), ((558, 574), 'logs_parser.get_logs_paths', 'get_logs_paths', ([], {}), '()\n', (572, 574), False, 'from logs_parser import get_logs_paths, does_this_log_filepath_exists, LogsData, all_days_stats_telegram_format\n'), ((595, 670), 'telebot.types.InlineKeyboardButton', 'telebot.types.InlineKeyboardButton', (['shortened_path'], {'callback_data': 'full_path'}), '(shortened_path, callback_data=full_path)\n', (629, 670), False, 'import telebot\n'), ((1042, 1074), 'logs_parser.all_days_stats_telegram_format', 'all_days_stats_telegram_format', ([], {}), '()\n', (1072, 1074), False, 'from logs_parser import get_logs_paths, does_this_log_filepath_exists, LogsData, all_days_stats_telegram_format\n'), ((1125, 1166), 'logs_parser.does_this_log_filepath_exists', 'does_this_log_filepath_exists', (['query.data'], {}), '(query.data)\n', (1154, 1166), False, 'from logs_parser import get_logs_paths, does_this_log_filepath_exists, LogsData, all_days_stats_telegram_format\n'), ((1294, 1314), 'logs_parser.LogsData', 'LogsData', (['query.data'], {}), '(query.data)\n', (1302, 1314), False, 'from logs_parser import get_logs_paths, does_this_log_filepath_exists, LogsData, all_days_stats_telegram_format\n')] |
from pymongo import MongoClient
def __get_db_handle():
client = MongoClient("localhost", 27017)
db_handle = client['GoBDB']
return db_handle, client
def get_categories():
(gobDb, client) = __get_db_handle()
jobCollection = gobDb["Jobs"]
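    # keep only the category name field of each job document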
features = jobCollection.aggregate([
{
'$project': {
'category': '$attributes.category_name',
}
}
])
return [*features]
def get_seniority():
(gobDb, client) = __get_db_handle()
jobCollection = gobDb["Jobs"]
features = jobCollection.aggregate([
{
'$project': {
'seniority': '$attributes.seniority.data.attributes.name'
}
}
])
return [*features]
def get_categories_and_seniorities():
(gobDb, client) = __get_db_handle()
jobCollection = gobDb["Jobs"]
features = jobCollection.aggregate([
{
'$project': {
'category': '$attributes.category_name',
'seniority': '$attributes.seniority.data.attributes.name'
}
}
])
return [*features]
def get_categories_and_tags():
(gobDb, client) = __get_db_handle()
jobCollection = gobDb["Jobs"]
features = jobCollection.aggregate([
{
'$project': {
'category': '$attributes.category_name',
'tags': '$attributes.tags.data'
}
}
])
return [*features] | [
"pymongo.MongoClient"
] | [((69, 100), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (80, 100), False, 'from pymongo import MongoClient\n')] |
import bs4
from bs4 import BeautifulSoup
import requests
import lxml
import time
from lxml import etree
import PostBenben
import os
from goto import with_goto
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
}
version = 'Luogu Benben v1.5\n按Ctrl+C发犇犇\n_______________________________________\n'
import signal
def signal_handler(signal,frame):
PostBenben.post()
signal.signal(signal.SIGINT,signal_handler)
"""
def get_proxy():
return requests.get("http://192.168.3.11:5010/get/").content
def delete_proxy(proxy):
requests.get("http://192.168.3.11:5010/delete/?proxy={}".format(proxy))
def getHtml():
retry_count = 10
proxy = get_proxy()
while retry_count > 0:
try:
html = requests.get(url = 'https://www.luogu.org/feed/all', headers = headers, timeout = 10 ,proxies={"http": "http://{}".format(proxy)})
            # access the feed through the proxy
return html
except Exception:
retry_count -= 1
            # after 5 errors, delete the proxy from the pool
delete_proxy(proxy)
return None
"""
def getHtml():
html = requests.get(url = 'https://www.luogu.org/feed/all', headers = headers, timeout = 10 )
return html
@with_goto
def get():
Comment_last = 0
'''
global Comment1
global Comment2
global Comment3
global Comment4
global Comment5
global Comment6
global Comment7
global Comment8
global Comment9
global Comment10
Comment1 = ""
Comment2 = ""
Comment3 = ""
Comment4 = ""
Comment5 = ""
Comment6 = ""
Comment7 = ""
Comment8 = ""
Comment9 = ""
Comment10 = ""
'''
label .start
response = getHtml()
content = response.content
html_raw = content.decode()
html = etree.HTML(html_raw)
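    # the first <li> of the feed is the newest post: extract its author name and timestamp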
name = html.xpath('/html/body/li[1]/div[2]/header/div/span/a')
for i in name:
Name = i.text
ttime = html.xpath('/html/body/li[1]/div[2]/header/div/text()')
for i in ttime:
Time = i
bsdata = bs4.BeautifulSoup(content, 'html.parser')
comment_raw = bsdata.find("span",class_="feed-comment")
Comment = comment_raw.get_text()
'''
comment1 = html.xpath('/html/body/li[1]/div[2]/div/span/p')
for i in comment1:
Comment1 = i.text
comment2 = html.xpath('/html/body/li[1]/div[2]/div/span/p/a[1]')
for i in comment2:
Comment2 = i.text
comment3 = html.xpath('/html/body/li[1]/div[2]/div/span/p/text()[2]')
for i in comment3:
Comment3 = i
comment4 = html.xpath('/html/body/li[1]/div[2]/div/span/p/a[2]')
for i in comment4:
Comment4 = i.text
comment5 = html.xpath('/html/body/li[1]/div[2]/div/span/p/text()[3]')
for i in comment5:
Comment5 = i
comment6 = html.xpath('/html/body/li[1]/div[2]/div/span/p/a[3]')
for i in comment6:
Comment6 = i.text
comment7 = html.xpath('/html/body/li[1]/div[2]/div/span/p/text()[4]')
for i in comment7:
Comment7 = i
comment8 = html.xpath('/html/body/li[1]/div[2]/div/span/p/a[5]')
for i in comment8:
Comment8 = i.text
comment9 = html.xpath('/html/body/li[1]/div[2]/div/span/p/text()[5]')
for i in comment9:
Comment9 = i
Comment = '%s%s%s%s%s%s%s%s%s' % (Comment1, Comment2, Comment3 , Comment4 , Comment5, Comment6, Comment7, Comment8, Comment9)
'''
if(Comment==Comment_last):
time.sleep(1)
else:
print('\n')
print(Name + Time)
print(Comment)
time.sleep(1)
Comment_last = Comment
goto .start
os.system('cls')
# wait for the proxy pool
# time.sleep(2)
print(version)
get()
os.system('cls')
| [
"signal.signal",
"requests.get",
"time.sleep",
"bs4.BeautifulSoup",
"lxml.etree.HTML",
"PostBenben.post",
"os.system"
] | [((677, 721), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (690, 721), False, 'import signal\n'), ((3813, 3829), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (3822, 3829), False, 'import os\n'), ((3877, 3893), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (3886, 3893), False, 'import os\n'), ((657, 674), 'PostBenben.post', 'PostBenben.post', ([], {}), '()\n', (672, 674), False, 'import PostBenben\n'), ((1354, 1433), 'requests.get', 'requests.get', ([], {'url': '"""https://www.luogu.org/feed/all"""', 'headers': 'headers', 'timeout': '(10)'}), "(url='https://www.luogu.org/feed/all', headers=headers, timeout=10)\n", (1366, 1433), False, 'import requests\n'), ((2024, 2044), 'lxml.etree.HTML', 'etree.HTML', (['html_raw'], {}), '(html_raw)\n', (2034, 2044), False, 'from lxml import etree\n'), ((2271, 2312), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (2288, 2312), False, 'import bs4\n'), ((3751, 3764), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3761, 3764), False, 'import time\n'), ((3653, 3666), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3663, 3666), False, 'import time\n')] |
from selenium import webdriver
DIRECTORY = 'reports'
CLONE_DIRECTORY = '../amazon-clone/src'
NAME = 'Laptop'
CURRENCY = '€'
MIN_PRICE = '275'
MAX_PRICE = '650'
FILTERS = {
'min': MIN_PRICE,
'max': MAX_PRICE
}
BASE_URL = "http://www.amazon.de/"
#gets our actual webdriver, returns webdriver of chrome
def get_chrome_web_driver(options):
return webdriver.Chrome('./chromedriver.exe', chrome_options=options)
#again, for our options
def get_web_driver_options():
return webdriver.ChromeOptions()
# ignore certificate errors
def set_ignore_certificate_error(options):
    options.add_argument('--ignore-certificate-errors')
# run the browser in incognito mode
def set_browser_as_icognito(options):
options.add_argument('--incognito')
| [
"selenium.webdriver.Chrome",
"selenium.webdriver.ChromeOptions"
] | [((357, 419), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['"""./chromedriver.exe"""'], {'chrome_options': 'options'}), "('./chromedriver.exe', chrome_options=options)\n", (373, 419), False, 'from selenium import webdriver\n'), ((486, 511), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (509, 511), False, 'from selenium import webdriver\n')] |
import os
import numpy as np
import configure_finetuning
from finetune import task, feature_spec
import tensorflow.compat.v1 as tf
import pandas as pd
from finetune.classification import classification_metrics
class SentimentExample(task.Example):
def __init__(self, eid, input_ids, targets):
super(SentimentExample, self).__init__('sentiment')
self.eid = eid
self.input_ids = input_ids
self.targets = targets
class SentimentTask(task.Task):
def __init__(self, config: configure_finetuning.FinetuningConfig):
super().__init__(config, "sentiment")
self.n_outputs = 18
self.pad_token_id = 1
def get_prediction_module(self, bert_model, features, is_training, percent_done):
reprs = bert_model.get_pooled_output()
if is_training: reprs = tf.nn.dropout(reprs, keep_prob=0.9)
predictions = tf.layers.dense(reprs, self.n_outputs)
targets = features["targets"]
losses = tf.keras.losses.mean_absolute_error(targets, predictions)
outputs = dict(
loss=losses,
predictions=predictions,
targets=targets,
input_ids=features['input_ids'],
eid=features["eid"]
)
return losses, outputs
def get_scorer(self):
return classification_metrics.RegressionScorer()
def get_examples(self, split):
table = pd.read_parquet(os.path.join('./sentiment_dataset', split))
for i, row in table.iterrows():
eid, input_ids, labels = row[['_id', 'input_ids', 'labels']]
yield SentimentExample(eid, input_ids, labels)
def featurize(self, example: SentimentExample, is_training, log=False):
input_len = min(len(example.input_ids), self.config.max_seq_length)
# Pad the input ids
input_ids = np.full(shape=self.config.max_seq_length, fill_value=self.pad_token_id)
input_ids[:input_len] = example.input_ids[:input_len]
        # Create an attention mask
input_mask = np.zeros((self.config.max_seq_length,), dtype=np.int)
input_mask[:input_len] = 1
return {
"input_ids": input_ids,
"input_mask": input_mask,
"targets": example.targets,
"eid": example.eid,
"task_id": self.config.task_names.index(self.name),
}
def get_feature_specs(self):
return [
feature_spec.FeatureSpec("eid", []),
feature_spec.FeatureSpec("targets", [self.n_outputs], is_int_feature=False),
]
| [
"tensorflow.compat.v1.keras.losses.mean_absolute_error",
"tensorflow.compat.v1.layers.dense",
"os.path.join",
"numpy.zeros",
"finetune.feature_spec.FeatureSpec",
"finetune.classification.classification_metrics.RegressionScorer",
"numpy.full",
"tensorflow.compat.v1.nn.dropout"
] | [((885, 923), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['reprs', 'self.n_outputs'], {}), '(reprs, self.n_outputs)\n', (900, 923), True, 'import tensorflow.compat.v1 as tf\n'), ((979, 1036), 'tensorflow.compat.v1.keras.losses.mean_absolute_error', 'tf.keras.losses.mean_absolute_error', (['targets', 'predictions'], {}), '(targets, predictions)\n', (1014, 1036), True, 'import tensorflow.compat.v1 as tf\n'), ((1312, 1353), 'finetune.classification.classification_metrics.RegressionScorer', 'classification_metrics.RegressionScorer', ([], {}), '()\n', (1351, 1353), False, 'from finetune.classification import classification_metrics\n'), ((1839, 1910), 'numpy.full', 'np.full', ([], {'shape': 'self.config.max_seq_length', 'fill_value': 'self.pad_token_id'}), '(shape=self.config.max_seq_length, fill_value=self.pad_token_id)\n', (1846, 1910), True, 'import numpy as np\n'), ((2026, 2079), 'numpy.zeros', 'np.zeros', (['(self.config.max_seq_length,)'], {'dtype': 'np.int'}), '((self.config.max_seq_length,), dtype=np.int)\n', (2034, 2079), True, 'import numpy as np\n'), ((826, 861), 'tensorflow.compat.v1.nn.dropout', 'tf.nn.dropout', (['reprs'], {'keep_prob': '(0.9)'}), '(reprs, keep_prob=0.9)\n', (839, 861), True, 'import tensorflow.compat.v1 as tf\n'), ((1422, 1464), 'os.path.join', 'os.path.join', (['"""./sentiment_dataset"""', 'split'], {}), "('./sentiment_dataset', split)\n", (1434, 1464), False, 'import os\n'), ((2416, 2451), 'finetune.feature_spec.FeatureSpec', 'feature_spec.FeatureSpec', (['"""eid"""', '[]'], {}), "('eid', [])\n", (2440, 2451), False, 'from finetune import task, feature_spec\n'), ((2465, 2540), 'finetune.feature_spec.FeatureSpec', 'feature_spec.FeatureSpec', (['"""targets"""', '[self.n_outputs]'], {'is_int_feature': '(False)'}), "('targets', [self.n_outputs], is_int_feature=False)\n", (2489, 2540), False, 'from finetune import task, feature_spec\n')] |
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from www.apps.bookmark.models import Bookmark
from ..models import JobPost
def get_employer_jobpost(profile):
""" """
jobs = None
if profile.is_employer:
jobs = JobPost.objects.filter(user=profile.user).order_by('title', 'employment_option', 'is_active')
return jobs
def get_employer_jobpost_public_view(profile):
""" Returns all jobs post if profile is active and an employer """
jobs = None
if profile.is_enabled:
jobs = get_employer_jobpost(profile)
return jobs
def update_employer_job_status(profile):
if profile.is_employer:
jobs = JobPost.objects.filter(user=profile.user)
if jobs:
profile.posted_jobs = True
else:
profile.posted_jobs = False
profile.save()
def bookmark_job(user, job):
""" Bookmark a job for user """
bookmark = Bookmark.objects.create_bookmark(user, job)
return bookmark
def get_job_bookmarks(user):
""" get all job bookmarks for user """
bookmarks = None
try:
bookmarks = Bookmark.objects.get_all_bookmarks_for_this_model_for_this_user(user, JobPost)
except:
pass
return bookmarks
def del_job_bookmark(user, job):
""" delete a job bookmark for this user """
Bookmark.objects.delete_bookmark(user, job)
return
| [
"www.apps.bookmark.models.Bookmark.objects.create_bookmark",
"www.apps.bookmark.models.Bookmark.objects.get_all_bookmarks_for_this_model_for_this_user",
"www.apps.bookmark.models.Bookmark.objects.delete_bookmark"
] | [((956, 999), 'www.apps.bookmark.models.Bookmark.objects.create_bookmark', 'Bookmark.objects.create_bookmark', (['user', 'job'], {}), '(user, job)\n', (988, 999), False, 'from www.apps.bookmark.models import Bookmark\n'), ((1358, 1401), 'www.apps.bookmark.models.Bookmark.objects.delete_bookmark', 'Bookmark.objects.delete_bookmark', (['user', 'job'], {}), '(user, job)\n', (1390, 1401), False, 'from www.apps.bookmark.models import Bookmark\n'), ((1144, 1222), 'www.apps.bookmark.models.Bookmark.objects.get_all_bookmarks_for_this_model_for_this_user', 'Bookmark.objects.get_all_bookmarks_for_this_model_for_this_user', (['user', 'JobPost'], {}), '(user, JobPost)\n', (1207, 1222), False, 'from www.apps.bookmark.models import Bookmark\n')] |
from setuptools import setup, find_packages
setup(
name='3PNN',
version='1.0.0',
description='Modelling 3D printed cochlea EFI with neural network.',
license='BSD 3-clause license',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
    packages=find_packages(include=('method',)),
install_requires=[
'tensorflow==2.1.0',
'pints @ git+https://github.com/pints-team/pints@6e30367e07c7be0888c8e051d0e83a8cbebdb2cb#egg=pints',
'SALib==1.4.0.1',
'scikit-learn==0.24.0',
'seaborn==0.11.1',
],
)
| [
"setuptools.find_packages"
] | [((269, 300), 'setuptools.find_packages', 'find_packages', ([], {'include': '"""method"""'}), "(include='method')\n", (282, 300), False, 'from setuptools import setup, find_packages\n')] |
try: #import necessary modules
print("importing modules...")
from gdrive.drivePublisher import Gpublisher
from slack.slackPublisher import Spublisher
from camera.cam import Camera
from datalog.log import Log
import datetime
import os
import cv2
import numpy as np
import time
print("done")
except Exception as e: #handle import errors
print("error importing modules")
print(e)
exit()
try: #initialize component classes
print("initializing modules...")
camera = Camera()
slack = Spublisher()
drive = Gpublisher()
log = Log()
print("done")
except Exception as e: #handle initialization errors
print("error initializing modules")
print(e)
exit()
def closeAll(): #soft exit method
log.printWrite("stopping MailBot")
exit()
def Upload(frame, face): #upload method
imagePath = "camera/image/image.jpg" #location for temporary image storage
date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M") #time stamp for image
camera.saveFrame(frame, imagePath) #save frame to disk
fileID = drive.upload(imagePath, date) #upload frame to gdrive
URL = "https://drive.google.com/uc?id=" + str(fileID) #convert gdrive fileID into embeddable URL
if face: #post image to slack using URL
slack.post(URL, message="Mail-Person Spotted! Face Found!", name=date)
else:
slack.post(URL, message="Mail-Person Spotted!", name=date)
os.remove(imagePath) #remove frame from disk
log.printWrite("uploaded image")
log.printWrite("starting main loop")
#timer variables
timer = 0 #timer counter AKA the tick
timerConstant = 1 #timer delay time AKA the tock
isOpen = False #state detection variables
doorState1 = False #current door state - true = open, false = closed
doorState2 = False #previous doorState1 value
openFrames=[] #frame buffer for all frames captured while door is open
startDetection = False
faceDetected = False
faceArray = []
camera.minimumConfidence = 0.96
while True: #main loop
try:
frame = camera.getFrame() #get frame from camera
gray = camera.convertGray(frame) #convert to grayscale
        if timer <= 0: #continue if there is no delay
average = camera.averageGraySpace(gray) #calculate average light-level of the frame
            if average > 100: #detect if the door is open
doorState1 = True
openFrames.append(frame)
else:
doorState1 = False
if doorState2 == doorState1 and doorState2 == True: #see if door is open
isOpen = True
elif doorState2 == doorState1 and doorState2 == False: #see if door has just been closed
if isOpen == True:
startDetection = True
isOpen = False
if startDetection == True: #begin face detection
log.printWrite("starting search: " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
for f in openFrames: #use frames stored in buffer
log.printWrite("searching for faces...")
                    faces = camera.getFaces(f) #detect faces in the frame
kill = False
for i in range(0, faces.shape[2]):
confidence = faces[0, 0, i, 2]
if confidence > camera.minimumConfidence and kill == False:
log.printWrite("face found "+ str(confidence))
(h, w) = f.shape[:2]
box = faces[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
text = "{:.2f}%".format(confidence * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(f, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.putText(f, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
faceDetected = True
faceArray.append([f,confidence])
kill = True
elif (kill == True):
break
if (kill == True):
break
if faceDetected == False: #if no faces are found upload anyways
log.printWrite("no face found")
median = int(len(openFrames)/2) #find middle frame
log.printWrite("uploading image")
Upload(openFrames[median], False) #upload image to the internet
timer = 30 #wait 30 seconds before attempting to capture again
log.printWrite("delay "+ str(timer) + " seconds")
else:
highestConfidence = 0
bestFrame = None
for i in range(len(faceArray)):
if faceArray[i][1] > highestConfidence:
highestConfidence = faceArray[i][1]
bestFrame = faceArray[i][0]
log.printWrite("uploading image")
log.printWrite("max confidence: " + str(highestConfidence))
Upload(bestFrame, True)
timer = 30
log.printWrite("delay " + str(timer) + " seconds")
faces=None
faceArray=[]
openFrames=[] #clear frame buffer
faceDetected = False #reset detection variables
startDetection = False
doorState2 = doorState1 #update previous door status before beginning next loop
else: #if timer is set delay a given amount of time
            time.sleep(timerConstant) #total delay time = timerConstant * timer
timer-=1
except KeyboardInterrupt: #handle keyboard interrupts
log.printWrite("keyboard interrupt")
break #stop loop
except Exception as e: #handle misc errors
log.printWrite("error")
log.printWrite(str(e))
break #stop loop
closeAll() #exit safely
| [
"camera.cam.Camera",
"slack.slackPublisher.Spublisher",
"cv2.rectangle",
"datalog.log.Log",
"time.sleep",
"cv2.putText",
"datetime.datetime.now",
"numpy.array",
"gdrive.drivePublisher.Gpublisher",
"os.remove"
] | [((709, 717), 'camera.cam.Camera', 'Camera', ([], {}), '()\n', (715, 717), False, 'from camera.cam import Camera\n'), ((730, 742), 'slack.slackPublisher.Spublisher', 'Spublisher', ([], {}), '()\n', (740, 742), False, 'from slack.slackPublisher import Spublisher\n'), ((755, 767), 'gdrive.drivePublisher.Gpublisher', 'Gpublisher', ([], {}), '()\n', (765, 767), False, 'from gdrive.drivePublisher import Gpublisher\n'), ((778, 783), 'datalog.log.Log', 'Log', ([], {}), '()\n', (781, 783), False, 'from datalog.log import Log\n'), ((1986, 2006), 'os.remove', 'os.remove', (['imagePath'], {}), '(imagePath)\n', (1995, 2006), False, 'import os\n'), ((1321, 1344), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1342, 1344), False, 'import datetime\n'), ((7657, 7682), 'time.sleep', 'time.sleep', (['timerConstant'], {}), '(timerConstant)\n', (7667, 7682), False, 'import time\n'), ((5384, 5448), 'cv2.rectangle', 'cv2.rectangle', (['f', '(startX, startY)', '(endX, endY)', '(0, 0, 255)', '(2)'], {}), '(f, (startX, startY), (endX, endY), (0, 0, 255), 2)\n', (5397, 5448), False, 'import cv2\n'), ((5477, 5563), 'cv2.putText', 'cv2.putText', (['f', 'text', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(f, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, \n 255), 2)\n', (5488, 5563), False, 'import cv2\n'), ((5103, 5125), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (5111, 5125), True, 'import numpy as np\n'), ((4305, 4328), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4326, 4328), False, 'import datetime\n')] |
import numpy as np
def hausdorff_dimension(cloudmask):
if not len(cloudmask.shape) == 2:
raise NotImplementedError("Can only calculate Hausdorff fractal"
" dimension of 2D arrays")
collapsed = np.argwhere(cloudmask > 0)
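    # coordinates of all non-zero (cloudy) pixels; the box counting below bins these points at each scale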
Lx, Ly = cloudmask.shape
# computing the fractal dimension
# considering only scales in a logarithmic list
scales=np.logspace(0.01, 10, num=10, endpoint=False, base=1.8)
Ns= np.zeros_like(scales)
# looping over several scales
for i, scale in enumerate(scales):
try:
bins = (np.arange(0,Lx,scale),np.arange(0,Ly,scale))
H, edges = np.histogramdd(collapsed, bins=bins)
Ns[i] = np.sum(H>0)
except Exception as e:
Ns = Ns[:i]
scales = scales[:i]
            raise
# linear fit, polynomial of degree 1
data =np.polyfit(x=np.log(scales), y=np.log(Ns), deg=1, full=True)
coeffs, residuals, rank, singular_values, rcond = data
return -coeffs[0]
| [
"numpy.histogramdd",
"numpy.log",
"numpy.sum",
"numpy.argwhere",
"numpy.logspace",
"numpy.zeros_like",
"numpy.arange"
] | [((245, 271), 'numpy.argwhere', 'np.argwhere', (['(cloudmask > 0)'], {}), '(cloudmask > 0)\n', (256, 271), True, 'import numpy as np\n'), ((403, 458), 'numpy.logspace', 'np.logspace', (['(0.01)', '(10)'], {'num': '(10)', 'endpoint': '(False)', 'base': '(1.8)'}), '(0.01, 10, num=10, endpoint=False, base=1.8)\n', (414, 458), True, 'import numpy as np\n'), ((468, 489), 'numpy.zeros_like', 'np.zeros_like', (['scales'], {}), '(scales)\n', (481, 489), True, 'import numpy as np\n'), ((664, 700), 'numpy.histogramdd', 'np.histogramdd', (['collapsed'], {'bins': 'bins'}), '(collapsed, bins=bins)\n', (678, 700), True, 'import numpy as np\n'), ((721, 734), 'numpy.sum', 'np.sum', (['(H > 0)'], {}), '(H > 0)\n', (727, 734), True, 'import numpy as np\n'), ((924, 938), 'numpy.log', 'np.log', (['scales'], {}), '(scales)\n', (930, 938), True, 'import numpy as np\n'), ((942, 952), 'numpy.log', 'np.log', (['Ns'], {}), '(Ns)\n', (948, 952), True, 'import numpy as np\n'), ((596, 619), 'numpy.arange', 'np.arange', (['(0)', 'Lx', 'scale'], {}), '(0, Lx, scale)\n', (605, 619), True, 'import numpy as np\n'), ((618, 641), 'numpy.arange', 'np.arange', (['(0)', 'Ly', 'scale'], {}), '(0, Ly, scale)\n', (627, 641), True, 'import numpy as np\n')] |
# python David_1_4_6_arithmetic.py --image "../../../CV-PyImageSearch Gurus Course/Dataset/data/jared.JPG"
# python David_1_4_6_arithmetic.py --image "../../../CV-PyImageSearch Gurus Course/Dataset/data/grand_canyon.png"
# import the necessary packages
import numpy as np
import argparse
import cv2
# 1.Preprocessing
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
# load the image and show it
image = cv2.imread(args["image"])
image = cv2.resize(image,(200,300),interpolation = cv2.INTER_CUBIC)
cv2.imshow("Original", image)
# 2. Add to / subtract from the image channels:
# 2.1 add to the channels
M = np.ones(image.shape, dtype = "uint8") * 80
added = cv2.add(image, M)
cv2.imshow("Added80", added)
# 2.1 add to the channels
M = np.ones(image.shape, dtype = "uint8") * 30
added = cv2.add(image, M)
cv2.imshow("Added30", added)
# 2.2 subtract from the channels
M = np.ones(image.shape, dtype = "uint8") * 50
subtracted = cv2.subtract(image, M)
cv2.imshow("Subtracted50", subtracted)
M = np.ones(image.shape, dtype = "uint8") * 100
subtracted = cv2.subtract(image, M)
cv2.imshow("Subtracted100", subtracted)
cv2.waitKey(0)
| [
"numpy.ones",
"argparse.ArgumentParser",
"cv2.imshow",
"cv2.waitKey",
"cv2.resize",
"cv2.subtract",
"cv2.imread",
"cv2.add"
] | [((386, 411), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (409, 411), False, 'import argparse\n'), ((557, 582), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (567, 582), False, 'import cv2\n'), ((591, 651), 'cv2.resize', 'cv2.resize', (['image', '(200, 300)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image, (200, 300), interpolation=cv2.INTER_CUBIC)\n', (601, 651), False, 'import cv2\n'), ((651, 680), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (661, 680), False, 'import cv2\n'), ((779, 796), 'cv2.add', 'cv2.add', (['image', 'M'], {}), '(image, M)\n', (786, 796), False, 'import cv2\n'), ((797, 825), 'cv2.imshow', 'cv2.imshow', (['"""Added80"""', 'added'], {}), "('Added80', added)\n", (807, 825), False, 'import cv2\n'), ((901, 918), 'cv2.add', 'cv2.add', (['image', 'M'], {}), '(image, M)\n', (908, 918), False, 'import cv2\n'), ((919, 947), 'cv2.imshow', 'cv2.imshow', (['"""Added30"""', 'added'], {}), "('Added30', added)\n", (929, 947), False, 'import cv2\n'), ((1028, 1050), 'cv2.subtract', 'cv2.subtract', (['image', 'M'], {}), '(image, M)\n', (1040, 1050), False, 'import cv2\n'), ((1051, 1089), 'cv2.imshow', 'cv2.imshow', (['"""Subtracted50"""', 'subtracted'], {}), "('Subtracted50', subtracted)\n", (1061, 1089), False, 'import cv2\n'), ((1152, 1174), 'cv2.subtract', 'cv2.subtract', (['image', 'M'], {}), '(image, M)\n', (1164, 1174), False, 'import cv2\n'), ((1175, 1214), 'cv2.imshow', 'cv2.imshow', (['"""Subtracted100"""', 'subtracted'], {}), "('Subtracted100', subtracted)\n", (1185, 1214), False, 'import cv2\n'), ((1215, 1229), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1226, 1229), False, 'import cv2\n'), ((728, 763), 'numpy.ones', 'np.ones', (['image.shape'], {'dtype': '"""uint8"""'}), "(image.shape, dtype='uint8')\n", (735, 763), True, 'import numpy as np\n'), ((850, 885), 'numpy.ones', 'np.ones', (['image.shape'], {'dtype': '"""uint8"""'}), "(image.shape, dtype='uint8')\n", (857, 885), True, 'import numpy as np\n'), ((972, 1007), 'numpy.ones', 'np.ones', (['image.shape'], {'dtype': '"""uint8"""'}), "(image.shape, dtype='uint8')\n", (979, 1007), True, 'import numpy as np\n'), ((1095, 1130), 'numpy.ones', 'np.ones', (['image.shape'], {'dtype': '"""uint8"""'}), "(image.shape, dtype='uint8')\n", (1102, 1130), True, 'import numpy as np\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
from wtforms import TextAreaField, TextField, validators
from wtforms.fields.html5 import IntegerField
class comment_form(FlaskForm):
comment = StringField('comment', validators=[DataRequired()])
class rating_form(FlaskForm):
clarity = IntegerField('clarity', default=3)
helpfulness = IntegerField('helpfulness', default=3)
availability = IntegerField('availability', default=3) | [
"wtforms.validators.DataRequired",
"wtforms.fields.html5.IntegerField"
] | [((354, 388), 'wtforms.fields.html5.IntegerField', 'IntegerField', (['"""clarity"""'], {'default': '(3)'}), "('clarity', default=3)\n", (366, 388), False, 'from wtforms.fields.html5 import IntegerField\n'), ((407, 445), 'wtforms.fields.html5.IntegerField', 'IntegerField', (['"""helpfulness"""'], {'default': '(3)'}), "('helpfulness', default=3)\n", (419, 445), False, 'from wtforms.fields.html5 import IntegerField\n'), ((465, 504), 'wtforms.fields.html5.IntegerField', 'IntegerField', (['"""availability"""'], {'default': '(3)'}), "('availability', default=3)\n", (477, 504), False, 'from wtforms.fields.html5 import IntegerField\n'), ((292, 306), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (304, 306), False, 'from wtforms.validators import DataRequired\n')] |
import shlex
from blight.actions import InjectFlags
from blight.tool import CC, CXX
def test_inject_flags():
inject_flags = InjectFlags(
{"CFLAGS": "-more -flags", "CXXFLAGS": "-these -are -ignored", "CPPFLAGS": "-foo"}
)
cc = CC(["-fake", "-flags"])
inject_flags.before_run(cc)
assert cc.args == shlex.split("-fake -flags -more -flags -foo")
def test_inject_flags_cxx():
inject_flags = InjectFlags(
{"CFLAGS": "-these -are -ignored", "CXXFLAGS": "-more -flags", "CPPFLAGS": "-bar"}
)
cxx = CXX(["-fake", "-flags"])
inject_flags.before_run(cxx)
assert cxx.args == shlex.split("-fake -flags -more -flags -bar")
def test_inject_linker_flags():
inject_flags = InjectFlags(
{
"CFLAGS": "-cc-flags",
"CFLAGS_LINKER": "-c-linker-flags",
"CXXFLAGS": "-cxx-flags",
"CXXFLAGS_LINKER": "-cxx-linker-flags",
}
)
cc_nolink = CC(["-c"])
cc_link = CC(["-fake", "-flags"])
cxx_nolink = CXX(["-c"])
cxx_link = CXX(["-fake", "-flags"])
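    # only invocations that actually link (no -c) should also receive the *_LINKER flags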
inject_flags.before_run(cc_nolink)
inject_flags.before_run(cc_link)
inject_flags.before_run(cxx_nolink)
inject_flags.before_run(cxx_link)
assert cc_nolink.args == shlex.split("-c -cc-flags")
assert cc_link.args == shlex.split("-fake -flags -cc-flags -c-linker-flags")
assert cxx_nolink.args == shlex.split("-c -cxx-flags")
assert cxx_link.args == shlex.split("-fake -flags -cxx-flags -cxx-linker-flags")
def test_inject_flags_unknown_lang():
inject_flags = InjectFlags(
{"CFLAGS": "-these -are -ignored", "CXXFLAGS": "-so -are -these", "CPPFLAGS": "-and -this"}
)
cxx = CXX(["-x", "-unknownlanguage"])
inject_flags.before_run(cxx)
assert cxx.args == shlex.split("-x -unknownlanguage")
| [
"blight.actions.InjectFlags",
"blight.tool.CXX",
"blight.tool.CC",
"shlex.split"
] | [((131, 230), 'blight.actions.InjectFlags', 'InjectFlags', (["{'CFLAGS': '-more -flags', 'CXXFLAGS': '-these -are -ignored', 'CPPFLAGS':\n '-foo'}"], {}), "({'CFLAGS': '-more -flags', 'CXXFLAGS': '-these -are -ignored',\n 'CPPFLAGS': '-foo'})\n", (142, 230), False, 'from blight.actions import InjectFlags\n'), ((250, 273), 'blight.tool.CC', 'CC', (["['-fake', '-flags']"], {}), "(['-fake', '-flags'])\n", (252, 273), False, 'from blight.tool import CC, CXX\n'), ((426, 525), 'blight.actions.InjectFlags', 'InjectFlags', (["{'CFLAGS': '-these -are -ignored', 'CXXFLAGS': '-more -flags', 'CPPFLAGS':\n '-bar'}"], {}), "({'CFLAGS': '-these -are -ignored', 'CXXFLAGS': '-more -flags',\n 'CPPFLAGS': '-bar'})\n", (437, 525), False, 'from blight.actions import InjectFlags\n'), ((546, 570), 'blight.tool.CXX', 'CXX', (["['-fake', '-flags']"], {}), "(['-fake', '-flags'])\n", (549, 570), False, 'from blight.tool import CC, CXX\n'), ((728, 870), 'blight.actions.InjectFlags', 'InjectFlags', (["{'CFLAGS': '-cc-flags', 'CFLAGS_LINKER': '-c-linker-flags', 'CXXFLAGS':\n '-cxx-flags', 'CXXFLAGS_LINKER': '-cxx-linker-flags'}"], {}), "({'CFLAGS': '-cc-flags', 'CFLAGS_LINKER': '-c-linker-flags',\n 'CXXFLAGS': '-cxx-flags', 'CXXFLAGS_LINKER': '-cxx-linker-flags'})\n", (739, 870), False, 'from blight.actions import InjectFlags\n'), ((957, 967), 'blight.tool.CC', 'CC', (["['-c']"], {}), "(['-c'])\n", (959, 967), False, 'from blight.tool import CC, CXX\n'), ((982, 1005), 'blight.tool.CC', 'CC', (["['-fake', '-flags']"], {}), "(['-fake', '-flags'])\n", (984, 1005), False, 'from blight.tool import CC, CXX\n'), ((1023, 1034), 'blight.tool.CXX', 'CXX', (["['-c']"], {}), "(['-c'])\n", (1026, 1034), False, 'from blight.tool import CC, CXX\n'), ((1050, 1074), 'blight.tool.CXX', 'CXX', (["['-fake', '-flags']"], {}), "(['-fake', '-flags'])\n", (1053, 1074), False, 'from blight.tool import CC, CXX\n'), ((1572, 1680), 'blight.actions.InjectFlags', 'InjectFlags', (["{'CFLAGS': '-these -are -ignored', 'CXXFLAGS': '-so -are -these',\n 'CPPFLAGS': '-and -this'}"], {}), "({'CFLAGS': '-these -are -ignored', 'CXXFLAGS':\n '-so -are -these', 'CPPFLAGS': '-and -this'})\n", (1583, 1680), False, 'from blight.actions import InjectFlags\n'), ((1701, 1732), 'blight.tool.CXX', 'CXX', (["['-x', '-unknownlanguage']"], {}), "(['-x', '-unknownlanguage'])\n", (1704, 1732), False, 'from blight.tool import CC, CXX\n'), ((330, 375), 'shlex.split', 'shlex.split', (['"""-fake -flags -more -flags -foo"""'], {}), "('-fake -flags -more -flags -foo')\n", (341, 375), False, 'import shlex\n'), ((629, 674), 'shlex.split', 'shlex.split', (['"""-fake -flags -more -flags -bar"""'], {}), "('-fake -flags -more -flags -bar')\n", (640, 674), False, 'import shlex\n'), ((1260, 1287), 'shlex.split', 'shlex.split', (['"""-c -cc-flags"""'], {}), "('-c -cc-flags')\n", (1271, 1287), False, 'import shlex\n'), ((1315, 1368), 'shlex.split', 'shlex.split', (['"""-fake -flags -cc-flags -c-linker-flags"""'], {}), "('-fake -flags -cc-flags -c-linker-flags')\n", (1326, 1368), False, 'import shlex\n'), ((1399, 1427), 'shlex.split', 'shlex.split', (['"""-c -cxx-flags"""'], {}), "('-c -cxx-flags')\n", (1410, 1427), False, 'import shlex\n'), ((1456, 1512), 'shlex.split', 'shlex.split', (['"""-fake -flags -cxx-flags -cxx-linker-flags"""'], {}), "('-fake -flags -cxx-flags -cxx-linker-flags')\n", (1467, 1512), False, 'import shlex\n'), ((1791, 1825), 'shlex.split', 'shlex.split', (['"""-x -unknownlanguage"""'], {}), "('-x -unknownlanguage')\n", (1802, 1825), False, 'import shlex\n')] |
"""Information about your repl."""
import os
from typing import Optional
class ReplInfo:
"""Represents info about the current repl."""
@property
def id(self) -> Optional[str]:
"""The id of the repl (REPL_ID environment variable)."""
return os.getenv("REPL_ID")
@property
def slug(self) -> Optional[str]:
"""The slug of the repl (REPL_SLUG environement variable).
The slug is the url-safe version of the repl's name.
Returns:
Optional[str]: The repl slug.
"""
return os.getenv("REPL_SLUG")
@property
def owner(self) -> Optional[str]:
"""The owner of the repl (REPL_OWNER environment variable)."""
return os.getenv("REPL_OWNER")
@property
def language(self) -> Optional[str]:
"""The language of the repl (REPL_LANGUAGE environment variable)."""
return os.getenv("REPL_LANGUAGE")
@property
def id_co_url(self) -> Optional[str]:
"""The hosted URL of the repl in the form https://<id>.id.repl.co.
Less readable than the vanity URL but guaranteed to work (the vanity URL might
be too long for a certificate to be issued for it, causing it to break).
Returns:
Optional[str]: The id URL or None if there is no ID.
"""
repl_id = self.id
if repl_id is None:
return None
return f"https://{repl_id}.id.repl.co"
@property
def co_url(self) -> Optional[str]:
"""The readable, hosted repl.co URL for this repl.
See id_url for the difference between the hosted URL types.
Returns:
Optional[str]: The vanity hosted URL or None if slug or owner is None.
"""
slug = self.slug
owner = self.owner
if slug is None or owner is None:
return None
return f"https://{slug.lower()}.{owner.lower()}.repl.co"
@property
def replit_url(self) -> Optional[str]:
"""The URL of this repl on replit.com."""
slug = self.slug
owner = self.owner
if slug is None or owner is None:
return None
return f"https://replit.com/@{owner}/{slug}"
@property
def replit_id_url(self) -> Optional[str]:
"""The URL of this repl on replit.com, based on the repl's ID."""
repl_id = self.id
if repl_id is None:
return None
return f"https://replit.com/replid/{repl_id}"
| [
"os.getenv"
] | [((271, 291), 'os.getenv', 'os.getenv', (['"""REPL_ID"""'], {}), "('REPL_ID')\n", (280, 291), False, 'import os\n'), ((560, 582), 'os.getenv', 'os.getenv', (['"""REPL_SLUG"""'], {}), "('REPL_SLUG')\n", (569, 582), False, 'import os\n'), ((722, 745), 'os.getenv', 'os.getenv', (['"""REPL_OWNER"""'], {}), "('REPL_OWNER')\n", (731, 745), False, 'import os\n'), ((894, 920), 'os.getenv', 'os.getenv', (['"""REPL_LANGUAGE"""'], {}), "('REPL_LANGUAGE')\n", (903, 920), False, 'import os\n')] |
import os
def my_mp4_playlist(file_path, new_song):
"""
    :param file_path: path to the playlist file
    :param new_song: name of the song to put in the third slot
    :return: None - the file is rewritten with the new song name in the 3rd place, keeping the format
"""
file_input = open(file_path, "r")
lists = file_input.read().split("\n")
lists_splitter = []
lists_to_output = []
file_input.seek(0)
first_char = file_input.read(1)
for item in lists:
item = item.split(';')
lists_splitter.append(item)
if not first_char:
lists_to_output = ["\n", "\n", new_song]
file_input.close()
else:
lists_splitter[2][0] = new_song
file_input.close()
for item in lists_splitter:
item[len(item)-1] = '\n'
lists_to_output.append(';'.join(item))
file_input = open(file_path, "w")
for item in lists_to_output:
file_input.write(item)
file_input.close()
def main():
my_mp4_playlist(os.getcwd() + r"\mp3list.txt", "Python Love Story")
file_input = open(os.getcwd() + r"\mp3list.txt", "r")
print(file_input.read())
if __name__ == '__main__':
main() | [
"os.getcwd"
] | [((945, 956), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (954, 956), False, 'import os\n'), ((1019, 1030), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1028, 1030), False, 'import os\n')] |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import progdb
from util import *
import os
class TestDatabase(unittest.TestCase):
def test_basic(self):
db = progdb.Database()
db.add_search_path("./tests")
sleep_start = time.time()
MessageLoop.run_while(lambda: time.time() < sleep_start + 1.0) # sleep for a bit so the db can find things
self.assertTrue(db.get_num_files() != 0)
goal = os.path.abspath("./tests/apps/test1.c")
log1("Searching...")
matching = db.find_files_matching("test1.c")
self.assertTrue(goal in matching)
db.shutdown()
def test_ignores(self):
db = progdb.Database()
db.add_ignore("Makefile")
db.add_search_path("./tests")
sleep_start = time.time()
MessageLoop.run_while(lambda: time.time() < sleep_start + 1.0) # sleep for a bit so the db can find things
self.assertTrue(db.get_num_files() != 0)
log1("Searching...")
matching = db.find_files_matching("Makefile")
self.assertEqual(len(matching), 0)
goal = os.path.abspath("./tests/apps/test1.c")
matching = db.find_files_matching("test1.c")
self.assertTrue(goal in matching)
db.shutdown()
def test_remoted_basic(self):
db = RemoteClass(progdb.Database)
db.call_async.add_search_path("./tests")
sleep_start = time.time()
MessageLoop.run_while(lambda: time.time() < sleep_start + 1.0) # sleep for a bit so the db can find things
self.assertTrue(db.call.get_num_files() != 0)
goal = os.path.abspath("./tests/apps/test1.c")
log1("Searching...")
matching = db.call.find_files_matching("test1.c")
self.assertTrue(goal in matching)
db.shutdown()
| [
"progdb.Database",
"time.time",
"os.path.abspath"
] | [((721, 738), 'progdb.Database', 'progdb.Database', ([], {}), '()\n', (736, 738), False, 'import progdb\n'), ((792, 803), 'time.time', 'time.time', ([], {}), '()\n', (801, 803), False, 'import time\n'), ((973, 1012), 'os.path.abspath', 'os.path.abspath', (['"""./tests/apps/test1.c"""'], {}), "('./tests/apps/test1.c')\n", (988, 1012), False, 'import os\n'), ((1180, 1197), 'progdb.Database', 'progdb.Database', ([], {}), '()\n', (1195, 1197), False, 'import progdb\n'), ((1281, 1292), 'time.time', 'time.time', ([], {}), '()\n', (1290, 1292), False, 'import time\n'), ((1577, 1616), 'os.path.abspath', 'os.path.abspath', (['"""./tests/apps/test1.c"""'], {}), "('./tests/apps/test1.c')\n", (1592, 1616), False, 'import os\n'), ((1858, 1869), 'time.time', 'time.time', ([], {}), '()\n', (1867, 1869), False, 'import time\n'), ((2045, 2084), 'os.path.abspath', 'os.path.abspath', (['"""./tests/apps/test1.c"""'], {}), "('./tests/apps/test1.c')\n", (2060, 2084), False, 'import os\n'), ((838, 849), 'time.time', 'time.time', ([], {}), '()\n', (847, 849), False, 'import time\n'), ((1327, 1338), 'time.time', 'time.time', ([], {}), '()\n', (1336, 1338), False, 'import time\n'), ((1904, 1915), 'time.time', 'time.time', ([], {}), '()\n', (1913, 1915), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: thepoy
# @Email: <EMAIL>
# @File Name: __init__.py
# @Created: 2021-02-08 15:43:32
# @Modified: 2021-06-20 20:15:43
import os
import sys
import json
import argparse
from typing import Union
from up2b.up2b_lib.up2b_api import CONF_FILE, choose_image_bed
from up2b.up2b_lib.up2b_api.sm import SM
from up2b.up2b_lib.up2b_api.imgtu import Imgtu
from up2b.up2b_lib.up2b_api.gitee import Gitee
from up2b.up2b_lib.up2b_api.github import Github
from up2b.up2b_lib.constants import SM_MS, IMGTU, GITEE, GITHUB, IMAGE_BEDS_CODE
__version__ = "0.1.9"
IMAGE_BEDS = {SM_MS: SM, IMGTU: Imgtu, GITEE: Gitee, GITHUB: Github}
def _BuildParser():
parser = argparse.ArgumentParser(description="A package that can upload pictures to the image bed in Typora.")
parser.add_argument("-v", "--version", action="version", version=__version__)
parser.add_argument("-aac", action="store_true", help="allow automatic image compression")
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-c",
"--choose-site",
choices=[str(k) for k in IMAGE_BEDS.keys()],
metavar=str({v: k for k, v in IMAGE_BEDS_CODE.items()}),
help="choose the image bed you want to use and exit",
type=str,
)
group.add_argument(
"-l",
"--login",
nargs=2,
metavar=("USERNAME", "PASSWORD"),
help=(
"save the user authentication token after successful login. You "
"must enter the username and password after `-l` or `--login`"
),
type=str,
)
group.add_argument(
"-lg",
"--login-git",
nargs=4,
metavar=("ACCESS_TOKEN", "USERNAME", "REPO", "FOLDER"),
help="save the authentication information of the git website, such as gitee, github",
type=str,
)
group.add_argument("-p", "--image-path", help="upload only one picture", type=str)
group.add_argument(
"-ps",
"--images-path",
metavar="IMAGE_PATH",
nargs="+",
help="upload multiple pictures, the maximum is 10 pictures, use spaces to separate each image path.",
type=str,
)
return parser
def _read_image_bed(auto_compress: bool) -> Union[SM, Imgtu, Gitee, Github]:
try:
with open(CONF_FILE) as f:
conf = json.loads(f.read())
return IMAGE_BEDS[conf["image_bed"]](auto_compress=auto_compress)
except FileNotFoundError:
print(
"Error: The configuration file is not found, "
"you need to use `--choose-site` or `-c` to select the image bed first."
)
sys.exit(1)
def main() -> int:
args = _BuildParser().parse_args()
if args.choose_site:
choose_image_bed(int(args.choose_site))
if args.choose_site == str(GITEE):
print(
"Warning: resources bigger than 1M on `gitee` cannot be publicly accessed. "
"Please manually compress the image size to 1M or less, "
"or use the `-aac` parameter to enable the automatic compression function."
)
return 0
ib = _read_image_bed(args.aac)
if args.login:
if isinstance(ib, Gitee) or isinstance(ib, Github):
print("Error: you have chosen `gitee` or `github` as the image bed, please login with `-lg`")
return 1
ib.login(*args.login)
return 0
if args.login_git:
if not (isinstance(ib, Gitee) or isinstance(ib, Github)):
print("Error: the image bed you choose is not gitee or github, , please login with `-lg`")
return 1
ib.login(*args.login_git)
if args.image_path:
if not os.path.exists(args.image_path):
raise FileNotFoundError(f"{args.image_path}")
ib.upload_image(args.image_path)
return 0
if args.images_path:
ib.upload_images(args.images_path)
return 0
return 1
def run_main():
sys.exit(main())
if __name__ == "__main__":
run_main()
| [
"os.path.exists",
"up2b.up2b_lib.constants.IMAGE_BEDS_CODE.items",
"argparse.ArgumentParser",
"sys.exit"
] | [((706, 812), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A package that can upload pictures to the image bed in Typora."""'}), "(description=\n 'A package that can upload pictures to the image bed in Typora.')\n", (729, 812), False, 'import argparse\n'), ((2683, 2694), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2691, 2694), False, 'import sys\n'), ((3763, 3794), 'os.path.exists', 'os.path.exists', (['args.image_path'], {}), '(args.image_path)\n', (3777, 3794), False, 'import os\n'), ((1189, 1212), 'up2b.up2b_lib.constants.IMAGE_BEDS_CODE.items', 'IMAGE_BEDS_CODE.items', ([], {}), '()\n', (1210, 1212), False, 'from up2b.up2b_lib.constants import SM_MS, IMGTU, GITEE, GITHUB, IMAGE_BEDS_CODE\n')] |
import yaml
import twitter
from afinn import Afinn
def light_on(yml_path=".api_key"): #TODO: work something out for yml path
with open(yml_path) as f:
config = yaml.load(f)
api = twitter.Api(consumer_key=config['apikey'],
consumer_secret=config['apisecret'],
access_token_key=config['accesstoken'],
access_token_secret=config['tokensecret'])
statuses = api.GetUserTimeline(screen_name=config['user'])
# Get sentiment from newest tweet
afinn = Afinn()
score = afinn.score(statuses[0].text)
if score > 0:
return False, True, False
elif score < 0:
return True, False, False
else:
return True, True, True
if __name__ == '__main__':
print(light_on())
| [
"afinn.Afinn",
"yaml.load",
"twitter.Api"
] | [((197, 368), 'twitter.Api', 'twitter.Api', ([], {'consumer_key': "config['apikey']", 'consumer_secret': "config['apisecret']", 'access_token_key': "config['accesstoken']", 'access_token_secret': "config['tokensecret']"}), "(consumer_key=config['apikey'], consumer_secret=config[\n 'apisecret'], access_token_key=config['accesstoken'],\n access_token_secret=config['tokensecret'])\n", (208, 368), False, 'import twitter\n'), ((541, 548), 'afinn.Afinn', 'Afinn', ([], {}), '()\n', (546, 548), False, 'from afinn import Afinn\n'), ((173, 185), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (182, 185), False, 'import yaml\n')] |
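Editor's aside (not part of the dataset rows): the row above maps an AFINN sentiment score onto an RGB on/off triple for a light. A hedged sketch of the same sign-to-colour mapping without the Twitter dependency; the sample sentence is illustrative:

from afinn import Afinn

afinn = Afinn()

def colour_for(text):
    score = afinn.score(text)
    if score > 0:
        return False, True, False   # positive -> green only
    if score < 0:
        return True, False, False   # negative -> red only
    return True, True, True         # neutral -> all channels on

print(colour_for("This is wonderful"))   # (False, True, False)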
import scipy.signal as signal
import copy
import numpy as np
import ray
import os
import imageio
from Env_Builder import *
from Map_Generator2 import maze_generator
from parameters import *
# helper functions
def discount(x, gamma):
return signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
class Worker():
def __init__(self, metaAgentID, workerID, workers_per_metaAgent, env, localNetwork, sess, groupLock, learningAgent, global_step):
self.metaAgentID = metaAgentID
self.agentID = workerID
self.name = "worker_" + str(workerID)
self.num_workers = workers_per_metaAgent
self.global_step = global_step
self.nextGIF = 0
self.env = env
self.local_AC = localNetwork
self.groupLock = groupLock
self.learningAgent = learningAgent
self.sess = sess
self.loss_metrics = None
self.perf_metrics = None
self.allGradients = []
def __del__(self):
if NN_DEBUG_MODE:
print('((worker)__del__)meta{0}worker{1}'.format(self.metaAgentID, self.agentID))
def calculateImitationGradient(self, rollout, episode_count): # todo: check rollout
rollout = np.array(rollout, dtype=object)
# we calculate the loss differently for imitation
# if imitation=True the rollout is assumed to have different dimensions:
# [o[0],o[1],optimal_actions]
target_meangoal = rollout[:, 2]
target_block = rollout[:, 6]
rewards = rollout[:, 7]
advantages = rollout[:, 8]
# rnn_state = self.local_AC.state_init
# s1Value = self.sess.run(self.local_AC.value,
# feed_dict={self.local_AC.inputs : np.stack(rollout[:, 0]),
# self.local_AC.goal_pos : np.stack(rollout[:, 1]),
# self.local_AC.state_in[0]: rnn_state[0],
# self.local_AC.state_in[1]: rnn_state[1]})[0, 0]
#
# v = self.sess.run([self.local_AC.value,
# ],
# # todo: feed the message(last time step) here
# feed_dict={self.local_AC.inputs: np.stack(rollout[:, 0]), # state
# self.local_AC.goal_pos: np.stack(rollout[:, 1]), # goal vector
# self.local_AC.state_in[0]: rnn_state[0],
# self.local_AC.state_in[1]: rnn_state[1],
# })
# values = v[0,0]
# self.rewards_plus = np.asarray(rewards.tolist() + [s1Value])
# discounted_rewards = discount(self.rewards_plus, gamma)[:-1]
# self.value_plus = np.asarray(values.tolist() + [s1Value])
# advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
# advantages = discount(advantages, gamma)
temp_actions = np.stack(rollout[:, 3])
rnn_state = self.local_AC.state_init
feed_dict = {self.global_step : episode_count,
self.local_AC.inputs : np.stack(rollout[:, 0]),
self.local_AC.goal_pos : np.stack(rollout[:, 1]),
self.local_AC.optimal_actions: np.stack(rollout[:, 3]),
self.local_AC.state_in[0] : rnn_state[0],
self.local_AC.state_in[1] : rnn_state[1],
self.local_AC.train_imitation: (rollout[:, 4]),
self.local_AC.target_v : np.stack(temp_actions),
self.local_AC.train_value : temp_actions,
self.local_AC.advantages : advantages,
self.local_AC.target_meangoals : np.stack(target_meangoal),
self.local_AC.target_blockings : np.stack(target_block),
}
# print('feed ', feed_dict)
v_l, i_l, local_vars, i_grads = self.sess.run([self.local_AC.value_loss,
self.local_AC.imitation_loss,
self.local_AC.local_vars,
self.local_AC.i_grads
],
feed_dict=feed_dict)
if NN_DEBUG_MODE:
print('v_l', v_l)
print('i_l', i_l)
# print('local_vars', local_vars)
print('l_v', local_vars)
# print('igrads', i_grads)
# raise(TypeError)
return [i_l], i_grads
def calculateGradient(self, rollout, bootstrap_value, episode_count, rnn_state0):
# ([s,a,r,s1,v[0,0]])
rollout = np.array(rollout, dtype=object) # todo: meangoal, blocking
inputs = rollout[:, 0]
goals = rollout[:, 6]
target_meangoal = rollout[:, 7]
target_block = rollout[:, 8]
# meangoal = rollout[:, -5]
# blocking = rollout[:, -4]
# message = rollout[:, -3]
actions = rollout[:, 1]
rewards = rollout[:, 2]
values = rollout[:, 4]
valids = rollout[:, 5]
train_value = rollout[:, -2]
train_policy = rollout[:, -1]
# Here we take the rewards and values from the rollout, and use them to
# generate the advantage and discounted returns. (With bootstrapping)
# The advantage function uses "Generalized Advantage Estimation"
self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
discounted_rewards = discount(self.rewards_plus, gamma)[:-1]
self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
advantages = discount(advantages, gamma)
num_samples = min(EPISODE_SAMPLES, len(advantages))
sampleInd = np.sort(np.random.choice(advantages.shape[0], size=(num_samples,), replace=False))
feed_dict = {
self.global_step : episode_count,
self.local_AC.target_v : np.stack(discounted_rewards),
self.local_AC.inputs : np.stack(inputs),
self.local_AC.goal_pos : np.stack(goals),
self.local_AC.actions : actions,
self.local_AC.target_meangoals : np.stack(target_meangoal),
self.local_AC.target_blockings : np.stack(target_block),
# self.local_AC.block : block,
# self.local_AC.message : message,
self.local_AC.train_valid: np.stack(valids),
self.local_AC.advantages : advantages,
self.local_AC.train_value: train_value,
self.local_AC.state_in[0]: rnn_state0[0],
self.local_AC.state_in[1]: rnn_state0[1],
# self.local_AC.train_policy: train_policy,
self.local_AC.train_valids: np.vstack(train_policy)
}
v_l, p_l, valid_l, e_l, g_n, v_n, blocking_l, meangoal_l, message_l, grads = self.sess.run([self.local_AC.value_loss,
self.local_AC.policy_loss,
self.local_AC.valid_loss,
self.local_AC.entropy,
self.local_AC.grad_norms,
self.local_AC.var_norms,
self.local_AC.blocking_loss,
self.local_AC.mean_goal_loss,
self.local_AC.message_loss,
self.local_AC.grads],
feed_dict=feed_dict)
return [v_l, p_l, valid_l, e_l, blocking_l, meangoal_l, message_l, g_n, v_n], grads
def imitation_learning_only(self, episode_count):
self.env._reset()
rollouts, targets_done = self.parse_path(episode_count)
# rollouts.append([])
if rollouts is None:
return None, 0
gradients = []
losses = []
for i in range(self.num_workers):
train_buffer = rollouts[i]
imitation_loss, grads = self.calculateImitationGradient(train_buffer, episode_count)
gradients.append(grads)
losses.append(imitation_loss)
return gradients, losses
def run_episode_multithreaded(self, episode_count, coord):
if NN_DEBUG_MODE:
print('(Worker-RL)Begin to run! meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
if self.metaAgentID < NUM_IL_META_AGENTS:
assert(1==0)
# print("THIS CODE SHOULD NOT TRIGGER")
# self.is_imitation = True
# self.imitation_learning_only()
global episode_lengths, episode_mean_values, episode_invalid_ops, episode_stop_ops, episode_rewards, episode_finishes
# print('episode_mean_values', episode_lengths)
num_agents = self.num_workers
with self.sess.as_default(), self.sess.graph.as_default():
while self.shouldRun(coord, episode_count):
episode_buffer, episode_values = [], []
episode_reward = episode_step_count = episode_inv_count = targets_done =episode_stop_count = 0
self.synchronize()
# Initial state from the environment
if self.agentID == 1:
if NN_DEBUG_MODE:
print('(Worker-RL)self.env._reset(a) meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
self.env._reset()
if NN_DEBUG_MODE:
print('(Worker-RL)self.env._reset(b) meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
joint_observations[self.metaAgentID] = self.env._observe()
if NN_DEBUG_MODE:
print('(Worker-RL)self.synchronize(1a) meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
self.synchronize() # synchronize starting time of the threads
if NN_DEBUG_MODE:
print('(Worker-RL)self.synchronize(1b) meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
# Get Information For Each Agent
validActions = self.env.listValidActions(self.agentID, joint_observations[self.metaAgentID][self.agentID])
s = joint_observations[self.metaAgentID][self.agentID]
rnn_state = self.local_AC.state_init
rnn_state0 = rnn_state
self.synchronize() # synchronize starting time of the threads
swarm_reward[self.metaAgentID] = 0
swarm_targets[self.metaAgentID] = 0
episode_rewards[self.metaAgentID] = []
episode_finishes[self.metaAgentID] = []
episode_lengths[self.metaAgentID] = []
episode_mean_values[self.metaAgentID] = []
episode_invalid_ops[self.metaAgentID] = []
episode_stop_ops[self.metaAgentID] = []
# ===============================start training =======================================================================
# RL
if True:
# prepare to save GIF
saveGIF = False
global GIFS_FREQUENCY_RL
if OUTPUT_GIFS and self.agentID == 1 and ((not TRAINING) or (episode_count >= self.nextGIF)):
saveGIF = True
self.nextGIF = episode_count + GIFS_FREQUENCY_RL
GIF_episode = int(episode_count)
GIF_frames = [self.env._render()]
# start RL
self.env.finished = False
agent_done = False
while not self.env.finished:
if not agent_done:
# todo: add multi-output here
a_dist, v, rnn_state, \
blocking, meangoal, message = self.sess.run([self.local_AC.policy,
self.local_AC.value,
self.local_AC.state_out,
self.local_AC.blocking,
self.local_AC.mean_goal,
self.local_AC.message,
],
# todo: feed the message(last time step) here
feed_dict={self.local_AC.inputs : [s[0]], # state
self.local_AC.goal_pos : [s[1]], # goal vector
self.local_AC.state_in[0]: rnn_state[0],
self.local_AC.state_in[1]: rnn_state[1],
})
skipping_state = False
train_policy = train_val = 1
if not skipping_state and not agent_done:
if not (np.argmax(a_dist.flatten()) in validActions):
episode_inv_count += 1
train_val = 0
train_valid = np.zeros(a_size)
train_valid[validActions] = 1
valid_dist = np.array([a_dist[0, validActions]])
valid_dist /= np.sum(valid_dist)
a = validActions[np.random.choice(range(valid_dist.shape[1]), p=valid_dist.ravel())]
joint_actions[self.metaAgentID][self.agentID] = a
if a == 0:
episode_stop_count += 1
# public the message here 'joint_comms'
joint_comms[self.metaAgentID][self.agentID] = message
joint_blocking[self.metaAgentID][self.agentID] = self.env.individual_blocking[self.agentID]
# Make A Single Agent Gather All Information
self.synchronize()
if self.agentID == 1:
# Add the message channel, set the communication channel while set the state.
all_obs, all_rewards = self.env.step_all(movement_dict=joint_actions[self.metaAgentID],
msg_dict=joint_comms[self.metaAgentID]) # already contain the local comms_map
for i in range(1, self.num_workers+1):
joint_observations[self.metaAgentID][i] = all_obs[i]
joint_rewards[self.metaAgentID][i] = all_rewards[i]
joint_done[self.metaAgentID][i] = (self.env.world.agents[i].status >=1)
if saveGIF and self.agentID == 1:
GIF_frames.append(self.env._render())
self.synchronize() # synchronize threads
# Get observation,reward, valid actions for each agent
s1 = joint_observations[self.metaAgentID][self.agentID]
r = copy.deepcopy(joint_rewards[self.metaAgentID][self.agentID])
if not agent_done:
validActions = self.env.listValidActions(self.agentID, s1)
self.synchronize()
# Append to Appropriate buffers
if not skipping_state and not agent_done:
episode_buffer.append([s[0], a, joint_rewards[self.metaAgentID][self.agentID] , s1, v[0, 0], train_valid, s[1], s[2], joint_blocking[self.metaAgentID][self.agentID], meangoal ,blocking, message, train_val,train_policy])
episode_values.append(v[0, 0])
episode_reward += r
episode_step_count += 1
# Update State
s = s1
# If the episode hasn't ended, but the experience buffer is full, then we
# make an update step using that experience rollout.
if (not agent_done) and (len(episode_buffer)>1) and ((len(episode_buffer) % EXPERIENCE_BUFFER_SIZE == 0) or joint_done[self.metaAgentID][self.agentID] or episode_step_count==max_episode_length):
# Since we don't know what the true final return is,
# we "bootstrap" from our current value estimation.
if len(episode_buffer) >= EXPERIENCE_BUFFER_SIZE:
train_buffer = episode_buffer[-EXPERIENCE_BUFFER_SIZE:]
else:
train_buffer = episode_buffer[:]
# if joint_done[self.metaAgentID][self.agentID]:
# s1Value = 0 # Terminal state
# episode_buffer = []
# targets_done += 1
# else:
s1Value = self.sess.run(self.local_AC.value,
feed_dict={self.local_AC.inputs : np.array([s[0]]),
self.local_AC.goal_pos : [s[1]],
self.local_AC.state_in[0]: rnn_state[0],
self.local_AC.state_in[1]: rnn_state[1]})[0, 0]
self.loss_metrics, grads = self.calculateGradient(train_buffer, s1Value, episode_count, rnn_state0)
self.allGradients.append(grads)
rnn_state0 = rnn_state
self.synchronize()
# finish condition: reach max-len or all agents are done under one-shot mode
if episode_step_count >= max_episode_length:
break
episode_lengths[self.metaAgentID].append(episode_step_count)
episode_mean_values[self.metaAgentID].append(np.nanmean(episode_values))
episode_invalid_ops[self.metaAgentID].append(episode_inv_count)
episode_stop_ops[self.metaAgentID].append(episode_stop_count)
swarm_reward[self.metaAgentID] += episode_reward
swarm_targets[self.metaAgentID] += targets_done
self.synchronize()
if self.agentID == 1:
episode_rewards[self.metaAgentID].append(swarm_reward[self.metaAgentID])
episode_finishes[self.metaAgentID].append(swarm_targets[self.metaAgentID])
if saveGIF:
make_gif(np.array(GIF_frames),
'{}/episode_{:d}_{:d}_{:.1f}.gif'.format(gifs_path,GIF_episode, episode_step_count,
swarm_reward[self.metaAgentID]))
self.synchronize()
perf_metrics = np.array([
episode_step_count,
np.nanmean(episode_values),
episode_inv_count,
episode_stop_count,
episode_reward,
targets_done
])
assert len(self.allGradients) > 0, 'Empty gradients at end of RL episode?!'
return perf_metrics
def synchronize(self):
# handy thing for keeping track of which to release and acquire
if not hasattr(self, "lock_bool"):
self.lock_bool = False
self.groupLock.release(int(self.lock_bool), self.name)
self.groupLock.acquire(int(not self.lock_bool), self.name)
self.lock_bool = not self.lock_bool
def work(self, currEpisode, coord, saver, allVariables):
'''
        Interacts with the environment. The agent gets either gradients or an experience buffer.
'''
self.currEpisode = currEpisode
if COMPUTE_TYPE == COMPUTE_OPTIONS.multiThreaded:
self.perf_metrics = self.run_episode_multithreaded(currEpisode, coord)
else:
print("not implemented")
assert(1==0)
# gradients are accessed by the runner in self.allGradients
return
def parse_path(self, episode_count):
"""needed function to take the path generated from M* and create the
observations and actions for the agent
path: the exact path ouput by M*, assuming the correct number of agents
returns: the list of rollouts for the "episode":
list of length num_agents with each sublist a list of tuples
(observation[0],observation[1],optimal_action,reward)"""
global GIF_frames, SAVE_IL_GIF, IL_GIF_PROB
saveGIF = False
if np.random.rand() < IL_GIF_PROB:
saveGIF = True
if saveGIF and SAVE_IL_GIF:
GIF_frames = [self.env._render()]
result = [[] for i in range(self.num_workers)]
msg = np.float32(0)
blocking = np.float32(0)
reward = np.float32(0)
advantages = np.float32(0)
meangoal = np.array([0., 0.], dtype='float32')
actions = {}
o = {}
finished = {}
train_imitation = {}
count_finished = 0
pos_buffer = []
goal_buffer = []
all_obs = self.env._observe()
for agentID in range(1, self.num_workers + 1):
o[agentID] = all_obs[agentID]
train_imitation[agentID] = 1
finished[agentID] = 0
step_count = 0
while step_count <= max_episode_length and count_finished < self.num_workers:
path = self.env.expert_until_first_goal()
if path is None: # solution not exists
if step_count != 0:
return result, 0
                print('(worker)meta{0}worker{1} Failed initially!'.format(self.metaAgentID, self.agentID))
return None, 0
else:
                print('(worker)meta{0}worker{1} Success initially!'.format(self.metaAgentID, self.agentID))
none_on_goal = True # todo:
path_step = 1
while none_on_goal and step_count <= max_episode_length and count_finished < self.num_workers:
positions = []
goals = []
for i in range(self.num_workers):
agent_id = i + 1
# if finished[agent_id]:
# actions[agent_id] = 0
# else:
# next_pos = path[path_step][i]
# diff = tuple_minus(next_pos, self.env.world.getPos(agent_id))
# try:
# actions[agent_id] = dir2action(diff)
# except:
# print('(parse_path)pos_buffer', pos_buffer)
# print('(parse_path)goal_buffer', goal_buffer)
# actions[agent_id] = dir2action(diff)
next_pos = path[path_step][i]
diff = tuple_minus(next_pos, self.env.world.getPos(agent_id))
try:
actions[agent_id] = dir2action(diff)
except:
print('(parse_path)pos_buffer', pos_buffer)
print('(parse_path)goal_buffer', goal_buffer)
actions[agent_id] = dir2action(diff)
if ENV_DEBUG_MODE:
print('(parse_path)actions', actions)
all_obs, _ = self.env.step_all(actions)
for i in range(self.num_workers):
agent_id = i + 1
positions.append(self.env.world.getPos(agent_id))
goals.append(self.env.world.getGoal(agent_id))
result[i].append(
[o[agent_id][0], o[agent_id][1], o[agent_id][2], actions[agent_id], train_imitation[agent_id],
msg, blocking, reward, advantages])
if self.env.world.agents[agent_id].status >= 1 and finished[agent_id] != 1:
# none_on_goal = False # todo:
finished[agent_id] = 1 # todo:
count_finished += 1
pos_buffer.append(positions)
goal_buffer.append(goals)
if saveGIF and SAVE_IL_GIF:
GIF_frames.append(self.env._render())
o = all_obs
step_count += 1
path_step += 1
if saveGIF and SAVE_IL_GIF:
make_gif(np.array(GIF_frames),
'{}/episodeIL_{}.gif'.format(gifs_path, episode_count))
return result, count_finished
def shouldRun(self, coord, episode_count=None):
if TRAINING:
return not coord.should_stop()
| [
"numpy.random.rand",
"numpy.random.choice",
"numpy.array",
"scipy.signal.lfilter",
"numpy.stack",
"numpy.nanmean",
"numpy.vstack",
"numpy.zeros",
"copy.deepcopy",
"numpy.sum",
"numpy.float32"
] | [((248, 297), 'scipy.signal.lfilter', 'signal.lfilter', (['[1]', '[1, -gamma]', 'x[::-1]'], {'axis': '(0)'}), '([1], [1, -gamma], x[::-1], axis=0)\n', (262, 297), True, 'import scipy.signal as signal\n'), ((1198, 1229), 'numpy.array', 'np.array', (['rollout'], {'dtype': 'object'}), '(rollout, dtype=object)\n', (1206, 1229), True, 'import numpy as np\n'), ((2954, 2977), 'numpy.stack', 'np.stack', (['rollout[:, 3]'], {}), '(rollout[:, 3])\n', (2962, 2977), True, 'import numpy as np\n'), ((4761, 4792), 'numpy.array', 'np.array', (['rollout'], {'dtype': 'object'}), '(rollout, dtype=object)\n', (4769, 4792), True, 'import numpy as np\n'), ((22268, 22281), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (22278, 22281), True, 'import numpy as np\n'), ((22301, 22314), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (22311, 22314), True, 'import numpy as np\n'), ((22332, 22345), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (22342, 22345), True, 'import numpy as np\n'), ((22367, 22380), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (22377, 22380), True, 'import numpy as np\n'), ((22400, 22437), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': '"""float32"""'}), "([0.0, 0.0], dtype='float32')\n", (22408, 22437), True, 'import numpy as np\n'), ((3142, 3165), 'numpy.stack', 'np.stack', (['rollout[:, 0]'], {}), '(rollout[:, 0])\n', (3150, 3165), True, 'import numpy as np\n'), ((3219, 3242), 'numpy.stack', 'np.stack', (['rollout[:, 1]'], {}), '(rollout[:, 1])\n', (3227, 3242), True, 'import numpy as np\n'), ((3296, 3319), 'numpy.stack', 'np.stack', (['rollout[:, 3]'], {}), '(rollout[:, 3])\n', (3304, 3319), True, 'import numpy as np\n'), ((3574, 3596), 'numpy.stack', 'np.stack', (['temp_actions'], {}), '(temp_actions)\n', (3582, 3596), True, 'import numpy as np\n'), ((3787, 3812), 'numpy.stack', 'np.stack', (['target_meangoal'], {}), '(target_meangoal)\n', (3795, 3812), True, 'import numpy as np\n'), ((3873, 3895), 'numpy.stack', 'np.stack', (['target_block'], {}), '(target_block)\n', (3881, 3895), True, 'import numpy as np\n'), ((5938, 6011), 'numpy.random.choice', 'np.random.choice', (['advantages.shape[0]'], {'size': '(num_samples,)', 'replace': '(False)'}), '(advantages.shape[0], size=(num_samples,), replace=False)\n', (5954, 6011), True, 'import numpy as np\n'), ((6129, 6157), 'numpy.stack', 'np.stack', (['discounted_rewards'], {}), '(discounted_rewards)\n', (6137, 6157), True, 'import numpy as np\n'), ((6198, 6214), 'numpy.stack', 'np.stack', (['inputs'], {}), '(inputs)\n', (6206, 6214), True, 'import numpy as np\n'), ((6255, 6270), 'numpy.stack', 'np.stack', (['goals'], {}), '(goals)\n', (6263, 6270), True, 'import numpy as np\n'), ((6366, 6391), 'numpy.stack', 'np.stack', (['target_meangoal'], {}), '(target_meangoal)\n', (6374, 6391), True, 'import numpy as np\n'), ((6439, 6461), 'numpy.stack', 'np.stack', (['target_block'], {}), '(target_block)\n', (6447, 6461), True, 'import numpy as np\n'), ((6600, 6616), 'numpy.stack', 'np.stack', (['valids'], {}), '(valids)\n', (6608, 6616), True, 'import numpy as np\n'), ((6925, 6948), 'numpy.vstack', 'np.vstack', (['train_policy'], {}), '(train_policy)\n', (6934, 6948), True, 'import numpy as np\n'), ((22058, 22074), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (22072, 22074), True, 'import numpy as np\n'), ((25919, 25939), 'numpy.array', 'np.array', (['GIF_frames'], {}), '(GIF_frames)\n', (25927, 25939), True, 'import numpy as np\n'), ((16125, 16185), 'copy.deepcopy', 'copy.deepcopy', 
(['joint_rewards[self.metaAgentID][self.agentID]'], {}), '(joint_rewards[self.metaAgentID][self.agentID])\n', (16138, 16185), False, 'import copy\n'), ((19196, 19222), 'numpy.nanmean', 'np.nanmean', (['episode_values'], {}), '(episode_values)\n', (19206, 19222), True, 'import numpy as np\n'), ((14058, 14074), 'numpy.zeros', 'np.zeros', (['a_size'], {}), '(a_size)\n', (14066, 14074), True, 'import numpy as np\n'), ((14175, 14210), 'numpy.array', 'np.array', (['[a_dist[0, validActions]]'], {}), '([a_dist[0, validActions]])\n', (14183, 14210), True, 'import numpy as np\n'), ((14253, 14271), 'numpy.sum', 'np.sum', (['valid_dist'], {}), '(valid_dist)\n', (14259, 14271), True, 'import numpy as np\n'), ((20287, 20313), 'numpy.nanmean', 'np.nanmean', (['episode_values'], {}), '(episode_values)\n', (20297, 20313), True, 'import numpy as np\n'), ((19880, 19900), 'numpy.array', 'np.array', (['GIF_frames'], {}), '(GIF_frames)\n', (19888, 19900), True, 'import numpy as np\n'), ((18233, 18249), 'numpy.array', 'np.array', (['[s[0]]'], {}), '([s[0]])\n', (18241, 18249), True, 'import numpy as np\n')] |
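Editor's aside (not part of the dataset rows): the `discount` helper above uses `scipy.signal.lfilter` on a reversed array to compute a discounted cumulative return. A small sketch checking it against a plain loop (values are illustrative):

import numpy as np
import scipy.signal as signal

def discount_lfilter(x, gamma):
    return signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

def discount_loop(x, gamma):
    out, running = np.zeros(len(x)), 0.0
    for i in reversed(range(len(x))):
        running = x[i] + gamma * running
        out[i] = running
    return out

rewards = np.array([1.0, 0.0, 2.0])
print(discount_lfilter(rewards, 0.9))                                         # [2.62 1.8  2.  ]
print(np.allclose(discount_lfilter(rewards, 0.9), discount_loop(rewards, 0.9)))  # True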
from pathlib import Path
import json
import attr
import joblib
import numpy as np
import crowsetta
import vak
from vak.utils.data import reshape_data_for_batching
BIRDS = ['bl26lb16',
'gy6or6',
'or60yw70',
'gr41rd51',
]
TRAIN_SET_DURATIONS = [60, 120, 480]
HERE = Path(__file__).parent
DATA_DIR = HERE.joinpath('../../data/BFSongRepository')
def main():
for train_set_dur in TRAIN_SET_DURATIONS:
for bird in BIRDS:
all_predict_vds_paths = DATA_DIR.joinpath(f'{train_set_dur}s').joinpath(bird).joinpath('vds').glob('*predict.vds.json')
for predict_vds_path in all_predict_vds_paths:
print(f'resegmenting {predict_vds_path}')
predict_vds = vak.Dataset.load(predict_vds_path)
new_predict_vds_path = str(predict_vds_path).replace('predict.vds.json', 'predict.resegment.vds.json')
if not Path(new_predict_vds_path).exists():
predict_vds = predict_vds.load_spects()
lbl_tb_predict = predict_vds.lbl_tb_list()
lbl_tb_predict_reseg = [vak.utils.labels.resegment(lbl_tb,min_dur_tb=2,majority_vote=True) for lbl_tb in lbl_tb_predict]
labels_reseg, onsets_reseg, offsets_reseg = [], [], []
for lbl_tb in lbl_tb_predict_reseg:
lbl, on, off = vak.utils.labels.lbl_tb2segments(lbl_tb,labelmap=predict_vds.labelmap,timebin_dur=predict_vds.voc_list[0].metaspect.timebin_dur)
labels_reseg.append(lbl)
onsets_reseg.append(on)
offsets_reseg.append(off)
new_annots = []
for lbl, on, off, voc in zip(labels_reseg, onsets_reseg, offsets_reseg, predict_vds.voc_list):
annot = crowsetta.Sequence.from_keyword(labels=lbl, onsets_s=on, offsets_s=off, file=voc.annot.file)
new_annots.append(annot)
new_voc_list = [attr.evolve(voc, annot=annot) for voc, annot in zip(predict_vds.voc_list, new_annots)]
predict_vds_reseg = attr.evolve(predict_vds, voc_list=new_voc_list)
print(f'saving resegmented Dataset in {new_predict_vds_path}')
predict_vds_reseg = predict_vds_reseg.clear_spects()
predict_vds_reseg.save(new_predict_vds_path)
else:
print(f'skipping {new_predict_vds_path}, already exists')
if __name__ == '__main__':
main()
| [
"vak.Dataset.load",
"pathlib.Path",
"crowsetta.Sequence.from_keyword",
"vak.utils.labels.lbl_tb2segments",
"attr.evolve",
"vak.utils.labels.resegment"
] | [((305, 319), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (309, 319), False, 'from pathlib import Path\n'), ((748, 782), 'vak.Dataset.load', 'vak.Dataset.load', (['predict_vds_path'], {}), '(predict_vds_path)\n', (764, 782), False, 'import vak\n'), ((2164, 2211), 'attr.evolve', 'attr.evolve', (['predict_vds'], {'voc_list': 'new_voc_list'}), '(predict_vds, voc_list=new_voc_list)\n', (2175, 2211), False, 'import attr\n'), ((1130, 1198), 'vak.utils.labels.resegment', 'vak.utils.labels.resegment', (['lbl_tb'], {'min_dur_tb': '(2)', 'majority_vote': '(True)'}), '(lbl_tb, min_dur_tb=2, majority_vote=True)\n', (1156, 1198), False, 'import vak\n'), ((1398, 1532), 'vak.utils.labels.lbl_tb2segments', 'vak.utils.labels.lbl_tb2segments', (['lbl_tb'], {'labelmap': 'predict_vds.labelmap', 'timebin_dur': 'predict_vds.voc_list[0].metaspect.timebin_dur'}), '(lbl_tb, labelmap=predict_vds.labelmap,\n timebin_dur=predict_vds.voc_list[0].metaspect.timebin_dur)\n', (1430, 1532), False, 'import vak\n'), ((1858, 1954), 'crowsetta.Sequence.from_keyword', 'crowsetta.Sequence.from_keyword', ([], {'labels': 'lbl', 'onsets_s': 'on', 'offsets_s': 'off', 'file': 'voc.annot.file'}), '(labels=lbl, onsets_s=on, offsets_s=off,\n file=voc.annot.file)\n', (1889, 1954), False, 'import crowsetta\n'), ((2037, 2066), 'attr.evolve', 'attr.evolve', (['voc'], {'annot': 'annot'}), '(voc, annot=annot)\n', (2048, 2066), False, 'import attr\n'), ((925, 951), 'pathlib.Path', 'Path', (['new_predict_vds_path'], {}), '(new_predict_vds_path)\n', (929, 951), False, 'from pathlib import Path\n')] |
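Editor's aside (not part of the dataset rows): the row above rebuilds annotated vocalisations with `attr.evolve`, which copies an attrs instance while replacing selected fields. A minimal illustration with a made-up class (not vak's actual types):

import attr

@attr.s
class Voc:
    path = attr.ib()
    annot = attr.ib(default=None)

v = Voc(path="song1.wav")
v2 = attr.evolve(v, annot="resegmented")
print(v.annot, v2.annot)   # None resegmented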
#!/usr/bin/env python3
#
# Copyright 2019 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
import sys
import unittest
suffix = ""
class Finder:
def __init__(self):
sock = open("workdir/streets%s.csv" % suffix)
first = True
self.streets = []
for line in sock.readlines():
if first:
first = False
continue
cols = line.strip().split("\t")
if len(cols) < 2:
continue
            # No idea why this shows up; checking manually, it seems to have house numbers.
            # The 3rd doesn't seem to have odd house numbers, so it is not tagged, but there is nothing to improve, either.
            # The 4th is only a small part of the whole way in case of the sashegy
            # filtering, and this part in fact doesn't have house numbers.
if not (cols[1] in ("Barackmag utca", "Higany utca", "Gazdagréti út", "Harasztos út")):
self.streets.append(cols[1])
self.streets = sorted(set(self.streets))
sock.close()
sock = open("workdir/street-housenumbers%s.csv" % suffix)
first = True
self.streetsWithHouses = []
self.warnings = []
for line in sock.readlines():
if first:
first = False
continue
cols = line.strip().split("\t")
if len(cols[1]):
# Ignore house numbers outside this area (ideally we shouldn't even get these).
if cols[1] in self.streets:
self.streetsWithHouses.append(cols[1])
else:
self.warnings.append("WARNING: '%s': house number without addr:street, please fix! (id=%s)" % (cols[2], cols[0]))
self.streetsWithHouses = sorted(set(self.streetsWithHouses))
sock.close()
self.streetsWithoutHouses = [street for street in self.streets if street not in self.streetsWithHouses]
assert len(self.streets) == len(self.streetsWithHouses) + len(self.streetsWithoutHouses)
class Test(unittest.TestCase):
def test_none(self):
"""This test makes sure that there are no streets without house numbers
in the downloaded area."""
finder = Finder()
self.maxDiff = None
self.assertEqual([], finder.warnings)
self.assertEqual([], finder.streetsWithoutHouses)
if __name__ == '__main__':
if "-s" in sys.argv:
finder = Finder()
print("%s streets in total." % len(finder.streets))
print("%s streets have at least one house number." % len(finder.streetsWithHouses))
print("%s streets have no house number." % len(finder.streetsWithoutHouses))
print("Coverage is %s%%." % round(float(len(finder.streetsWithHouses)) * 100 / len(finder.streets)))
else:
if len(sys.argv) > 1:
suffix = sys.argv[1]
sys.argv = sys.argv[:1]
unittest.main()
# vim:set shiftwidth=4 softtabstop=4 expandtab:
| [
"unittest.main"
] | [((2996, 3011), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3009, 3011), False, 'import unittest\n')] |
import os
import csv
import statistics
from scipy import stats
import scipy as sp
import numpy as np
rootdir = '/home/yukiho/results'
#~ Date,Population,Capacity,home,shop,greens,temple,church,orphanage,1,2,3,4,5
csvheader = ['scenario','year','N']
#~ csvheader += ['population mean','population sd']
#~ csvheader += ['Capacity mean','Capacity sd']
csvheader += ['homemean','homesd','homeste', 'homelb', 'homeub', 'homemin', 'homemax' ]
csvheader += ['shopmean','shopsd', 'shopste', 'shoplb', 'shopub', 'shopmin', 'shopmax']
#~ csvheader += ['greens mean','greens sd']
#~ csvheader += ['temple mean','temple sd']
#~ csvheader += ['church mean','church sd']
#~ csvheader += ['orphanage mean','orphanage sd']
#~ csvheader += ['1 storey mean','1 storey sd']
#~ csvheader += ['2 storey mean','2 storey sd']
#~ csvheader += ['3 storey mean','3 storey sd']
#~ csvheader += ['4 storey mean','4 storey sd']
#~ csvheader += ['5 storey mean','5 storey sd']
csvrows = []
# write header first
with open(os.path.join(rootdir,'e1-all.csv'),'w') as csvfile:
writer = csv.writer(csvfile,delimiter=',',quotechar='"',quoting=csv.QUOTE_MINIMAL)
writer.writerow(csvheader)
i = 1979
while i <= 2016:
for dirs in os.listdir(rootdir):
print (dirs)
population = []
capacity = []
homes = []
shops = []
greens = []
temple = []
church = []
orphanage = []
floor1 = []
floor2 = []
floor3 = []
floor4 = []
floor5 = []
for subdir, sdirs, files in os.walk(os.path.join(rootdir,dirs)):
for file in files:
if file.endswith('.csv'):
with open(os.path.join(subdir,file),'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
next(reader)
for row in reader:
if row[0] == str(i):
#~ population += [float(row[1])]
#~ capacity += [float(row[2])]
homes += [float(row[3])]
shops += [float(row[4])]
#~ greens += [float(row[5])]
#~ temple += [float(row[6])]
#~ church += [float(row[7])]
#~ orphanage += [float(row[8])]
#~ floor1 += [float(row[9])]
#~ floor2 += [float(row[10])]
#~ floor3 += [float(row[11])]
#~ floor4 += [float(row[12])]
#~ floor5 += [float(row[13])]
if homes != []:
parms = dirs.replace('results','')
csvrow = [parms,i]
#~ csvheader += ['home mean','home sd','home cv','home ste', 'home 95 lb', 'home 95 ub', 'home min', 'home max' ]
#~ csvheader += ['shop mean','shop sd','shop cv', 'shop ste', 'shop 95 lb', 'shop 95 ub', 'shop min', 'shop max']
# stats for homes
n, (min,max), mean, var, skew, kurt = stats.describe(homes[0:20])
std = np.std(homes[0:20])
lb, ub = stats.norm.interval(0.05,loc=mean,scale=std)
ste = stats.sem(homes[0:20],axis=None)
csvrow += [n, mean, std, ste,lb,ub,min,max]
#stats for shops
n, (min,max), mean, var, skew, kurt = stats.describe(shops[0:20])
std = np.std(shops[0:20])
lb, ub = stats.norm.interval(0.05,loc=mean,scale=std)
ste = stats.sem(shops[0:20],axis=None)
csvrow += [mean, std, ste,lb,ub,min,max]
#~ homesmean = statistics.mean(homes)
#~ shopsmean = statistics.mean(shops)
#~ greensmean = statistics.mean(greens)
#~ templemean = statistics.mean(temple)
#~ churchmean = statistics.mean(church)
#~ orphanagemean = statistics.mean(orphanage)
#~ floor1mean = statistics.mean(floor1)
#~ floor2mean = statistics.mean(floor2)
#~ floor3mean = statistics.mean(floor3)
#~ floor4mean = statistics.mean(floor4)
#~ floor5mean = statistics.mean(floor5)
#~ populationstdev = statistics.stdev(population)
#~ capacitystdev = statistics.stdev(capacity)
#~ homesstdev = statistics.stdev(homes)
#~ shopsstdev = statistics.stdev(shops)
#~ greensstdev = statistics.stdev(greens)
#~ templestdev = statistics.stdev(temple)
#~ churchstdev = statistics.stdev(church)
#~ orphanagestdev = statistics.stdev(orphanage)
#~ floor1stdev = statistics.stdev(floor1)
#~ floor2stdev = statistics.stdev(floor2)
#~ floor3stdev = statistics.stdev(floor3)
#~ floor4stdev = statistics.stdev(floor4)
#~ floor5stdev = statistics.stdev(floor5)
#~ f_value, p_value = stats.f_oneway(*all_homes)
#~ csvrow += [p_value]
#~ csvrow += [populationmean,populationstdev]
#~ csvrow += [capacitymean,capacitystdev]
#~ csvrow += [homesmean,homesstdev]
#~ csvrow += [shopsmean,shopsstdev]
#~ csvrow += [greensmean,greensstdev]
#~ csvrow += [templemean,templestdev]
#~ csvrow += [churchmean,churchstdev]
#~ csvrow += [orphanagemean,orphanagestdev]
#~ csvrow += [floor1mean,floor1stdev]
#~ csvrow += [floor2mean,floor2stdev]
#~ csvrow += [floor3mean,floor3stdev]
#~ csvrow += [floor4mean,floor4stdev]
#~ csvrow += [floor5mean,floor5stdev]
csvrows += [csvrow]
i += 1
with open(os.path.join(rootdir,'e1-all.csv'),'a') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',quotechar='"',quoting=csv.QUOTE_MINIMAL)
for row in csvrows:
csvwriter.writerow(row)
| [
"os.listdir",
"scipy.stats.describe",
"csv.writer",
"os.path.join",
"scipy.stats.norm.interval",
"scipy.stats.sem",
"numpy.std",
"csv.reader"
] | [((1060, 1136), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csvfile, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (1070, 1136), False, 'import csv\n'), ((1213, 1232), 'os.listdir', 'os.listdir', (['rootdir'], {}), '(rootdir)\n', (1223, 1232), False, 'import os\n'), ((6033, 6109), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csvfile, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (6043, 6109), False, 'import csv\n'), ((995, 1030), 'os.path.join', 'os.path.join', (['rootdir', '"""e1-all.csv"""'], {}), "(rootdir, 'e1-all.csv')\n", (1007, 1030), False, 'import os\n'), ((5965, 6000), 'os.path.join', 'os.path.join', (['rootdir', '"""e1-all.csv"""'], {}), "(rootdir, 'e1-all.csv')\n", (5977, 6000), False, 'import os\n'), ((1585, 1612), 'os.path.join', 'os.path.join', (['rootdir', 'dirs'], {}), '(rootdir, dirs)\n', (1597, 1612), False, 'import os\n'), ((3205, 3232), 'scipy.stats.describe', 'stats.describe', (['homes[0:20]'], {}), '(homes[0:20])\n', (3219, 3232), False, 'from scipy import stats\n'), ((3251, 3270), 'numpy.std', 'np.std', (['homes[0:20]'], {}), '(homes[0:20])\n', (3257, 3270), True, 'import numpy as np\n'), ((3292, 3338), 'scipy.stats.norm.interval', 'stats.norm.interval', (['(0.05)'], {'loc': 'mean', 'scale': 'std'}), '(0.05, loc=mean, scale=std)\n', (3311, 3338), False, 'from scipy import stats\n'), ((3355, 3388), 'scipy.stats.sem', 'stats.sem', (['homes[0:20]'], {'axis': 'None'}), '(homes[0:20], axis=None)\n', (3364, 3388), False, 'from scipy import stats\n'), ((3536, 3563), 'scipy.stats.describe', 'stats.describe', (['shops[0:20]'], {}), '(shops[0:20])\n', (3550, 3563), False, 'from scipy import stats\n'), ((3582, 3601), 'numpy.std', 'np.std', (['shops[0:20]'], {}), '(shops[0:20])\n', (3588, 3601), True, 'import numpy as np\n'), ((3623, 3669), 'scipy.stats.norm.interval', 'stats.norm.interval', (['(0.05)'], {'loc': 'mean', 'scale': 'std'}), '(0.05, loc=mean, scale=std)\n', (3642, 3669), False, 'from scipy import stats\n'), ((3686, 3719), 'scipy.stats.sem', 'stats.sem', (['shops[0:20]'], {'axis': 'None'}), '(shops[0:20], axis=None)\n', (3695, 3719), False, 'from scipy import stats\n'), ((1793, 1827), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (1803, 1827), False, 'import csv\n'), ((1717, 1743), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (1729, 1743), False, 'import os\n')] |
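Editor's aside (not part of the dataset rows): the per-year summary above leans on `scipy.stats.describe`, the standard error, and `stats.norm.interval`. A hedged, self-contained sketch of that combination on toy data; note the row passes 0.05 to `norm.interval`, which yields a very narrow central interval — a conventional 95% interval would use 0.95 (flagged as an observation, not a change to the data):

import numpy as np
from scipy import stats

sample = np.array([12.0, 15.0, 11.0, 14.0, 13.0])
n, (smin, smax), mean, var, skew, kurt = stats.describe(sample)
std = np.std(sample)
ste = stats.sem(sample)
lb, ub = stats.norm.interval(0.95, loc=mean, scale=std)   # central 95% under a normal assumption
print(n, mean, std, ste, lb, ub, smin, smax)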
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from . import views
app_name = 'sso'
urlpatterns = [
url(
r'^$',
login_required(views.SSORequestAccessView.as_view()),
name="request_access",
),
url(
r'^thanks/$',
login_required(TemplateView.as_view(
template_name="sso/request_access_success.html"
)),
name="request_access_success",
),
]
| [
"django.views.generic.TemplateView.as_view"
] | [((370, 439), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""sso/request_access_success.html"""'}), "(template_name='sso/request_access_success.html')\n", (390, 439), False, 'from django.views.generic import TemplateView\n')] |
"""Wrappers for training bricks with PyLearn2.
This module contains a set of wrappers that allows to outsource
training and monitoring to Pylearn2.
"""
import logging
from collections import OrderedDict
import theano
from theano import tensor
import pylearn2.costs.cost
import pylearn2.models
import pylearn2.train
import pylearn2.training_algorithms.learning_rule
import pylearn2.train_extensions
import pylearn2.space
from pylearn2.space import CompositeSpace
from pylearn2.utils import serial
from pylearn2.monitor import push_monitor
from blocks.select import Selector
from blocks.utils import pack
from blocks.graph import ComputationGraph
from blocks.utils import shared_floatx, unpack
logger = logging.getLogger()
class Pylearn2Model(pylearn2.models.Model):
supervised = False
def __init__(self, brick, **kwargs):
"""Wraps a brick to support the Pylearn2 model interface.
Parameters
----------
brick : Brick
The brick to wrap.
"""
self.brick = brick
super(Pylearn2Model, self).__init__(**kwargs)
def get_params(self):
return Selector(self.brick).get_params().values()
@staticmethod
def load(path):
"""Loads a model from path.
We need this wrapper to make the loaded monitor continuable
        (currently a deserialized monitor is non-functional in PyLearn2).
        For this we had to create a new monitor and initialize it with the
        data from the old one.
Parameters
----------
path : str
The model path.
"""
model = push_monitor(serial.load(path), "_delete_me",
transfer_experience=True, save_records=True)
del model._delete_me
return model
class Pylearn2Cost(pylearn2.costs.cost.Cost):
"""Wraps a Theano cost to support the PyLearn2 Cost interface.
Parameters
----------
cost : Theano variable
The Theano variable corresponding to the end of the cost
computation graph.
Notes
-----
    The inputs of the computation graph must have names compatible with
    the names of the data sources. This is necessary in order to replace
    them with the ones given by PyLearn2.
"""
def __init__(self, cost):
self.cost = cost
self.inputs = ComputationGraph(self.cost).dict_of_inputs()
def expr(self, model, data, **kwargs):
assert not model.supervised
data = pack(data)
data = [tensor.unbroadcast(var, *range(var.ndim))
for var in data]
return theano.clone(
self.cost, replace=dict(zip(self.inputs.values(), data)))
def get_gradients(self, model, data, **kwargs):
if not hasattr(self, "_grads"):
self._grads = [tensor.grad(self.expr(model, data), p)
for p in model.get_params()]
return OrderedDict(zip(model.get_params(), self._grads)), OrderedDict()
def get_monitoring_channels(self, model, data, **kwargs):
return OrderedDict()
def get_data_specs(self, model):
return model.data_specs
class SGDLearningRule(pylearn2.training_algorithms.learning_rule.LearningRule):
"""The default SGD learning rule.
.. todo::
Move this class to PyLearn2 and make it the default learning rule.
"""
def get_updates(self, learning_rate, grads, lr_scalers):
return {param:
param - learning_rate * lr_scalers.get(param, 1.) * grad
for param, grad in grads.items()}
class Pylearn2LearningRule(pylearn2.training_algorithms
.learning_rule.LearningRule):
"""Wraps a PyLearn2 learning rule to add per-update monitoring.
Parameters
----------
learning_rule : :class:`LearningRule`
A PyLearn2 learning rule to wrap.
monitor_values : dict of (name, Theano variable) pairs
The values to monitor and their names.
updates : OrderedDict
Custom updates to perform when computing gradients.
.. todo::
`updates` are never used.
"""
def __init__(self, learning_rule, monitor_values=None, updates=None):
self.learning_rule = learning_rule
self.values = []
self.accumulators = []
self._callback_called = False
if monitor_values:
for name, value in monitor_values.items():
self.monitor_value(name, value)
if not updates:
updates = OrderedDict()
self.updates = updates
def monitor_value(self, name, value):
"""Add monitoring to be performed with gradient computation.
Parameters
----------
name : str
The name of the value to be monitored.
value : Theano variable
The value to be monitored.
"""
if self._callback_called:
raise Exception("It is to add to monitoring to the {}:"
"a callback has been called".format(self.__name__))
self.values.append(value)
self.accumulators.append(shared_floatx(0, name=name))
def add_channels_to_monitor(self, monitor, datasets):
self.learning_rule.add_channels_to_monitor(monitor, datasets)
for accumulator in self.accumulators:
monitor.add_channel(accumulator.name, ipt=None, val=accumulator,
data_specs=(pylearn2.space.NullSpace(), ''),
dataset=datasets)
self._callback_called = True
def get_updates(self, learning_rate, grads, lr_scalers):
"""Wraps the respective method of the wrapped learning rule.
Performs name-based input substitution for the monitored values.
Currently very hacky: the inputs from the gradients are typically
named `$ALGO[$SOURCE]` in PyLearn2, where `$ALGO` is the algorithm
name and `$SOURCE` is a source name from the data specification.
This convention is exploited to match them with the inputs of
monitoring values, whose input names are expected to match source
names.
"""
updates = self.learning_rule.get_updates(learning_rate, grads,
lr_scalers)
grad_inputs = ComputationGraph(list(grads.values())).dict_of_inputs()
for value, accumulator in zip(self.values, self.accumulators):
value_inputs = ComputationGraph(value).dict_of_inputs()
replace_dict = dict()
for name, input_ in value_inputs.items():
# See docstring to see how it works
grad_input = grad_inputs[unpack(
[n for n in grad_inputs
if n.endswith('[{}]'.format(name))],
singleton=True)]
replace_dict[input_] = tensor.unbroadcast(
grad_input, *range(grad_input.ndim))
updates[accumulator] = (
accumulator + theano.clone(value, replace_dict))
self._callback_called = True
updates.update(self.updates)
return updates
class DefaultExtension(pylearn2.train_extensions.TrainExtension):
"""This extension helps Pylearn2LearningRule do its job.
    The job of this extension is to help the Pylearn2LearningRule in its
    monitoring duties. Due to the impossibility of resetting the accumulators of
monitored values, the gradient computation function simply adds values
from new batches to the accumulators. At the end of each epoch the
accumulator's value from the previous epoch should be subtracted and
the difference should be divided over the number of batches to get an
average for the last epoch. This is done in the `on_monitor` method.
"""
def setup(self, model, dataset, algoritm):
self._last_batches_seen = model.monitor.get_batches_seen()
self._last_values = dict()
def on_monitor(self, model, dataset, algorithm):
learning_rule = algorithm.learning_rule
if not learning_rule:
return
batches_seen = model.monitor.get_batches_seen()
if (isinstance(learning_rule, Pylearn2LearningRule) and
len(self._last_values)):
for accum in learning_rule.accumulators:
accum.set_value(
((accum.get_value() - self._last_values[accum]) /
(batches_seen - self._last_batches_seen)).astype(
theano.config.floatX))
for accum in learning_rule.accumulators:
self._last_values[accum] = accum.get_value()
batches_seen -= self._last_batches_seen
class Pylearn2Train(pylearn2.train.Train):
"""Convinience wrapper over the PyLearn2 main loop.
Sets `model.data_specs` using `dataset.data_specs` and the names of the
input variables.
"""
def __init__(self, dataset, model, algorithm,
save_path=None, save_freq=0, extensions=None,
*args, **kwargs):
# Set data_specs
spaces, sources = dataset.data_specs
if isinstance(spaces, CompositeSpace):
spaces = spaces.components
else:
spaces = (spaces,)
sources = (sources,)
input_names = list(algorithm.cost.inputs.keys())
spaces = [spaces[sources.index(source)] for source in input_names]
if len(spaces) > 1:
spaces = CompositeSpace(spaces)
sources = input_names
else:
spaces = spaces[0]
sources = input_names[0]
model.data_specs = (spaces, sources)
# Add default extensions
if not extensions:
extensions = list()
extensions.append(DefaultExtension())
super(Pylearn2Train, self).__init__(
dataset, model, algorithm, save_path, save_freq, extensions,
*args, **kwargs)
def setup(self):
"""Make monitor persistency the default behaviour."""
if hasattr(self.model, 'monitor'):
# Cheat on monitor._sanity_check
# TODO: raise a discussion about it
for channel in self.model.monitor.channels.values():
channel.prereqs = None
super(Pylearn2Train, self).setup()
self.model.monitor.on_channel_conflict = 'copy_history'
| [
"logging.getLogger",
"collections.OrderedDict",
"blocks.utils.shared_floatx",
"pylearn2.utils.serial.load",
"blocks.select.Selector",
"blocks.graph.ComputationGraph",
"pylearn2.space.CompositeSpace",
"theano.clone",
"blocks.utils.pack"
] | [((706, 725), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (723, 725), False, 'import logging\n'), ((2472, 2482), 'blocks.utils.pack', 'pack', (['data'], {}), '(data)\n', (2476, 2482), False, 'from blocks.utils import pack\n'), ((3046, 3059), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3057, 3059), False, 'from collections import OrderedDict\n'), ((1621, 1638), 'pylearn2.utils.serial.load', 'serial.load', (['path'], {}), '(path)\n', (1632, 1638), False, 'from pylearn2.utils import serial\n'), ((2954, 2967), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2965, 2967), False, 'from collections import OrderedDict\n'), ((4493, 4506), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4504, 4506), False, 'from collections import OrderedDict\n'), ((5092, 5119), 'blocks.utils.shared_floatx', 'shared_floatx', (['(0)'], {'name': 'name'}), '(0, name=name)\n', (5105, 5119), False, 'from blocks.utils import shared_floatx, unpack\n'), ((9444, 9466), 'pylearn2.space.CompositeSpace', 'CompositeSpace', (['spaces'], {}), '(spaces)\n', (9458, 9466), False, 'from pylearn2.space import CompositeSpace\n'), ((2332, 2359), 'blocks.graph.ComputationGraph', 'ComputationGraph', (['self.cost'], {}), '(self.cost)\n', (2348, 2359), False, 'from blocks.graph import ComputationGraph\n'), ((6996, 7029), 'theano.clone', 'theano.clone', (['value', 'replace_dict'], {}), '(value, replace_dict)\n', (7008, 7029), False, 'import theano\n'), ((6444, 6467), 'blocks.graph.ComputationGraph', 'ComputationGraph', (['value'], {}), '(value)\n', (6460, 6467), False, 'from blocks.graph import ComputationGraph\n'), ((1131, 1151), 'blocks.select.Selector', 'Selector', (['self.brick'], {}), '(self.brick)\n', (1139, 1151), False, 'from blocks.select import Selector\n')] |
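Editor's aside (not part of the dataset rows): `Pylearn2Cost.expr` above swaps the cost graph's named inputs for PyLearn2's batch variables via `theano.clone(..., replace=...)`. A minimal standalone illustration of that substitution (variable names are illustrative):

import theano
import theano.tensor as tensor

x = tensor.vector("features")
cost = (x ** 2).sum()

batch = tensor.vector("batch_features")
cloned = theano.clone(cost, replace={x: batch})
f = theano.function([batch], cloned)
print(f([1.0, 2.0]))   # 5.0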
# <NAME>
# Made on 10/12/2021
# This is the script used in Blender to read the required numpy arrays and animate transforms and colours
# appropriately. Use Blender 3.0.0
import bpy
import numpy as np
vac_file_name = r"E:\Projects Small\2022_01_Europe_vaccinated_infographic\vaccinated.npy"
vac_array = np.load(vac_file_name)
deaths_file_name = r"E:\Projects Small\2022_01_Europe_vaccinated_infographic\deaths.npy"
deaths_array = np.load(deaths_file_name)
dates_file_name = r"E:\Projects Small\2022_01_Europe_vaccinated_infographic\dates.npy"
dates_array = np.load(dates_file_name, allow_pickle=True)
start_of_vaccination_index = 320
for obj_i, obj in enumerate(bpy.data.collections['Meshes'].objects):
mat = bpy.data.materials.new(name='colour_of_{}'.format(obj.name))
mat.shadow_method = 'NONE'
obj.active_material = mat
for vac_i in np.arange(vac_array.shape[0]):
frame = start_of_vaccination_index + vac_i
vaccinated = vac_array[vac_i, obj_i] / np.max(vac_array)
mat.diffuse_color = [1 - vaccinated, vaccinated, 0, 1]
mat.keyframe_insert(data_path = 'diffuse_color', frame=frame, index=-1)
print('Done Mat')
for hook_i, hook in enumerate(bpy.data.collections['Hooks'].objects):
for death_i in np.arange(deaths_array.shape[0]):
height = 5 * deaths_array[death_i, hook_i] / np.max(deaths_array)
hook.location.z = height
hook.keyframe_insert(data_path = 'location', frame = death_i)
print('Done Height')
def update_date(self):
date_text_obj = bpy.context.scene.objects['Date']
frame = bpy.context.scene.frame_current
date_text_obj.data.body = dates_array[frame]
bpy.app.handlers.frame_change_pre.append(update_date)
for i in np.arange(bpy.context.scene.frame_end):
bpy.context | [
"numpy.max",
"numpy.load",
"bpy.app.handlers.frame_change_pre.append",
"numpy.arange"
] | [((326, 348), 'numpy.load', 'np.load', (['vac_file_name'], {}), '(vac_file_name)\n', (333, 348), True, 'import numpy as np\n'), ((457, 482), 'numpy.load', 'np.load', (['deaths_file_name'], {}), '(deaths_file_name)\n', (464, 482), True, 'import numpy as np\n'), ((588, 631), 'numpy.load', 'np.load', (['dates_file_name'], {'allow_pickle': '(True)'}), '(dates_file_name, allow_pickle=True)\n', (595, 631), True, 'import numpy as np\n'), ((1734, 1787), 'bpy.app.handlers.frame_change_pre.append', 'bpy.app.handlers.frame_change_pre.append', (['update_date'], {}), '(update_date)\n', (1774, 1787), False, 'import bpy\n'), ((1800, 1838), 'numpy.arange', 'np.arange', (['bpy.context.scene.frame_end'], {}), '(bpy.context.scene.frame_end)\n', (1809, 1838), True, 'import numpy as np\n'), ((899, 928), 'numpy.arange', 'np.arange', (['vac_array.shape[0]'], {}), '(vac_array.shape[0])\n', (908, 928), True, 'import numpy as np\n'), ((1315, 1347), 'numpy.arange', 'np.arange', (['deaths_array.shape[0]'], {}), '(deaths_array.shape[0])\n', (1324, 1347), True, 'import numpy as np\n'), ((1034, 1051), 'numpy.max', 'np.max', (['vac_array'], {}), '(vac_array)\n', (1040, 1051), True, 'import numpy as np\n'), ((1403, 1423), 'numpy.max', 'np.max', (['deaths_array'], {}), '(deaths_array)\n', (1409, 1423), True, 'import numpy as np\n')] |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class StorageLocation(object):
"""
Properties that point to a specific object in Object Storage.
"""
def __init__(self, **kwargs):
"""
Initializes a new StorageLocation object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param region_id:
The value to assign to the region_id property of this StorageLocation.
:type region_id: str
:param compartment_id:
The value to assign to the compartment_id property of this StorageLocation.
:type compartment_id: str
:param namespace_name:
The value to assign to the namespace_name property of this StorageLocation.
:type namespace_name: str
:param bucket_name:
The value to assign to the bucket_name property of this StorageLocation.
:type bucket_name: str
:param object_name:
The value to assign to the object_name property of this StorageLocation.
:type object_name: str
"""
self.swagger_types = {
'region_id': 'str',
'compartment_id': 'str',
'namespace_name': 'str',
'bucket_name': 'str',
'object_name': 'str'
}
self.attribute_map = {
'region_id': 'regionId',
'compartment_id': 'compartmentId',
'namespace_name': 'namespaceName',
'bucket_name': 'bucketName',
'object_name': 'objectName'
}
self._region_id = None
self._compartment_id = None
self._namespace_name = None
self._bucket_name = None
self._object_name = None
@property
def region_id(self):
"""
**[Required]** Gets the region_id of this StorageLocation.
The region id.
:return: The region_id of this StorageLocation.
:rtype: str
"""
return self._region_id
@region_id.setter
def region_id(self, region_id):
"""
Sets the region_id of this StorageLocation.
The region id.
:param region_id: The region_id of this StorageLocation.
:type: str
"""
self._region_id = region_id
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this StorageLocation.
The unique identifier for the compartment.
:return: The compartment_id of this StorageLocation.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this StorageLocation.
The unique identifier for the compartment.
:param compartment_id: The compartment_id of this StorageLocation.
:type: str
"""
self._compartment_id = compartment_id
@property
def namespace_name(self):
"""
**[Required]** Gets the namespace_name of this StorageLocation.
The Object Storage namespace.
:return: The namespace_name of this StorageLocation.
:rtype: str
"""
return self._namespace_name
@namespace_name.setter
def namespace_name(self, namespace_name):
"""
Sets the namespace_name of this StorageLocation.
The Object Storage namespace.
:param namespace_name: The namespace_name of this StorageLocation.
:type: str
"""
self._namespace_name = namespace_name
@property
def bucket_name(self):
"""
**[Required]** Gets the bucket_name of this StorageLocation.
The name of the bucket.
:return: The bucket_name of this StorageLocation.
:rtype: str
"""
return self._bucket_name
@bucket_name.setter
def bucket_name(self, bucket_name):
"""
Sets the bucket_name of this StorageLocation.
The name of the bucket.
:param bucket_name: The bucket_name of this StorageLocation.
:type: str
"""
self._bucket_name = bucket_name
@property
def object_name(self):
"""
**[Required]** Gets the object_name of this StorageLocation.
The name of the object.
:return: The object_name of this StorageLocation.
:rtype: str
"""
return self._object_name
@object_name.setter
def object_name(self, object_name):
"""
Sets the object_name of this StorageLocation.
The name of the object.
:param object_name: The object_name of this StorageLocation.
:type: str
"""
self._object_name = object_name
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| [
"oci.util.formatted_flat_dict"
] | [((5325, 5350), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (5344, 5350), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')] |
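Editor's aside (not part of the dataset rows): the model above is a plain keyword-driven data holder; assuming `init_model_state_from_kwargs` assigns keyword arguments to the matching properties (as elsewhere in the OCI SDK) and the class above is in scope, usage looks roughly like this — all values are illustrative:

loc = StorageLocation(
    region_id="us-phoenix-1",
    compartment_id="ocid1.compartment.oc1..example",
    namespace_name="my-namespace",
    bucket_name="my-bucket",
    object_name="records.jsonl",
)
print(loc.bucket_name)   # my-bucket
print(loc)               # formatted_flat_dict rendering of all five fields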
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import unittest
from continuous_delivery.models import CiResult
from continuous_delivery.models import CiArtifact
from continuous_delivery.models import CiConfiguration
from continuous_delivery.models import ProvisioningConfigurationSource
from continuous_delivery.models import ProvisioningConfiguration
from mock import patch, Mock
from vsts_accounts.models import AccountModel
from vsts_info_provider.models import TeamProjectInfo, RepositoryInfo, CollectionInfo, VstsInfo
from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryManager
class TestContinousDeliveryManager(unittest.TestCase):
def fake_callback(self):
pass
def test_constructor(self):
cdman = ContinuousDeliveryManager(None)
cdman = ContinuousDeliveryManager(self.fake_callback)
def test_get_vsts_app_id(self):
cdman = ContinuousDeliveryManager(None)
self.assertEqual('499b84ac-1321-427f-aa17-267ca6975798', cdman.get_vsts_app_id())
def test_set_azure_web_info(self):
cdman = ContinuousDeliveryManager(None)
cdman.set_azure_web_info('group1', 'web1', 'fakeCreds', 'sub1', 'subname1', 'tenant1', 'South Central US')
self.assertEqual('fakeCreds', cdman._azure_info.credentials)
self.assertEqual('group1', cdman._azure_info.resource_group_name)
self.assertEqual('sub1', cdman._azure_info.subscription_id)
self.assertEqual('subname1', cdman._azure_info.subscription_name)
self.assertEqual('tenant1', cdman._azure_info.tenant_id)
self.assertEqual('South Central US', cdman._azure_info.webapp_location)
self.assertEqual('web1', cdman._azure_info.website_name)
cdman.set_azure_web_info(None, None, None, None, None, None, None)
self.assertEqual(None, cdman._azure_info.credentials)
self.assertEqual(None, cdman._azure_info.resource_group_name)
self.assertEqual(None, cdman._azure_info.subscription_id)
self.assertEqual(None, cdman._azure_info.subscription_name)
self.assertEqual(None, cdman._azure_info.tenant_id)
self.assertEqual(None, cdman._azure_info.webapp_location)
self.assertEqual(None, cdman._azure_info.website_name)
def test_set_repository_info(self):
cdman = ContinuousDeliveryManager(None)
cdman.set_repository_info('repoUrl1', 'master1', 'token1')
self.assertEqual('master1', cdman._repo_info.branch)
self.assertEqual('token1', cdman._repo_info.git_token)
self.assertEqual('repoUrl1', cdman._repo_info.url)
cdman.set_repository_info(None, None, None)
self.assertEqual(None, cdman._repo_info.branch)
self.assertEqual(None, cdman._repo_info.git_token)
self.assertEqual(None, cdman._repo_info.url)
@patch("vsts_cd_manager.continuous_delivery_manager.ContinuousDelivery")
@patch("vsts_cd_manager.continuous_delivery_manager.Account")
def test_setup_continuous_delivery___account_doesnt_exist(self, mock_account, mock_cd):
# Mock the CD Client
mocked_cd = mock_cd.return_value
# Mock the Account Client
mocked_account = mock_account.return_value
mocked_account.create_account.return_value = AccountModel()
mocked_account.account_exists.return_value = False
# create CD manager
cdman = ContinuousDeliveryManager(None)
# Mock the vsts info call
cdman._get_vsts_info = self._mock_get_vsts_info
# set required values
cdman.set_azure_web_info('group1', 'web1', 'fakeCreds', 'sub1', 'subname1', 'tenant1', 'South Central US')
cdman.set_repository_info('repoUrl1', 'master1', 'token1')
# call setup
with self.assertRaises(RuntimeError) as context:
cdman.setup_continuous_delivery('staging', 'AspNetWap', "account1", False, 'token2')
self.assertTrue('does not exist' in str(context.exception))
@patch("vsts_cd_manager.continuous_delivery_manager.ContinuousDelivery")
@patch("vsts_cd_manager.continuous_delivery_manager.Account")
def test_setup_continuous_delivery___create_account(self, mock_account, mock_cd):
# Mock the CD Client
mocked_cd = mock_cd.return_value
mocked_cd.provisioning_configuration.return_value = self._get_provisioning_config('queued', '')
mocked_cd.get_provisioning_configuration.return_value = self._get_provisioning_config('succeeded', '')
# Mock the Account Client
mocked_account = mock_account.return_value
mocked_account.create_account.return_value = AccountModel('111', 'collection111')
mocked_account.account_exists.return_value = False
# create CD manager
cdman = ContinuousDeliveryManager(None)
# Mock the vsts info call
cdman._get_vsts_info = self._mock_get_vsts_info
# set required values
cdman.set_azure_web_info('group1', 'web1', 'fakeCreds', 'sub1', 'subname1', 'tenant1', 'South Central US')
cdman.set_repository_info('repoUrl1', 'master1', 'token1')
# call setup
result = cdman.setup_continuous_delivery('staging', 'AspNetWap', "account1", True, 'token2')
self.assertEqual('SUCCESS', result.status)
self.assertTrue("The Team Services account 'https://account1.visualstudio.com' was created" in result.status_message)
self.assertEqual('https://portal.azure.com/#resource/subscriptions/sub1/resourceGroups/group1/providers/Microsoft.Web/sites/web1/vstscd', result.azure_continuous_delivery_url)
self.assertEqual('group1', result.azure_resource_group)
self.assertEqual('sub1', result.azure_subscription_id)
self.assertEqual('web1', result.azure_website_name)
self.assertEqual(True, result.vsts_account_created)
self.assertEqual('https://account1.visualstudio.com', result.vsts_account_url)
self.assertEqual('https://account1.visualstudio.com/333/_build?_a=simple-process&definitionId=123', result.vsts_build_def_url)
self.assertEqual('https://account1.visualstudio.com/333/_apps/hub/ms.vss-releaseManagement-web.hub-explorer?definitionId=321&_a=releases', result.vsts_release_def_url)
def _mock_get_vsts_info(self, vsts_repo_url, cred):
collection_info = CollectionInfo('111', 'collection111', 'https://collection111.visualstudio.com')
project_info = TeamProjectInfo('333', 'project1', 'https://collection111.visualstudio.com/project1', 'good', '1')
repository_info = RepositoryInfo('222', 'repo222', 'https://collection111.visualstudio.com/project1/_git/repo222', project_info)
return VstsInfo('server1', collection_info, repository_info)
def _get_provisioning_config(self, status, status_message):
ci_config = CiConfiguration(
CiArtifact('333', 'project1', 'https://collection111.visualstudio.com/project1'),
CiArtifact('123', 'builddef123', 'https://collection111.visualstudio.com/project1/build/definition/123'),
CiArtifact('321', 'releasedef321', 'https://collection111.visualstudio.com/project1/release/definition/321'),
CiResult(status, status_message))
return ProvisioningConfiguration('abcd', None, None, ci_config)
if __name__ == '__main__':
unittest.main() | [
"vsts_accounts.models.AccountModel",
"mock.patch",
"vsts_info_provider.models.CollectionInfo",
"continuous_delivery.models.CiResult",
"vsts_cd_manager.continuous_delivery_manager.ContinuousDeliveryManager",
"vsts_info_provider.models.TeamProjectInfo",
"vsts_info_provider.models.RepositoryInfo",
"continuous_delivery.models.ProvisioningConfiguration",
"unittest.main",
"continuous_delivery.models.CiArtifact",
"vsts_info_provider.models.VstsInfo"
] | [((3156, 3227), 'mock.patch', 'patch', (['"""vsts_cd_manager.continuous_delivery_manager.ContinuousDelivery"""'], {}), "('vsts_cd_manager.continuous_delivery_manager.ContinuousDelivery')\n", (3161, 3227), False, 'from mock import patch, Mock\n'), ((3233, 3293), 'mock.patch', 'patch', (['"""vsts_cd_manager.continuous_delivery_manager.Account"""'], {}), "('vsts_cd_manager.continuous_delivery_manager.Account')\n", (3238, 3293), False, 'from mock import patch, Mock\n'), ((4295, 4366), 'mock.patch', 'patch', (['"""vsts_cd_manager.continuous_delivery_manager.ContinuousDelivery"""'], {}), "('vsts_cd_manager.continuous_delivery_manager.ContinuousDelivery')\n", (4300, 4366), False, 'from mock import patch, Mock\n'), ((4372, 4432), 'mock.patch', 'patch', (['"""vsts_cd_manager.continuous_delivery_manager.Account"""'], {}), "('vsts_cd_manager.continuous_delivery_manager.Account')\n", (4377, 4432), False, 'from mock import patch, Mock\n'), ((7622, 7637), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7635, 7637), False, 'import unittest\n'), ((1094, 1125), 'vsts_cd_manager.continuous_delivery_manager.ContinuousDeliveryManager', 'ContinuousDeliveryManager', (['None'], {}), '(None)\n', (1119, 1125), False, 'from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryManager\n'), ((1142, 1187), 'vsts_cd_manager.continuous_delivery_manager.ContinuousDeliveryManager', 'ContinuousDeliveryManager', (['self.fake_callback'], {}), '(self.fake_callback)\n', (1167, 1187), False, 'from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryManager\n'), ((1241, 1272), 'vsts_cd_manager.continuous_delivery_manager.ContinuousDeliveryManager', 'ContinuousDeliveryManager', (['None'], {}), '(None)\n', (1266, 1272), False, 'from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryManager\n'), ((1419, 1450), 'vsts_cd_manager.continuous_delivery_manager.ContinuousDeliveryManager', 'ContinuousDeliveryManager', (['None'], {}), '(None)\n', (1444, 1450), False, 'from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryManager\n'), ((2648, 2679), 'vsts_cd_manager.continuous_delivery_manager.ContinuousDeliveryManager', 'ContinuousDeliveryManager', (['None'], {}), '(None)\n', (2673, 2679), False, 'from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryManager\n'), ((3594, 3608), 'vsts_accounts.models.AccountModel', 'AccountModel', ([], {}), '()\n', (3606, 3608), False, 'from vsts_accounts.models import AccountModel\n'), ((3712, 3743), 'vsts_cd_manager.continuous_delivery_manager.ContinuousDeliveryManager', 'ContinuousDeliveryManager', (['None'], {}), '(None)\n', (3737, 3743), False, 'from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryManager\n'), ((4942, 4978), 'vsts_accounts.models.AccountModel', 'AccountModel', (['"""111"""', '"""collection111"""'], {}), "('111', 'collection111')\n", (4954, 4978), False, 'from vsts_accounts.models import AccountModel\n'), ((5082, 5113), 'vsts_cd_manager.continuous_delivery_manager.ContinuousDeliveryManager', 'ContinuousDeliveryManager', (['None'], {}), '(None)\n', (5107, 5113), False, 'from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryManager\n'), ((6627, 6712), 'vsts_info_provider.models.CollectionInfo', 'CollectionInfo', (['"""111"""', '"""collection111"""', '"""https://collection111.visualstudio.com"""'], {}), "('111', 'collection111', 'https://collection111.visualstudio.com'\n )\n", (6641, 6712), False, 'from vsts_info_provider.models import 
TeamProjectInfo, RepositoryInfo, CollectionInfo, VstsInfo\n'), ((6731, 6833), 'vsts_info_provider.models.TeamProjectInfo', 'TeamProjectInfo', (['"""333"""', '"""project1"""', '"""https://collection111.visualstudio.com/project1"""', '"""good"""', '"""1"""'], {}), "('333', 'project1',\n 'https://collection111.visualstudio.com/project1', 'good', '1')\n", (6746, 6833), False, 'from vsts_info_provider.models import TeamProjectInfo, RepositoryInfo, CollectionInfo, VstsInfo\n'), ((6856, 6974), 'vsts_info_provider.models.RepositoryInfo', 'RepositoryInfo', (['"""222"""', '"""repo222"""', '"""https://collection111.visualstudio.com/project1/_git/repo222"""', 'project_info'], {}), "('222', 'repo222',\n 'https://collection111.visualstudio.com/project1/_git/repo222',\n project_info)\n", (6870, 6974), False, 'from vsts_info_provider.models import TeamProjectInfo, RepositoryInfo, CollectionInfo, VstsInfo\n'), ((6982, 7035), 'vsts_info_provider.models.VstsInfo', 'VstsInfo', (['"""server1"""', 'collection_info', 'repository_info'], {}), "('server1', collection_info, repository_info)\n", (6990, 7035), False, 'from vsts_info_provider.models import TeamProjectInfo, RepositoryInfo, CollectionInfo, VstsInfo\n'), ((7533, 7589), 'continuous_delivery.models.ProvisioningConfiguration', 'ProvisioningConfiguration', (['"""abcd"""', 'None', 'None', 'ci_config'], {}), "('abcd', None, None, ci_config)\n", (7558, 7589), False, 'from continuous_delivery.models import ProvisioningConfiguration\n'), ((7150, 7235), 'continuous_delivery.models.CiArtifact', 'CiArtifact', (['"""333"""', '"""project1"""', '"""https://collection111.visualstudio.com/project1"""'], {}), "('333', 'project1', 'https://collection111.visualstudio.com/project1'\n )\n", (7160, 7235), False, 'from continuous_delivery.models import CiArtifact\n'), ((7244, 7352), 'continuous_delivery.models.CiArtifact', 'CiArtifact', (['"""123"""', '"""builddef123"""', '"""https://collection111.visualstudio.com/project1/build/definition/123"""'], {}), "('123', 'builddef123',\n 'https://collection111.visualstudio.com/project1/build/definition/123')\n", (7254, 7352), False, 'from continuous_delivery.models import CiArtifact\n'), ((7362, 7474), 'continuous_delivery.models.CiArtifact', 'CiArtifact', (['"""321"""', '"""releasedef321"""', '"""https://collection111.visualstudio.com/project1/release/definition/321"""'], {}), "('321', 'releasedef321',\n 'https://collection111.visualstudio.com/project1/release/definition/321')\n", (7372, 7474), False, 'from continuous_delivery.models import CiArtifact\n'), ((7484, 7516), 'continuous_delivery.models.CiResult', 'CiResult', (['status', 'status_message'], {}), '(status, status_message)\n', (7492, 7516), False, 'from continuous_delivery.models import CiResult\n')] |
from pyglet import app
from pyglet.gl import *
from pyglet.window import key, mouse, Window
import numpy as np
from scipy.spatial import cKDTree
from ctypes import *
import math
from map_meta_tools import load_map_meta
from map_geom_tools import load_map_geom
from map_plot_tools import load_map_plot
view_width = 640
view_height = 480
config = Config(sample_buffers=1, samples=8, double_buffer=True)
window = Window(config=config, width=view_width, height=view_height, resizable=True, caption='<none>')
VERTEX_SHADER_SOURCE = b'''
#version 330
layout(location = 0) in vec2 a_position;
layout(location = 1) in int a_region;
out vec4 v_color;
uniform mat3 u_map_to_clip;
layout(std140) uniform u_region_color_block {
vec4 u_region_color[3026];
};
void main()
{
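    // look up this region's colour in the std140 uniform block, then map
    // map-space coordinates to clip space with the 3x3 homogeneous matrix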
v_color = u_region_color[a_region];
vec2 v_position = (u_map_to_clip * vec3(a_position, 1.0)).xy;
gl_Position = vec4(v_position, 0.0, 1.0);
}
'''
FRAGMENT_SHADER_SOURCE = b'''
#version 330
in vec4 v_color;
out vec4 f_color;
void main()
{
f_color = v_color;
}
'''
map_meta = load_map_meta()
map_geom = load_map_geom()
map_plot = load_map_plot()
vertex_array = map_geom['vertex_data']
element_array = map_geom['element_data']
color_array_dict = {
key.A: map_plot['age_data'],
key.W: map_plot['water_data'],
key.F: map_plot['forest_data'],
key.C: map_plot['cluster_data']
}
def build_shader(shader_info):
shader = glCreateShader(shader_info['type'])
glShaderSource(shader, 1,
pointer(cast(c_char_p(shader_info['source']), POINTER(GLchar))),
pointer(GLint(len(shader_info['source']))))
glCompileShader(shader)
return shader
def build_shader_program(shader_info_list):
shader_list = []
for shader_info in shader_info_list:
shader_list.append(build_shader(shader_info))
shader_program = glCreateProgram()
for shader in shader_list:
glAttachShader(shader_program, shader)
glLinkProgram(shader_program)
for shader in shader_list:
glDetachShader(shader_program, shader)
return shader_program
shader_program = build_shader_program([
{'type': GL_VERTEX_SHADER, 'source': VERTEX_SHADER_SOURCE},
{'type': GL_FRAGMENT_SHADER, 'source': FRAGMENT_SHADER_SOURCE}
])
uniform_map_to_clip = glGetUniformLocation(shader_program, c_char_p(b'u_map_to_clip'))
def update_buffer_content(buffer_type, buffer, buffer_data, buffer_usage):
glBindBuffer(buffer_type, buffer)
glBufferData(buffer_type, buffer_data.nbytes, buffer_data.ctypes.data_as(POINTER(GLvoid)), buffer_usage)
glBindBuffer(buffer_type, 0)
def build_buffer(buffer_type, buffer_data, buffer_usage):
buffer = GLuint()
glGenBuffers(1, pointer(buffer))
update_buffer_content(buffer_type, buffer, buffer_data, buffer_usage)
buffer_element_size = buffer_data.nbytes // buffer_data.shape[0]
buffer_element_count = buffer_data.nbytes // buffer_element_size
return buffer, buffer_element_size, buffer_element_count
vertex_buffer, vertex_size, _ = build_buffer(GL_ARRAY_BUFFER, vertex_array, GL_STATIC_DRAW)
element_buffer, element_size, element_count = build_buffer(GL_ELEMENT_ARRAY_BUFFER, element_array, GL_STATIC_DRAW)
region_color_buffer, _, _ = build_buffer(GL_UNIFORM_BUFFER, color_array_dict[key.F], GL_DYNAMIC_DRAW)
def on_launch():
uniform_region_color_block = glGetUniformBlockIndex(shader_program, c_char_p(b'u_region_color_block'))
glUniformBlockBinding(shader_program, uniform_region_color_block, 0)
glBindBufferBase(GL_UNIFORM_BUFFER, 0, region_color_buffer)
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, vertex_size, 0)
glEnableVertexAttribArray(1)
glVertexAttribIPointer(1, 1, GL_UNSIGNED_SHORT, vertex_size, 2 * sizeof(GLfloat))
glBindBuffer(GL_ARRAY_BUFFER, 0)
log_zoom = -8.0
mat_view_to_clip = None
mat_map_to_view = None
map_origin_x = (83748.4296875 + 732907.75) / 2
map_origin_y = (6629044.0 + 7776450.0) / 2
map_offset_x = 0.0
map_offset_y = 0.0
def update_view():
global mat_view_to_clip, mat_map_to_view
zoom = math.exp(log_zoom)
mid_x = map_origin_x + map_offset_x
mid_y = map_origin_y + map_offset_y
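    # view-to-clip maps window pixel coordinates to clip space [-1, 1]^2;
    # map-to-view applies the current zoom and recentres on the panned map midpoint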
mat_view_to_clip = np.array([[2 / window.width, 0, -1],
[0, 2 / window.height, -1],
[0, 0, 1]], dtype='=f4')
mat_map_to_view = np.array([[zoom, 0, window.width / 2 - zoom * mid_x],
[0, zoom, window.height / 2 - zoom * mid_y],
[0, 0, 1]], dtype='=f4')
centroid_tree = cKDTree(map_geom['element_middle'])
centroid_tree_radius = np.max(map_geom['element_extent'])
def find_region_by_map_position(map_position):
def is_point_in_triangle(p, p0, p1, p2):
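        # barycentric sign test: s and t are scaled barycentric coordinates of p,
        # so p lies inside the triangle when both share the sign of d and s + t stays within d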
d1 = p - p2
d2 = p1 - p2
d = d2[1] * (p0[0] - p2[0]) - d2[0] * (p0[1] - p2[1])
s = d2[1] * d1[0] - d2[0] * d1[1]
t = (p2[1] - p0[1]) * d1[0] + (p0[0] - p2[0]) * d1[1]
if d < 0:
return s <= 0 and t <= 0 and s + t >= d
return s >= 0 and t >= 0 and s + t <= d
    i_candidates = centroid_tree.query_ball_point(map_position[:2], centroid_tree_radius, p=np.inf)
for i in i_candidates:
a = vertex_array[element_array[i][0]]
b = vertex_array[element_array[i][1]]
c = vertex_array[element_array[i][2]]
if is_point_in_triangle(map_position[:2], a[0], b[0], c[0]):
return a[1]
return None
vec_mouse_view_position = None
vec_mouse_map_position = None
fix_mouse_map_position = False
@window.event
def on_key_press(symbol, modifiers):
if symbol in color_array_dict.keys():
update_buffer_content(GL_UNIFORM_BUFFER, region_color_buffer, color_array_dict[symbol], GL_DYNAMIC_DRAW)
@window.event
def on_key_release(symbol, modifiers):
pass
@window.event
def on_mouse_press(x, y, button, modifiers):
global fix_mouse_map_position
if button == mouse.LEFT:
window.set_mouse_cursor(window.get_system_mouse_cursor(Window.CURSOR_SIZE))
fix_mouse_map_position = True
if button == mouse.RIGHT:
region = find_region_by_map_position(vec_mouse_map_position[:2])
if region is not None:
code = map_meta['region_code_data'][region]
name = map_meta['region_name_data'][region]
window.set_caption(f'{code} {name}')
else:
window.set_caption('<none>')
@window.event
def on_mouse_release(x, y, button, modifiers):
global fix_mouse_map_position
if button == mouse.LEFT:
window.set_mouse_cursor(None)
fix_mouse_map_position = False
@window.event
def on_mouse_motion(x, y, dx, dy):
global vec_mouse_view_position, vec_mouse_map_position
global mat_map_to_view
vec_mouse_view_position = np.array((x, y, 1.0))
vec_mouse_map_position = np.linalg.inv(mat_map_to_view) @ np.array((x, y, 1.0))
@window.event
def on_mouse_enter(x, y):
pass
@window.event
def on_mouse_leave(x, y):
global vec_mouse_view_position
vec_mouse_view_position = None
@window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
global vec_mouse_map_position
global mat_map_to_view
global map_offset_x, map_offset_y
new_vec_mouse_map_position = np.linalg.inv(mat_map_to_view) @ np.array((x, y, 1.0))
if not fix_mouse_map_position:
vec_mouse_map_position = new_vec_mouse_map_position
else:
map_offset_x += vec_mouse_map_position[0] - new_vec_mouse_map_position[0]
map_offset_y += vec_mouse_map_position[1] - new_vec_mouse_map_position[1]
update_view()
@window.event
def on_mouse_scroll(x, y, scroll_x, scroll_y):
global log_zoom
log_zoom = np.clip(log_zoom + 0.1 * scroll_y, -8.0, -2.0)
global mat_map_to_view
global vec_mouse_map_position
global map_offset_x, map_offset_y
update_view()
new_vec_mouse_map_position = np.linalg.inv(mat_map_to_view) @ np.array((x, y, 1.0))
map_offset_x += vec_mouse_map_position[0] - new_vec_mouse_map_position[0]
map_offset_y += vec_mouse_map_position[1] - new_vec_mouse_map_position[1]
update_view()
@window.event
def on_resize(width, height):
global view_width, view_height
# avoid width == 0, height == 0
view_width = max(width, 1)
view_height = max(height, 1)
update_view()
@window.event
def on_draw():
glViewport(0, 0, view_width, view_height)
# clear screen
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
glUseProgram(shader_program)
# update projection matrix
global mat_view_to_clip, mat_map_to_view
map_to_clip = mat_view_to_clip @ mat_map_to_view
glUniformMatrix3fv(uniform_map_to_clip, 1, GL_TRUE, map_to_clip.ctypes.data_as(POINTER(GLfloat)))
# bind vertices
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer)
# bind and draw elements
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, element_buffer)
glDrawElements(GL_TRIANGLES, element_count * 3, GL_UNSIGNED_INT, 0)
# remove bindings
glBindBuffer(GL_ARRAY_BUFFER, 0)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
glUseProgram(0)
glFlush()
on_launch()
app.run()
| [
"numpy.clip",
"pyglet.app.run",
"map_geom_tools.load_map_geom",
"scipy.spatial.cKDTree",
"map_plot_tools.load_map_plot",
"map_meta_tools.load_map_meta",
"numpy.max",
"numpy.array",
"numpy.linalg.inv",
"pyglet.window.Window",
"math.exp"
] | [((413, 510), 'pyglet.window.Window', 'Window', ([], {'config': 'config', 'width': 'view_width', 'height': 'view_height', 'resizable': '(True)', 'caption': '"""<none>"""'}), "(config=config, width=view_width, height=view_height, resizable=True,\n caption='<none>')\n", (419, 510), False, 'from pyglet.window import key, mouse, Window\n'), ((1066, 1081), 'map_meta_tools.load_map_meta', 'load_map_meta', ([], {}), '()\n', (1079, 1081), False, 'from map_meta_tools import load_map_meta\n'), ((1093, 1108), 'map_geom_tools.load_map_geom', 'load_map_geom', ([], {}), '()\n', (1106, 1108), False, 'from map_geom_tools import load_map_geom\n'), ((1120, 1135), 'map_plot_tools.load_map_plot', 'load_map_plot', ([], {}), '()\n', (1133, 1135), False, 'from map_plot_tools import load_map_plot\n'), ((4682, 4717), 'scipy.spatial.cKDTree', 'cKDTree', (["map_geom['element_middle']"], {}), "(map_geom['element_middle'])\n", (4689, 4717), False, 'from scipy.spatial import cKDTree\n'), ((4741, 4775), 'numpy.max', 'np.max', (["map_geom['element_extent']"], {}), "(map_geom['element_extent'])\n", (4747, 4775), True, 'import numpy as np\n'), ((9277, 9286), 'pyglet.app.run', 'app.run', ([], {}), '()\n', (9284, 9286), False, 'from pyglet import app\n'), ((4174, 4192), 'math.exp', 'math.exp', (['log_zoom'], {}), '(log_zoom)\n', (4182, 4192), False, 'import math\n'), ((4297, 4390), 'numpy.array', 'np.array', (['[[2 / window.width, 0, -1], [0, 2 / window.height, -1], [0, 0, 1]]'], {'dtype': '"""=f4"""'}), "([[2 / window.width, 0, -1], [0, 2 / window.height, -1], [0, 0, 1]],\n dtype='=f4')\n", (4305, 4390), True, 'import numpy as np\n'), ((4476, 4604), 'numpy.array', 'np.array', (['[[zoom, 0, window.width / 2 - zoom * mid_x], [0, zoom, window.height / 2 - \n zoom * mid_y], [0, 0, 1]]'], {'dtype': '"""=f4"""'}), "([[zoom, 0, window.width / 2 - zoom * mid_x], [0, zoom, window.\n height / 2 - zoom * mid_y], [0, 0, 1]], dtype='=f4')\n", (4484, 4604), True, 'import numpy as np\n'), ((6916, 6937), 'numpy.array', 'np.array', (['(x, y, 1.0)'], {}), '((x, y, 1.0))\n', (6924, 6937), True, 'import numpy as np\n'), ((7830, 7876), 'numpy.clip', 'np.clip', (['(log_zoom + 0.1 * scroll_y)', '(-8.0)', '(-2.0)'], {}), '(log_zoom + 0.1 * scroll_y, -8.0, -2.0)\n', (7837, 7876), True, 'import numpy as np\n'), ((6967, 6997), 'numpy.linalg.inv', 'np.linalg.inv', (['mat_map_to_view'], {}), '(mat_map_to_view)\n', (6980, 6997), True, 'import numpy as np\n'), ((7000, 7021), 'numpy.array', 'np.array', (['(x, y, 1.0)'], {}), '((x, y, 1.0))\n', (7008, 7021), True, 'import numpy as np\n'), ((7386, 7416), 'numpy.linalg.inv', 'np.linalg.inv', (['mat_map_to_view'], {}), '(mat_map_to_view)\n', (7399, 7416), True, 'import numpy as np\n'), ((7419, 7440), 'numpy.array', 'np.array', (['(x, y, 1.0)'], {}), '((x, y, 1.0))\n', (7427, 7440), True, 'import numpy as np\n'), ((8029, 8059), 'numpy.linalg.inv', 'np.linalg.inv', (['mat_map_to_view'], {}), '(mat_map_to_view)\n', (8042, 8059), True, 'import numpy as np\n'), ((8062, 8083), 'numpy.array', 'np.array', (['(x, y, 1.0)'], {}), '((x, y, 1.0))\n', (8070, 8083), True, 'import numpy as np\n')] |
import h5py as py
import matplotlib.pyplot as plt
# Numerical result
hdf5_file = py.File("../Build/TestsParallel/t3d_me_mt_block_sliding.h5", "r")
th_grp = hdf5_file['TimeHistory']['sliding']
th_num = th_grp.attrs['output_num']
cal_time = []
rb_x = []
rb_y = []
rb_z = []
rb_cfx = []
rb_cfy = []
rb_cfz = []
for th_id in range(th_num):
frame_grp = th_grp['frame_%d' % th_id]
cal_time.append(frame_grp.attrs['current_time'])
rb_grp = frame_grp['RigidCube']
rb_x.append(rb_grp.attrs['x'])
rb_y.append(rb_grp.attrs['y'])
rb_z.append(rb_grp.attrs['z'])
rb_cfx.append(rb_grp.attrs['fx_cont'])
rb_cfy.append(rb_grp.attrs['fy_cont'])
rb_cfz.append(rb_grp.attrs['fz_cont'])
hdf5_file.close()
data_file = open("../Build/TestsParallel/t3d_me_mt_block_sliding_res.csv", "w")
data_file.write("time, x, y, z, fx_cont, fy_cont, fz_cont,\n")
for i in range(len(cal_time)):
data_file.write("%f, %f, %f, %f, %f, %f, %f,\n" % (cal_time[i], \
rb_x[i], rb_y[i], rb_z[i], rb_cfx[i], rb_cfy[i], rb_cfz[i]))
data_file.close()
fig = plt.figure()
plot1 = fig.subplots(1, 1)
line1, = plot1.plot(cal_time, rb_x)
# line1, = plot1.plot(cal_time, rb_cfz)
# Analytical solution
#a = 3.0 / 9.0 # smooth
#a = (3.0 - 3.0 * 0.2) / 9.0 # frictional
a = (3.0 - 9.0 * 0.1) / 9.0 # sticky
an_rb_x = []
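# constant-acceleration kinematics from rest: x(t) = x0 + a*t^2/2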
for i in range(len(cal_time)):
an_rb_x.append(0.5 * a * cal_time[i] * cal_time[i] + rb_x[0])
line2, = plot1.plot(cal_time, an_rb_x, '--')
cal_time_range = [min(cal_time), max(cal_time)]
rb_x_range = [min(rb_x), max(rb_x)]
plt.xlim(cal_time_range)
plt.ylim(rb_x_range)
plt.legend(handles=[line1,line2], labels=['MPM', 'Analytical'])
plt.show()
| [
"h5py.File",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((82, 147), 'h5py.File', 'py.File', (['"""../Build/TestsParallel/t3d_me_mt_block_sliding.h5"""', '"""r"""'], {}), "('../Build/TestsParallel/t3d_me_mt_block_sliding.h5', 'r')\n", (89, 147), True, 'import h5py as py\n'), ((1075, 1087), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1085, 1087), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1581), 'matplotlib.pyplot.xlim', 'plt.xlim', (['cal_time_range'], {}), '(cal_time_range)\n', (1565, 1581), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1602), 'matplotlib.pyplot.ylim', 'plt.ylim', (['rb_x_range'], {}), '(rb_x_range)\n', (1590, 1602), True, 'import matplotlib.pyplot as plt\n'), ((1604, 1668), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[line1, line2]', 'labels': "['MPM', 'Analytical']"}), "(handles=[line1, line2], labels=['MPM', 'Analytical'])\n", (1614, 1668), True, 'import matplotlib.pyplot as plt\n'), ((1668, 1678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1676, 1678), True, 'import matplotlib.pyplot as plt\n')] |
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.mynmap import mynmap
class nmapnfsshares(actionModule):
def __init__(self, config, display, lock):
super(nmapnfsshares, self).__init__(config, display, lock)
self.title = "NMap NFS Share Scan"
self.shortName = "NmapNFSShareScan"
self.description = "execute [nmap -p111 --script=nfs-ls,nfs-showmount] on each target"
self.requirements = ["nmap"]
self.triggers = ["newPort111"]
self.safeLevel = 5
def getTargets(self):
self.targets = kb.get('host/*/tcpport/111')
def process(self):
# load any targets we are interested in
self.getTargets()
# loop over each target
for t in self.targets:
# verify we have not tested this host before
if not self.seentarget(t):
# add the new IP to the already seen list
self.addseentarget(t)
self.display.verbose(self.shortName + " - Connecting to " + t)
# run nmap
n = mynmap(self.config, self.display)
scan_results = n.run(target=t, flags="--script=nfs-ls,nfs-showmount", ports="111", vector=self.vector,
filetag=t + "_NFSSHARESCAN")['scan']
tree = ET.parse(n.outfile + '.xml')
root = tree.getroot()
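                # walk the nfs-ls/nfs-showmount XML: each <table key="volumes"> entry
                # describes one exported share and the files listed on it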
for volumestable in root.iter("table"):
                    if 'key' in volumestable.attrib and volumestable.attrib['key'] == "volumes":
for volume in volumestable:
sharename = ""
shareinfo = ""
files = {}
for elem in volume:
if elem.attrib["key"] == "volume":
sharename = elem.text.replace("/", "%2F")
if elem.attrib["key"] == "info":
shareinfo = elem[0].text.replace("/", "%2F")
if elem.attrib["key"] == "files":
for file in elem:
newfile = {}
for fileprop in file:
newfile[fileprop.attrib["key"]] = fileprop.text
files[newfile["filename"]] = newfile
kb.add("host/" + t + "/shares/NFS/" + sharename + "/" + str("Info: " + shareinfo))
for file in files:
# TODO - Maybe revisit adding more file properties here in addition to names
kb.add("host/" + t + "/shares/NFS/" + sharename + "/Files/" + str(file).replace("/", "%2F"))
return
| [
"core.mynmap.mynmap",
"xml.etree.ElementTree.parse",
"core.keystore.KeyStore.get"
] | [((707, 735), 'core.keystore.KeyStore.get', 'kb.get', (['"""host/*/tcpport/111"""'], {}), "('host/*/tcpport/111')\n", (713, 735), True, 'from core.keystore import KeyStore as kb\n'), ((1216, 1249), 'core.mynmap.mynmap', 'mynmap', (['self.config', 'self.display'], {}), '(self.config, self.display)\n', (1222, 1249), False, 'from core.mynmap import mynmap\n'), ((1467, 1495), 'xml.etree.ElementTree.parse', 'ET.parse', (["(n.outfile + '.xml')"], {}), "(n.outfile + '.xml')\n", (1475, 1495), True, 'import xml.etree.ElementTree as ET\n')] |
#!/usr/bin/env python
import sys, os, inspect, argparse
from natsort import natsorted
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import lib.library as lib
from Bio import SeqIO
from collections import OrderedDict
#setup menu with argparse
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(MyFormatter, self).__init__(prog, max_help_position=48)
parser = argparse.ArgumentParser(prog='gff2prot.py',
description = '''Script to convert GFF3 and FASTA to tbl, proteins, transcripts.''',
epilog = """Written by <NAME> (2018) <EMAIL>""",
formatter_class = MyFormatter)
parser.add_argument('-g', '--gff3', required=True, help='Genome annotation GFF3 format')
parser.add_argument('-f', '--fasta', required=True, help='Genome in FASTA format')
args=parser.parse_args()
def scaffold2Dict(input):
#get scaffold names/lengths
scaffLen = {}
with open(input, 'rU') as seqin:
for record in SeqIO.parse(seqin, 'fasta'):
if not record.id in scaffLen:
scaffLen[record.id] = len(record.seq)
return scaffLen
def dicts2tbl(genesDict, scaff2genes, scaffLen, SeqCenter, SeqRefNum):
'''
function to take funannotate annotation dictionaries and convert to NCBI tbl output
'''
duplicates = 0
pseudo = 0
nocds = 0
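    # one '>Feature <contig>' block per scaffold, followed by tab-separated
    # gene/mRNA/CDS intervals and qualifier lines (NCBI feature table format)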
for k,v in natsorted(scaff2genes.items()):
sys.stdout.write('>Feature %s\n' % k)
sys.stdout.write('1\t%s\tREFERENCE\n' % scaffLen.get(k))
sys.stdout.write('\t\t\t%s\t%s\n' % (SeqCenter, SeqRefNum))
for genes in v: #now loop through each gene on the scaffold
geneInfo = genesDict.get(genes) #single funannotate standard dictionary
if 'pseudo' in geneInfo:
if geneInfo['pseudo']:
                    sys.stderr.write('{:} is pseudo, skipping\n'.format(genes))
pseudo += 1
continue
if geneInfo['type'] == 'mRNA' and not geneInfo['CDS']:
                sys.stderr.write('Skipping {:} because no CDS found.\n'.format(genes))
pseudo += 1
continue
if geneInfo['type'] == 'mRNA' and not len(geneInfo['ids']) == len(geneInfo['mRNA']) == len(geneInfo['CDS']):
                sys.stderr.write('Incompatible annotation found: {:}\n{:}\n'.format(genes, geneInfo))
duplicates += 1
continue
if geneInfo['type'] == 'mRNA' and len(geneInfo['CDS']) == 0:
nocds += 1
continue
if geneInfo['type'] == None:
continue
#check for partial models
if True in geneInfo['partialStart']:
ps = '<'
else:
ps = ''
if True in geneInfo['partialStop']:
pss = '>'
else:
pss = ''
#if geneInfo['type'] == 'rRNA' or geneInfo['type'] == 'tRNA':
#ps = '<'
#pss = '>'
#now write gene model
if geneInfo['strand'] == '+':
sys.stdout.write('%s%i\t%s%i\tgene\n' % (ps, geneInfo['location'][0], pss, geneInfo['location'][1]))
sys.stdout.write('\t\t\tlocus_tag\t%s\n' % genes)
else:
sys.stdout.write('%s%i\t%s%i\tgene\n' % (ps, geneInfo['location'][1], pss, geneInfo['location'][0]))
sys.stdout.write('\t\t\tlocus_tag\t%s\n' % genes)
#now will output the gene models with -T1, -T2, -T3 annotations based on expression values
#means need to get the order
order = []
if len(geneInfo['ids']) > 1: #multiple transcripts, so get order of highest TPM
tpms = []
for num,tpm in enumerate(geneInfo['note']):
for item in tpm:
if item.startswith('TPM:'):
value = float(item.split(':')[-1])
tpms.append((value,num))
if len(tpms) > 0:
for x in sorted(tpms, reverse=True):
order.append(x[1])
else:
order = range(0,len(geneInfo['ids']))
else:
order.append(0)
for num,i in enumerate(order): #now write mRNA and CDS features
if geneInfo['ids'][i].startswith('evm.model'): #if from predict, rename to match locus_tag
protein_id = genes+'-T'+str(num+1)
else:
protein_id = geneInfo['ids'][i]
if geneInfo['type'] == 'mRNA':
if geneInfo['partialStart'][i] == False:
ps = ''
else:
ps = '<'
if geneInfo['partialStop'][i] == False:
pss = ''
else:
pss = '>'
if geneInfo['strand'] == '+':
for num, exon in enumerate(geneInfo['mRNA'][i]):
                            if num == 0 and num == len(geneInfo['mRNA'][i]) - 1: #single exon, so slightly different method
sys.stdout.write('%s%s\t%s%s\tmRNA\n' % (ps, exon[0], pss, exon[1]))
elif num == 0:
sys.stdout.write('%s%s\t%s\tmRNA\n' % (ps, exon[0], exon[1]))
elif num == len(geneInfo['mRNA'][i]) - 1: #this is last one
sys.stdout.write('%s\t%s%s\n' % (exon[0], pss, exon[1]))
else:
sys.stdout.write('%s\t%s\n' % (exon[0], exon[1]))
sys.stdout.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
sys.stdout.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
sys.stdout.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' % (protein_id))
for num, cds in enumerate(geneInfo['CDS'][i]):
                            if num == 0 and num == len(geneInfo['CDS'][i]) - 1: #single exon, so slightly different method
sys.stdout.write('%s%s\t%s%s\tCDS\n' % (ps, cds[0], pss, cds[1]))
elif num == 0:
sys.stdout.write('%s%s\t%s\tCDS\n' % (ps, cds[0], cds[1]))
elif num == len(geneInfo['CDS'][i]) - 1: #this is last one
sys.stdout.write('%s\t%s%s\n' % (cds[0], pss, cds[1]))
else:
sys.stdout.write('%s\t%s\n' % (cds[0], cds[1]))
sys.stdout.write('\t\t\tcodon_start\t%i\n' % geneInfo['codon_start'][i])
sys.stdout.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
sys.stdout.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
sys.stdout.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' % (protein_id))
else: #means this is on crick strand
for num, exon in enumerate(geneInfo['mRNA'][i]):
                            if num == 0 and num == len(geneInfo['mRNA'][i]) - 1: #single exon, so slightly different method
sys.stdout.write('%s%s\t%s%s\tmRNA\n' % (ps, exon[1], pss, exon[0]))
elif num == 0:
sys.stdout.write('%s%s\t%s\tmRNA\n' % (ps, exon[1], exon[0]))
elif num == len(geneInfo['mRNA'][i]) - 1: #this is last one
sys.stdout.write('%s\t%s%s\n' % (exon[1], pss, exon[0]))
else:
sys.stdout.write('%s\t%s\n' % (exon[1], exon[0]))
sys.stdout.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
sys.stdout.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
sys.stdout.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' % (protein_id))
for num, cds in enumerate(geneInfo['CDS'][i]):
                            if num == 0 and num == len(geneInfo['CDS'][i]) - 1: #single exon, so slightly different method
sys.stdout.write('%s%s\t%s%s\tCDS\n' % (ps, cds[1], pss, cds[0]))
elif num == 0:
sys.stdout.write('%s%s\t%s\tCDS\n' % (ps, cds[1], cds[0]))
elif num == (len(geneInfo['CDS'][i]) - 1): #this is last one
sys.stdout.write('%s\t%s%s\n' % (cds[1], pss, cds[0]))
else:
sys.stdout.write('%s\t%s\n' % (cds[1], cds[0]))
sys.stdout.write('\t\t\tcodon_start\t%i\n' % geneInfo['codon_start'][i])
sys.stdout.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
sys.stdout.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
sys.stdout.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' % (protein_id))
elif geneInfo['type'] == 'tRNA':
if geneInfo['strand'] == '+':
for num, exon in enumerate(geneInfo['mRNA'][i]):
if num == 0:
#sys.stdout.write('<%s\t>%s\t%s\n' % (exon[0], exon[1], geneInfo['type']))
sys.stdout.write('%s\t%s\t%s\n' % (exon[0], exon[1], geneInfo['type']))
else:
sys.stdout.write('%s\t%s\n' % (exon[0], exon[1]))
sys.stdout.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
if geneInfo['product'] == 'tRNA-Xxx':
sys.stdout.write('\t\t\tpseudo\n')
else:
for num, exon in enumerate(geneInfo['mRNA'][i]):
if num == 0:
#sys.stdout.write('<%s\t>%s\t%s\n' % (exon[1], exon[0], geneInfo['type']))
sys.stdout.write('%s\t%s\t%s\n' % (exon[1], exon[0], geneInfo['type']))
else:
sys.stdout.write('%s\t%s\n' % (exon[1], exon[0]))
sys.stdout.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
if geneInfo['product'] == 'tRNA-Xxx':
sys.stdout.write('\t\t\tpseudo\n')
elif geneInfo['type'] == 'rRNA':
if geneInfo['strand'] == '+':
#sys.stdout.write('<%s\t>%s\t%s\n' % (geneInfo['location'][0],geneInfo['location'][1], geneInfo['type']))
sys.stdout.write('%s\t%s\t%s\n' % (geneInfo['location'][0],geneInfo['location'][1], geneInfo['type']))
sys.stdout.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
else:
#sys.stdout.write('<%s\t>%s\t%s\n' % (geneInfo['location'][1],geneInfo['location'][0], geneInfo['type']))
sys.stdout.write('%s\t%s\t%s\n' % (geneInfo['location'][1],geneInfo['location'][0], geneInfo['type']))
sys.stdout.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
if any(i > 0 for i in [duplicates,pseudo,nocds]):
sys.stderr.write('Skipped {:,} annotations: {:,} pseudo genes; {:,} no CDS; {:,} duplicated features'.format(sum([pseudo,nocds,duplicates]),pseudo,nocds,duplicates))
#load into dictionary
Genes = {}
Genes = lib.gff2dict(args.gff3, args.fasta, Genes)
#sort the dictionary
def _sortDict(d):
return (d[1]['location'][0], d[1]['location'][1])
#now sort dictionary by contig and location, rename using prefix, translate to protein space to get proper start/stop info
sGenes = sorted(Genes.iteritems(), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
scaff2genes = {}
for k,v in sortedGenes.items():
if not v['contig'] in scaff2genes:
scaff2genes[v['contig']] = [k]
else:
scaff2genes[v['contig']].append(k)
#get length of scaffolds
scaffLen = scaffold2Dict(args.fasta)
#now write table
dicts2tbl(sortedGenes, scaff2genes, scaffLen, 'CFMR', '12345')
#dict2tbl(Genes, scaffLen)
| [
"collections.OrderedDict",
"sys.path.insert",
"argparse.ArgumentParser",
"inspect.currentframe",
"lib.library.gff2dict",
"os.path.dirname",
"Bio.SeqIO.parse",
"sys.stdout.write"
] | [((186, 213), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (201, 213), False, 'import sys, os, inspect, argparse\n'), ((214, 243), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (229, 243), False, 'import sys, os, inspect, argparse\n'), ((522, 725), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""gff2prot.py"""', 'description': '"""Script to convert GFF3 and FASTA to tbl, proteins, transcripts."""', 'epilog': '"""Written by <NAME> (2018) <EMAIL>"""', 'formatter_class': 'MyFormatter'}), "(prog='gff2prot.py', description=\n 'Script to convert GFF3 and FASTA to tbl, proteins, transcripts.',\n epilog='Written by <NAME> (2018) <EMAIL>', formatter_class=MyFormatter)\n", (545, 725), False, 'import sys, os, inspect, argparse\n'), ((11993, 12035), 'lib.library.gff2dict', 'lib.gff2dict', (['args.gff3', 'args.fasta', 'Genes'], {}), '(args.gff3, args.fasta, Genes)\n', (12005, 12035), True, 'import lib.library as lib\n'), ((12318, 12337), 'collections.OrderedDict', 'OrderedDict', (['sGenes'], {}), '(sGenes)\n', (12329, 12337), False, 'from collections import OrderedDict\n'), ((1077, 1104), 'Bio.SeqIO.parse', 'SeqIO.parse', (['seqin', '"""fasta"""'], {}), "(seqin, 'fasta')\n", (1088, 1104), False, 'from Bio import SeqIO\n'), ((1525, 1562), 'sys.stdout.write', 'sys.stdout.write', (["('>Feature %s\\n' % k)"], {}), "('>Feature %s\\n' % k)\n", (1541, 1562), False, 'import sys, os, inspect, argparse\n'), ((1636, 1695), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\t%s\\t%s\\n' % (SeqCenter, SeqRefNum))"], {}), "('\\t\\t\\t%s\\t%s\\n' % (SeqCenter, SeqRefNum))\n", (1652, 1695), False, 'import sys, os, inspect, argparse\n'), ((148, 170), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (168, 170), False, 'import sys, os, inspect, argparse\n'), ((3220, 3324), 'sys.stdout.write', 'sys.stdout.write', (["('%s%i\\t%s%i\\tgene\\n' % (ps, geneInfo['location'][0], pss, geneInfo[\n 'location'][1]))"], {}), "('%s%i\\t%s%i\\tgene\\n' % (ps, geneInfo['location'][0], pss,\n geneInfo['location'][1]))\n", (3236, 3324), False, 'import sys, os, inspect, argparse\n'), ((3337, 3386), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tlocus_tag\\t%s\\n' % genes)"], {}), "('\\t\\t\\tlocus_tag\\t%s\\n' % genes)\n", (3353, 3386), False, 'import sys, os, inspect, argparse\n'), ((3421, 3525), 'sys.stdout.write', 'sys.stdout.write', (["('%s%i\\t%s%i\\tgene\\n' % (ps, geneInfo['location'][1], pss, geneInfo[\n 'location'][0]))"], {}), "('%s%i\\t%s%i\\tgene\\n' % (ps, geneInfo['location'][1], pss,\n geneInfo['location'][0]))\n", (3437, 3525), False, 'import sys, os, inspect, argparse\n'), ((3538, 3587), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tlocus_tag\\t%s\\n' % genes)"], {}), "('\\t\\t\\tlocus_tag\\t%s\\n' % genes)\n", (3554, 3587), False, 'import sys, os, inspect, argparse\n'), ((5900, 5964), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])"], {}), "('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])\n", (5916, 5964), False, 'import sys, os, inspect, argparse\n'), ((5989, 6061), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\ttranscript_id\\tgnl|ncbi|%s_mrna\\n' % protein_id)"], {}), "('\\t\\t\\ttranscript_id\\tgnl|ncbi|%s_mrna\\n' % protein_id)\n", (6005, 6061), False, 'import sys, os, inspect, argparse\n'), ((6088, 6152), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tprotein_id\\tgnl|ncbi|%s\\n' % protein_id)"], {}), 
"('\\t\\t\\tprotein_id\\tgnl|ncbi|%s\\n' % protein_id)\n", (6104, 6152), False, 'import sys, os, inspect, argparse\n'), ((6892, 6964), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tcodon_start\\t%i\\n' % geneInfo['codon_start'][i])"], {}), "('\\t\\t\\tcodon_start\\t%i\\n' % geneInfo['codon_start'][i])\n", (6908, 6964), False, 'import sys, os, inspect, argparse\n'), ((6989, 7053), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])"], {}), "('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])\n", (7005, 7053), False, 'import sys, os, inspect, argparse\n'), ((7078, 7150), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\ttranscript_id\\tgnl|ncbi|%s_mrna\\n' % protein_id)"], {}), "('\\t\\t\\ttranscript_id\\tgnl|ncbi|%s_mrna\\n' % protein_id)\n", (7094, 7150), False, 'import sys, os, inspect, argparse\n'), ((7177, 7241), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tprotein_id\\tgnl|ncbi|%s\\n' % protein_id)"], {}), "('\\t\\t\\tprotein_id\\tgnl|ncbi|%s\\n' % protein_id)\n", (7193, 7241), False, 'import sys, os, inspect, argparse\n'), ((8113, 8177), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])"], {}), "('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])\n", (8129, 8177), False, 'import sys, os, inspect, argparse\n'), ((8202, 8274), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\ttranscript_id\\tgnl|ncbi|%s_mrna\\n' % protein_id)"], {}), "('\\t\\t\\ttranscript_id\\tgnl|ncbi|%s_mrna\\n' % protein_id)\n", (8218, 8274), False, 'import sys, os, inspect, argparse\n'), ((8301, 8365), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tprotein_id\\tgnl|ncbi|%s\\n' % protein_id)"], {}), "('\\t\\t\\tprotein_id\\tgnl|ncbi|%s\\n' % protein_id)\n", (8317, 8365), False, 'import sys, os, inspect, argparse\n'), ((9107, 9179), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tcodon_start\\t%i\\n' % geneInfo['codon_start'][i])"], {}), "('\\t\\t\\tcodon_start\\t%i\\n' % geneInfo['codon_start'][i])\n", (9123, 9179), False, 'import sys, os, inspect, argparse\n'), ((9204, 9268), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])"], {}), "('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])\n", (9220, 9268), False, 'import sys, os, inspect, argparse\n'), ((9293, 9365), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\ttranscript_id\\tgnl|ncbi|%s_mrna\\n' % protein_id)"], {}), "('\\t\\t\\ttranscript_id\\tgnl|ncbi|%s_mrna\\n' % protein_id)\n", (9309, 9365), False, 'import sys, os, inspect, argparse\n'), ((9392, 9456), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tprotein_id\\tgnl|ncbi|%s\\n' % protein_id)"], {}), "('\\t\\t\\tprotein_id\\tgnl|ncbi|%s\\n' % protein_id)\n", (9408, 9456), False, 'import sys, os, inspect, argparse\n'), ((10023, 10087), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])"], {}), "('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])\n", (10039, 10087), False, 'import sys, os, inspect, argparse\n'), ((10712, 10776), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])"], {}), "('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])\n", (10728, 10776), False, 'import sys, os, inspect, argparse\n'), ((5377, 5445), 'sys.stdout.write', 'sys.stdout.write', (["('%s%s\\t%s%s\\tmRNA\\n' % (ps, exon[0], pss, exon[1]))"], {}), "('%s%s\\t%s%s\\tmRNA\\n' % (ps, exon[0], pss, exon[1]))\n", (5393, 5445), False, 'import sys, os, 
inspect, argparse\n'), ((6380, 6445), 'sys.stdout.write', 'sys.stdout.write', (["('%s%s\\t%s%s\\tCDS\\n' % (ps, cds[0], pss, cds[1]))"], {}), "('%s%s\\t%s%s\\tCDS\\n' % (ps, cds[0], pss, cds[1]))\n", (6396, 6445), False, 'import sys, os, inspect, argparse\n'), ((7573, 7641), 'sys.stdout.write', 'sys.stdout.write', (["('%s%s\\t%s%s\\tmRNA\\n' % (ps, exon[1], pss, exon[0]))"], {}), "('%s%s\\t%s%s\\tmRNA\\n' % (ps, exon[1], pss, exon[0]))\n", (7589, 7641), False, 'import sys, os, inspect, argparse\n'), ((8593, 8658), 'sys.stdout.write', 'sys.stdout.write', (["('%s%s\\t%s%s\\tCDS\\n' % (ps, cds[1], pss, cds[0]))"], {}), "('%s%s\\t%s%s\\tCDS\\n' % (ps, cds[1], pss, cds[0]))\n", (8609, 8658), False, 'import sys, os, inspect, argparse\n'), ((10178, 10212), 'sys.stdout.write', 'sys.stdout.write', (['"""\t\t\tpseudo\n"""'], {}), "('\\t\\t\\tpseudo\\n')\n", (10194, 10212), False, 'import sys, os, inspect, argparse\n'), ((10867, 10901), 'sys.stdout.write', 'sys.stdout.write', (['"""\t\t\tpseudo\n"""'], {}), "('\\t\\t\\tpseudo\\n')\n", (10883, 10901), False, 'import sys, os, inspect, argparse\n'), ((11155, 11263), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\t%s\\n' % (geneInfo['location'][0], geneInfo['location'][1],\n geneInfo['type']))"], {}), "('%s\\t%s\\t%s\\n' % (geneInfo['location'][0], geneInfo[\n 'location'][1], geneInfo['type']))\n", (11171, 11263), False, 'import sys, os, inspect, argparse\n'), ((11282, 11346), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])"], {}), "('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])\n", (11298, 11346), False, 'import sys, os, inspect, argparse\n'), ((11530, 11638), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\t%s\\n' % (geneInfo['location'][1], geneInfo['location'][0],\n geneInfo['type']))"], {}), "('%s\\t%s\\t%s\\n' % (geneInfo['location'][1], geneInfo[\n 'location'][0], geneInfo['type']))\n", (11546, 11638), False, 'import sys, os, inspect, argparse\n'), ((11657, 11721), 'sys.stdout.write', 'sys.stdout.write', (["('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])"], {}), "('\\t\\t\\tproduct\\t%s\\n' % geneInfo['product'][i])\n", (11673, 11721), False, 'import sys, os, inspect, argparse\n'), ((5521, 5582), 'sys.stdout.write', 'sys.stdout.write', (["('%s%s\\t%s\\tmRNA\\n' % (ps, exon[0], exon[1]))"], {}), "('%s%s\\t%s\\tmRNA\\n' % (ps, exon[0], exon[1]))\n", (5537, 5582), False, 'import sys, os, inspect, argparse\n'), ((6521, 6579), 'sys.stdout.write', 'sys.stdout.write', (["('%s%s\\t%s\\tCDS\\n' % (ps, cds[0], cds[1]))"], {}), "('%s%s\\t%s\\tCDS\\n' % (ps, cds[0], cds[1]))\n", (6537, 6579), False, 'import sys, os, inspect, argparse\n'), ((7717, 7778), 'sys.stdout.write', 'sys.stdout.write', (["('%s%s\\t%s\\tmRNA\\n' % (ps, exon[1], exon[0]))"], {}), "('%s%s\\t%s\\tmRNA\\n' % (ps, exon[1], exon[0]))\n", (7733, 7778), False, 'import sys, os, inspect, argparse\n'), ((8734, 8792), 'sys.stdout.write', 'sys.stdout.write', (["('%s%s\\t%s\\tCDS\\n' % (ps, cds[1], cds[0]))"], {}), "('%s%s\\t%s\\tCDS\\n' % (ps, cds[1], cds[0]))\n", (8750, 8792), False, 'import sys, os, inspect, argparse\n'), ((9811, 9882), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\t%s\\n' % (exon[0], exon[1], geneInfo['type']))"], {}), "('%s\\t%s\\t%s\\n' % (exon[0], exon[1], geneInfo['type']))\n", (9827, 9882), False, 'import sys, os, inspect, argparse\n'), ((9949, 9998), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\n' % (exon[0], exon[1]))"], {}), "('%s\\t%s\\n' % (exon[0], exon[1]))\n", (9965, 9998), 
False, 'import sys, os, inspect, argparse\n'), ((10500, 10571), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\t%s\\n' % (exon[1], exon[0], geneInfo['type']))"], {}), "('%s\\t%s\\t%s\\n' % (exon[1], exon[0], geneInfo['type']))\n", (10516, 10571), False, 'import sys, os, inspect, argparse\n'), ((10638, 10687), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\n' % (exon[1], exon[0]))"], {}), "('%s\\t%s\\n' % (exon[1], exon[0]))\n", (10654, 10687), False, 'import sys, os, inspect, argparse\n'), ((5703, 5759), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s%s\\n' % (exon[0], pss, exon[1]))"], {}), "('%s\\t%s%s\\n' % (exon[0], pss, exon[1]))\n", (5719, 5759), False, 'import sys, os, inspect, argparse\n'), ((5826, 5875), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\n' % (exon[0], exon[1]))"], {}), "('%s\\t%s\\n' % (exon[0], exon[1]))\n", (5842, 5875), False, 'import sys, os, inspect, argparse\n'), ((6699, 6753), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s%s\\n' % (cds[0], pss, cds[1]))"], {}), "('%s\\t%s%s\\n' % (cds[0], pss, cds[1]))\n", (6715, 6753), False, 'import sys, os, inspect, argparse\n'), ((6820, 6867), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\n' % (cds[0], cds[1]))"], {}), "('%s\\t%s\\n' % (cds[0], cds[1]))\n", (6836, 6867), False, 'import sys, os, inspect, argparse\n'), ((7899, 7955), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s%s\\n' % (exon[1], pss, exon[0]))"], {}), "('%s\\t%s%s\\n' % (exon[1], pss, exon[0]))\n", (7915, 7955), False, 'import sys, os, inspect, argparse\n'), ((8022, 8071), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\n' % (exon[1], exon[0]))"], {}), "('%s\\t%s\\n' % (exon[1], exon[0]))\n", (8038, 8071), False, 'import sys, os, inspect, argparse\n'), ((8914, 8968), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s%s\\n' % (cds[1], pss, cds[0]))"], {}), "('%s\\t%s%s\\n' % (cds[1], pss, cds[0]))\n", (8930, 8968), False, 'import sys, os, inspect, argparse\n'), ((9035, 9082), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\t%s\\n' % (cds[1], cds[0]))"], {}), "('%s\\t%s\\n' % (cds[1], cds[0]))\n", (9051, 9082), False, 'import sys, os, inspect, argparse\n')] |
import os, glob
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import torch.nn.functional as F
from PIL import Image
import numpy as np
import torch
import cv2
import random, math
class FirstStageDataset(Dataset):
def __init__(self, occ_path, img_path, lmk_path, test=False, flag=None):
self.occ_path = occ_path
self.img_path = img_path
self.lmk_path = lmk_path
self.test = test
self.flag = flag
self.transform = transforms.Compose([
transforms.ToTensor()
])
self.color_aug = transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3)
print('Load data list')
self.img_list = glob.glob(img_path+'/*.jpg')
print("The number of data: {}".format(len(self.img_list)))
def __len__(self):
return len(self.img_list)
def __rmul__(self, v):
self.img_list = v * self.img_list
return self
def get_rot_mat(self, angle):
"""
angle : radian
"""
angle = torch.tensor(angle)
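        # 2x3 affine rotation matrix (normalized coordinates), as expected by F.affine_grid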
return torch.tensor([[torch.cos(angle), -torch.sin(angle), 0],
[torch.sin(angle), torch.cos(angle), 0]])
def __getitem__(self, index):
filename = os.path.basename(self.img_list[index])
img = Image.open(self.img_list[index]).convert('RGB')
if os.path.exists(os.path.join(self.occ_path, filename)):
# Non-occluded augmentation
if not self.test and torch.rand(1) < 0.5:
occluded = img.copy()
else:
occluded = Image.open(os.path.join(self.occ_path, filename)).convert('RGB')
else:
occluded = img.copy()
lmk = np.load(os.path.join(self.lmk_path, filename[:-3]+'npy'))
lmk = lmk[:,:2]
if self.test:
occluded = self.transform(occluded)
return occluded, lmk
        # Flag to prevent error propagation from 3DDFAv2
flag = torch.ones(1)
if self.flag is not None and filename[:len(self.flag)] == self.flag:
flag = flag * 0.9
# Brightness, contrast, saturation augmentation
if torch.rand(1) < 0.5:
color_trans = transforms.ColorJitter.get_params(
self.color_aug.brightness, self.color_aug.contrast,
self.color_aug.saturation, self.color_aug.hue
)
img = color_trans(img)
occluded = color_trans(occluded)
occluded = self.transform(occluded)
img = self.transform(img)
# Low-resolution augmentation
if not self.test and torch.rand(1) < 0.25:
# scale_factor = -0.4 * torch.rand(1) + 0.5
occluded = F.interpolate(occluded.unsqueeze(0), scale_factor=0.25, mode='bilinear', align_corners=True)
occluded = F.interpolate(occluded, (224,224), mode='bilinear', align_corners=True).squeeze(0)
# Rotation augmentation
if not self.test and torch.rand(1) < 0.25:
angle = random.random()*math.pi/2 - (math.pi / 4)
M = self.get_rot_mat(angle)
occluded = occluded.unsqueeze(0)
img = img.unsqueeze(0)
grid = F.affine_grid(M[None,...], occluded.size())
occluded = F.grid_sample(occluded, grid)
img = F.grid_sample(img, grid)
occluded = occluded.squeeze(0)
img = img.squeeze(0)
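            # rotate the 68 landmarks about the image centre (112, 112) so they
            # stay aligned with the rotated image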
ones = np.ones(shape=(68,1))
M = cv2.getRotationMatrix2D((112,112), angle*(180/math.pi), 1.0)
lmk = np.hstack([lmk, ones])
lmk = M.dot(lmk.T).T
if not self.test and torch.rand(1) < 0.25:
sy = random.randint(0,56)
sx = random.randint(0,56)
h = random.randint(112, 168)
w = random.randint(112, 168)
occluded[:,:sy,:] = 0.0
occluded[:,:,:sx] = 0.0
occluded[:,sy+h:,sx+w:] = 0.0
return occluded, img, lmk, flag
class LP_Dataset(Dataset):
def __init__(self, img_path, lmk_path):
self.img_path = img_path
self.lmk_path = lmk_path
# self.pose_path = pose_path
self.transform = transforms.Compose([
transforms.ToTensor(),
])
self.lmk_list = glob.glob(lmk_path+'/*.npy')
# self.img_list = glob.glob(img_path+'/*.jpg')
# self.img_list += glob.glob(img_path+'/*.png')
print("The number of data: {}".format(len(self.lmk_list)))
def __len__(self):
return len(self.lmk_list)
def __getitem__(self, index):
filename = os.path.basename(self.lmk_list[index])
lmk = np.load(self.lmk_list[index])
if lmk.shape[1]==3:
lmk = lmk[:,:2]
img = Image.open(os.path.join(self.img_path, filename[:-3]+'jpg')).convert('RGB')
lmk = torch.from_numpy(lmk)
return self.transform(img), lmk
class MaskedFaceDataset(Dataset):
def __init__(self, mfd_path, ori_path, p=0.5):
self.mfd_path = mfd_path
self.ori_path = ori_path
self.p = p
self.transform = transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor()
])
self.color_aug = transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.2/3.14)
# self.erasing_transform_rand = transforms.RandomErasing(p=0.5, scale=(0.05,0.1), ratio=(0.1, 3.3), value='random')
self.mfd_list = glob.glob(mfd_path+'/*.jpg')
self.mfd_list += glob.glob(mfd_path+'/*.png')
# self.lp_list = glob.glob(lp_path+'/*.jpg')
print('The number of data: {}'.format(len(self.mfd_list)))
def __len__(self):
return len(self.mfd_list)
def __getitem__(self, index):
color_trans = transforms.ColorJitter.get_params(
self.color_aug.brightness, self.color_aug.contrast,
self.color_aug.saturation, self.color_aug.hue
)
# if torch.rand(1) < self.p:
# ori = Image.open(self.lp_list[index % len(self.lp_list)]).convert('RGB')
# ori = self.transform(ori)
# mf = ori.detach().clone()
# if torch.rand(1) < 0.25:
# x = random.randint(70,175)
# y = random.randint(70,175)
# w = random.randint(10, 70)
# h = random.randint(10, 70)
# random_box = -1 * torch.rand(3,1,1) + 1
# noise = -0.2*torch.rand(3,224,224) + 0.1
# random_box = (random_box.expand_as(ori) + noise)
# mf[:,y:y+h,x:x+w] = random_box[:,y:y+h,x:x+w]
# mf = torch.clamp(mf, 0.0, 1.0)
name = os.path.basename(self.mfd_list[index]).split('_')[0]
ori = Image.open(os.path.join(self.ori_path, name+'.jpg')).convert('RGB')
ori = color_trans(ori)
ori = self.transform(ori)
if torch.rand(1) < 0.5:
mf = Image.open(self.mfd_list[index]).convert('RGB')
mf = color_trans(mf)
mf = self.transform(mf)
else:
mf = ori.detach().clone()
if torch.rand(1) < self.p:
mf = torch.flip(mf, dims=[2])
ori = torch.flip(ori, dims=[2])
if torch.rand(1) < 0.25:
mf = F.interpolate(mf.unsqueeze(0), scale_factor=0.25, mode='bilinear', align_corners=True)
mf = F.interpolate(mf, scale_factor=4.0, mode='bilinear', align_corners=True).squeeze(0)
return mf, ori
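# --- Illustrative usage sketch (hypothetical paths, never executed here) ---
# MaskedFaceDataset yields (masked_face, original_face) tensor pairs, so a typical
# consumer is a DataLoader feeding an image-to-image model. The directory paths and
# batch size below are placeholders, not values from the original project.
def _masked_face_loader_sketch(mfd_path='/path/to/masked', ori_path='/path/to/original'):
    from torch.utils.data import DataLoader
    dataset = MaskedFaceDataset(mfd_path=mfd_path, ori_path=ori_path, p=0.5)
    # Each batch is a pair of (B, 3, 224, 224) tensors: masked input and clean target.
    return DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)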
| [
"numpy.hstack",
"torch.sin",
"torch.from_numpy",
"torchvision.transforms.ColorJitter",
"torch.cos",
"torch.flip",
"torch.nn.functional.interpolate",
"torch.nn.functional.grid_sample",
"torchvision.transforms.ColorJitter.get_params",
"torchvision.transforms.ToTensor",
"random.randint",
"glob.glob",
"numpy.ones",
"torchvision.transforms.Resize",
"cv2.getRotationMatrix2D",
"PIL.Image.open",
"os.path.join",
"torch.tensor",
"os.path.basename",
"random.random",
"numpy.load",
"torch.rand",
"torch.ones"
] | [((592, 660), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.3)', 'contrast': '(0.3)', 'saturation': '(0.3)'}), '(brightness=0.3, contrast=0.3, saturation=0.3)\n', (614, 660), True, 'import torchvision.transforms as transforms\n'), ((717, 747), 'glob.glob', 'glob.glob', (["(img_path + '/*.jpg')"], {}), "(img_path + '/*.jpg')\n", (726, 747), False, 'import os, glob\n'), ((1063, 1082), 'torch.tensor', 'torch.tensor', (['angle'], {}), '(angle)\n', (1075, 1082), False, 'import torch\n'), ((1283, 1321), 'os.path.basename', 'os.path.basename', (['self.img_list[index]'], {}), '(self.img_list[index])\n', (1299, 1321), False, 'import os, glob\n'), ((2014, 2027), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (2024, 2027), False, 'import torch\n'), ((4333, 4363), 'glob.glob', 'glob.glob', (["(lmk_path + '/*.npy')"], {}), "(lmk_path + '/*.npy')\n", (4342, 4363), False, 'import os, glob\n'), ((4657, 4695), 'os.path.basename', 'os.path.basename', (['self.lmk_list[index]'], {}), '(self.lmk_list[index])\n', (4673, 4695), False, 'import os, glob\n'), ((4710, 4739), 'numpy.load', 'np.load', (['self.lmk_list[index]'], {}), '(self.lmk_list[index])\n', (4717, 4739), True, 'import numpy as np\n'), ((4901, 4922), 'torch.from_numpy', 'torch.from_numpy', (['lmk'], {}), '(lmk)\n', (4917, 4922), False, 'import torch\n'), ((5294, 5383), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.3)', 'contrast': '(0.3)', 'saturation': '(0.3)', 'hue': '(0.2 / 3.14)'}), '(brightness=0.3, contrast=0.3, saturation=0.3, hue=\n 0.2 / 3.14)\n', (5316, 5383), True, 'import torchvision.transforms as transforms\n'), ((5526, 5556), 'glob.glob', 'glob.glob', (["(mfd_path + '/*.jpg')"], {}), "(mfd_path + '/*.jpg')\n", (5535, 5556), False, 'import os, glob\n'), ((5580, 5610), 'glob.glob', 'glob.glob', (["(mfd_path + '/*.png')"], {}), "(mfd_path + '/*.png')\n", (5589, 5610), False, 'import os, glob\n'), ((5853, 5990), 'torchvision.transforms.ColorJitter.get_params', 'transforms.ColorJitter.get_params', (['self.color_aug.brightness', 'self.color_aug.contrast', 'self.color_aug.saturation', 'self.color_aug.hue'], {}), '(self.color_aug.brightness, self.color_aug\n .contrast, self.color_aug.saturation, self.color_aug.hue)\n', (5886, 5990), True, 'import torchvision.transforms as transforms\n'), ((1410, 1447), 'os.path.join', 'os.path.join', (['self.occ_path', 'filename'], {}), '(self.occ_path, filename)\n', (1422, 1447), False, 'import os, glob\n'), ((1762, 1812), 'os.path.join', 'os.path.join', (['self.lmk_path', "(filename[:-3] + 'npy')"], {}), "(self.lmk_path, filename[:-3] + 'npy')\n", (1774, 1812), False, 'import os, glob\n'), ((2203, 2216), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2213, 2216), False, 'import torch\n'), ((2250, 2387), 'torchvision.transforms.ColorJitter.get_params', 'transforms.ColorJitter.get_params', (['self.color_aug.brightness', 'self.color_aug.contrast', 'self.color_aug.saturation', 'self.color_aug.hue'], {}), '(self.color_aug.brightness, self.color_aug\n .contrast, self.color_aug.saturation, self.color_aug.hue)\n', (2283, 2387), True, 'import torchvision.transforms as transforms\n'), ((3317, 3346), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['occluded', 'grid'], {}), '(occluded, grid)\n', (3330, 3346), True, 'import torch.nn.functional as F\n'), ((3365, 3389), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['img', 'grid'], {}), '(img, grid)\n', (3378, 3389), True, 'import torch.nn.functional as F\n'), 
((3499, 3521), 'numpy.ones', 'np.ones', ([], {'shape': '(68, 1)'}), '(shape=(68, 1))\n', (3506, 3521), True, 'import numpy as np\n'), ((3537, 3602), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(112, 112)', '(angle * (180 / math.pi))', '(1.0)'], {}), '((112, 112), angle * (180 / math.pi), 1.0)\n', (3560, 3602), False, 'import cv2\n'), ((3616, 3638), 'numpy.hstack', 'np.hstack', (['[lmk, ones]'], {}), '([lmk, ones])\n', (3625, 3638), True, 'import numpy as np\n'), ((3741, 3762), 'random.randint', 'random.randint', (['(0)', '(56)'], {}), '(0, 56)\n', (3755, 3762), False, 'import random, math\n'), ((3779, 3800), 'random.randint', 'random.randint', (['(0)', '(56)'], {}), '(0, 56)\n', (3793, 3800), False, 'import random, math\n'), ((3816, 3840), 'random.randint', 'random.randint', (['(112)', '(168)'], {}), '(112, 168)\n', (3830, 3840), False, 'import random, math\n'), ((3857, 3881), 'random.randint', 'random.randint', (['(112)', '(168)'], {}), '(112, 168)\n', (3871, 3881), False, 'import random, math\n'), ((6993, 7006), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (7003, 7006), False, 'import torch\n'), ((7225, 7238), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (7235, 7238), False, 'import torch\n'), ((7266, 7290), 'torch.flip', 'torch.flip', (['mf'], {'dims': '[2]'}), '(mf, dims=[2])\n', (7276, 7290), False, 'import torch\n'), ((7309, 7334), 'torch.flip', 'torch.flip', (['ori'], {'dims': '[2]'}), '(ori, dims=[2])\n', (7319, 7334), False, 'import torch\n'), ((7347, 7360), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (7357, 7360), False, 'import torch\n'), ((534, 555), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (553, 555), True, 'import torchvision.transforms as transforms\n'), ((1336, 1368), 'PIL.Image.open', 'Image.open', (['self.img_list[index]'], {}), '(self.img_list[index])\n', (1346, 1368), False, 'from PIL import Image\n'), ((2664, 2677), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2674, 2677), False, 'import torch\n'), ((3026, 3039), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (3036, 3039), False, 'import torch\n'), ((3702, 3715), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (3712, 3715), False, 'import torch\n'), ((4274, 4295), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4293, 4295), True, 'import torchvision.transforms as transforms\n'), ((5194, 5223), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (5211, 5223), True, 'import torchvision.transforms as transforms\n'), ((5236, 5257), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5255, 5257), True, 'import torchvision.transforms as transforms\n'), ((1113, 1129), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (1122, 1129), False, 'import torch\n'), ((1184, 1200), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (1193, 1200), False, 'import torch\n'), ((1202, 1218), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (1211, 1218), False, 'import torch\n'), ((1523, 1536), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1533, 1536), False, 'import torch\n'), ((2881, 2953), 'torch.nn.functional.interpolate', 'F.interpolate', (['occluded', '(224, 224)'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(occluded, (224, 224), mode='bilinear', align_corners=True)\n", (2894, 2953), True, 'import torch.nn.functional as F\n'), ((4821, 4871), 'os.path.join', 'os.path.join', (['self.img_path', "(filename[:-3] + 'jpg')"], 
{}), "(self.img_path, filename[:-3] + 'jpg')\n", (4833, 4871), False, 'import os, glob\n'), ((6781, 6819), 'os.path.basename', 'os.path.basename', (['self.mfd_list[index]'], {}), '(self.mfd_list[index])\n', (6797, 6819), False, 'import os, glob\n'), ((6859, 6901), 'os.path.join', 'os.path.join', (['self.ori_path', "(name + '.jpg')"], {}), "(self.ori_path, name + '.jpg')\n", (6871, 6901), False, 'import os, glob\n'), ((7031, 7063), 'PIL.Image.open', 'Image.open', (['self.mfd_list[index]'], {}), '(self.mfd_list[index])\n', (7041, 7063), False, 'from PIL import Image\n'), ((7490, 7562), 'torch.nn.functional.interpolate', 'F.interpolate', (['mf'], {'scale_factor': '(4.0)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(mf, scale_factor=4.0, mode='bilinear', align_corners=True)\n", (7503, 7562), True, 'import torch.nn.functional as F\n'), ((1132, 1148), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (1141, 1148), False, 'import torch\n'), ((3068, 3083), 'random.random', 'random.random', ([], {}), '()\n', (3081, 3083), False, 'import random, math\n'), ((1638, 1675), 'os.path.join', 'os.path.join', (['self.occ_path', 'filename'], {}), '(self.occ_path, filename)\n', (1650, 1675), False, 'import os, glob\n')] |
from collections import defaultdict
from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type
def get_graphical_mapping(current_example_row, cache, model_config, composite_component_allocation):
"""
Get graphical mapping for BRS.
:param current_example_row:
:param cache:
:param model_config:
:param composite_component_allocation:
:return: tuple of form ("GraphicalReporting", "server_uid")
"""
try:
# Get first key of composite_component_allocation
first_composite_key = list(composite_component_allocation.keys())[0]
length_composite = len(composite_component_allocation[first_composite_key])
for column_name in model_config["components"]:
allocation_name = column_name.replace("AllocationDegreeImpl:", "")
context = get_element_by_identifier(element_tree=cache.get_xml_tree("allocation"),
search_string=allocation_name,
attribute="entityName")
if "_MNz1gHqQEd6uqIqMUZizUw" == context.get("id"):
# check if column (with name of component) of current test data is allocated to existing server
if current_example_row[column_name] in model_config["server"].keys():
# if component is allocated to existing server append allocation to list
for server_string in model_config["server"]:
# if component is part of composite
if current_example_row[column_name] == server_string:
server_id_clean = model_config["server"][current_example_row[column_name]]
server_name = get_by_id(element=cache.get_xml_tree("resourceenvironment"),
element_id=server_id_clean).get("entityName")
if length_composite == 1:
server_uid = '#{server_name}-2#'.format(server_name=server_name)
else:
server_uid = '#{server_name}#'.format(server_name=server_name)
if server_uid:
graphical_mapping = ("GraphicalReporting", server_uid)
return graphical_mapping
except IndexError:
# Simple Heuristics has no composite_component_mapping
pass
return {}
def get_composite_component(current_example_row, cache, model_config):
"""
maps component_id to dict of {cpu_id: False, ...}
:param current_example_row:
:param cache:
:param model_config:
:return: nested mapping_dict = { #there can be multiple components
component_id = { #components can be deployed on multiple servers
cpu_id: False,
...
},
...
}
"""
mapping_dict = defaultdict(lambda: {})
# for context in
for column_name in model_config["components"]:
allocation_name = column_name.replace("AllocationDegreeImpl:", "")
context = get_element_by_identifier(element_tree=cache.get_xml_tree("allocation"),
search_string=allocation_name,
attribute="entityName")
system_id = get_linkage_id(identifier="assemblyContext_AllocationContext", element_tree=context)
assembly_context = get_by_id(element=cache.get_xml_tree("system"), element_id=system_id)
component = assembly_context.find("./encapsulatedComponent__AssemblyContext")
if component.get(get_xml_schema_type()) == "repository:CompositeComponent":
repo_id = get_linkage_id(element_tree=assembly_context, identifier="encapsulatedComponent__AssemblyContext")
composite_component = get_by_id(element=cache.get_xml_tree("repository"), element_id=repo_id)
for composed_structure in composite_component.findall("./assemblyContexts__ComposedStructure"):
component_id = composed_structure.get("encapsulatedComponent__AssemblyContext")
# check if column (with name of component) of current test data is allocated to existing server
if current_example_row[column_name] in model_config["server"].keys():
# if component is allocated to existing server append allocation to list
for server_id in model_config["server"]:
# if component is part of composite
if current_example_row[column_name] == server_id:
temp_server_id = model_config["server"][current_example_row[column_name]]
mapping_dict[component_id].update({temp_server_id: False})
return mapping_dict
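# Example of the nested structure get_composite_component() returns, with made-up
# ids purely for illustration (real keys come from the PCM XML model files):
#
#   {
#       '_component_A_id': {'_server_1_id': False, '_server_2_id': False},
#       '_component_B_id': {'_server_1_id': False},
#   }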
def get_component_allocations(current_example_row, model_config):
"""
Get allocation of components from CSV test data
:param current_example_row: CSV test data as pandas data frame
:param model_config: configurations for pcm model that contain server names, component names, ...
:return: allocations dict = {
allocation_id: cpu_id,
...
}
"""
allocations = {}
# get allocation for every component that is declared in the config file
for component in model_config["components"]:
# check if column (with name of component) of current test data is allocated to existing server
if current_example_row[component] in model_config["server"].keys():
# if component is allocated to existing server append allocation to list
for server_id in model_config["server"]:
# if component is part of composite
if current_example_row[component] == server_id:
# map component id : server id
allocations[model_config['components'][component]] = model_config["server"][server_id]
return allocations
def get_cpu_to_seff_id_mapping(current_example_row, model_config, allocations, cache):
"""
get Mapping from cpu id to list of deployed seffs on this server
:param current_example_row:
:param model_config:
:param allocations:
:param cache:
:return: nested component_connectors_dict = {
cpu_id : [seff_id, seff_id2, seff_id3, ...],
cpu_id2: [seff_id3, seff_id4, seff_id5, ...],
...
}
"""
alloc_root = cache.get_xml_tree("allocation")
component_connectors = {"component_connectors": {}}
for cpu_key in model_config["cpu_rate"]:
cpu_id = model_config["cpu_rate"][cpu_key]
for allocation_id in allocations.keys():
allocation_context = get_by_id(element=alloc_root, element_id=allocation_id)
for key in current_example_row.keys():
if allocation_context.get('entityName') in key and cpu_id in current_example_row[key]:
assembly_context_id = get_linkage_id(identifier="assemblyContext_AllocationContext",
element_tree=allocation_context)
seff_ids = _get_seff_ids_from_assembly_context(assembly_context_id, cache=cache)
nested_dict = component_connectors["component_connectors"]
if cpu_id in nested_dict.keys(): # if key already exists, append
for seff_id in seff_ids:
component_connectors["component_connectors"][cpu_id].append(seff_id)
else: # create key
component_connectors["component_connectors"].update({cpu_id: seff_ids})
return component_connectors
def _get_seff_ids_from_assembly_context(assembly_context_id, cache):
"""
Search for seffs from assembly_context
:param assembly_context_id:
:param cache:
:return: list of seff_ids that belong to assembly_context
"""
file_root = cache.get_xml_tree('system')
composed_structure = get_by_id(element=file_root, element_id=assembly_context_id)
component_id = get_linkage_id(identifier="encapsulatedComponent__AssemblyContext", element_tree=composed_structure)
list_of_seff_ids = []
file_root = cache.get_xml_tree('repository')
component = get_by_id(element=file_root, element_id=component_id)
component_type = component.get(get_xml_schema_type())
if component_type == 'repository:CompositeComponent':
for composed_structure in component.findall('./assemblyContexts__ComposedStructure'):
assembly_context_id = composed_structure.get('encapsulatedComponent__AssemblyContext')
assembly_context = get_by_id(element=file_root, element_id=assembly_context_id)
for seff in assembly_context.findall("./serviceEffectSpecifications__BasicComponent"):
seff_id = seff.get("describedService__SEFF")
list_of_seff_ids.append(seff_id)
else:
for seff in component.findall("./serviceEffectSpecifications__BasicComponent"):
seff_id = seff.get("describedService__SEFF")
list_of_seff_ids.append(seff_id)
return list_of_seff_ids
def get_cpu_rates(current_example_row, model_config):
"""
Get cpu rates of server from CSV test data
:param current_example_row: CSV test data as pandas data frame
:param model_config: configurations for pcm model that contain server names, component names, ...
:return: nested cpu rates dict = {
"cpu_rates": {
cpu_id : cpu_rate,
cpu_id2: cpu_rate2,
...
}
"""
cpu_rates = {"cpu_rates": {}}
for cpu_key in model_config["cpu_rate"]:
cpu_column_name = cpu_key
config = float(current_example_row[cpu_column_name])
cpu_rates["cpu_rates"].update({model_config["cpu_rate"][cpu_key]: config})
return cpu_rates
def get_assembled_components(current_example_row, model_config):
"""
Get assembled components
:param current_example_row: CSV test data as pandas data frame
:param model_config: configurations for pcm model that contain server names, component names, ...
:return: assembled components dict = { # maps from system to repository
assembled_component_id : component_id,
...
}
excluded_components = [unused_component_id1, unused_component_id2, ... ]
excluded_design_options = { # maps from repository
component_id : [design_option_id1, design_option_id2, ...],
...
}
"""
assembled_components = {}
excluded_components = []
excluded_design_options = {}
# find translation for every assembled component id and append to list
assembled_components_config = model_config["assembled_components"]
for component in assembled_components_config:
new_option = None
tmp_excluded = []
options = assembled_components_config[component]["options"]
for option in options:
# if current id is example id of csv file append translation to assembled_components
if option == current_example_row[component]:
assembled_components[assembled_components_config[component]["id"]] = options[option]
new_option = options[option]
else:
tmp_excluded.append(options[option])
excluded_components.append(options[option])
excluded_design_options[new_option] = tmp_excluded
return assembled_components, excluded_components, excluded_design_options
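# --- Illustrative sketch of how the helpers above are typically combined for one
# CSV row. `current_example_row`, `cache` and `model_config` come from the
# surrounding pyopteryx pipeline; their exact types are assumed here, and this
# helper is only a sketch, not part of the original module.
def _collect_row_mappings_sketch(current_example_row, cache, model_config):
    allocations = get_component_allocations(current_example_row, model_config)
    composite = get_composite_component(current_example_row, cache, model_config)
    cpu_rates = get_cpu_rates(current_example_row, model_config)
    seff_mapping = get_cpu_to_seff_id_mapping(current_example_row, model_config, allocations, cache)
    graphical = get_graphical_mapping(current_example_row, cache, model_config, composite)
    assembled = get_assembled_components(current_example_row, model_config)
    return allocations, composite, cpu_rates, seff_mapping, graphical, assembled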
| [
"pyopteryx.utils.xml_utils.get_xml_schema_type",
"pyopteryx.utils.xml_utils.get_by_id",
"collections.defaultdict",
"pyopteryx.utils.xml_utils.get_linkage_id"
] | [((2934, 2958), 'collections.defaultdict', 'defaultdict', (['(lambda : {})'], {}), '(lambda : {})\n', (2945, 2958), False, 'from collections import defaultdict\n'), ((8020, 8080), 'pyopteryx.utils.xml_utils.get_by_id', 'get_by_id', ([], {'element': 'file_root', 'element_id': 'assembly_context_id'}), '(element=file_root, element_id=assembly_context_id)\n', (8029, 8080), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type\n'), ((8100, 8204), 'pyopteryx.utils.xml_utils.get_linkage_id', 'get_linkage_id', ([], {'identifier': '"""encapsulatedComponent__AssemblyContext"""', 'element_tree': 'composed_structure'}), "(identifier='encapsulatedComponent__AssemblyContext',\n element_tree=composed_structure)\n", (8114, 8204), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type\n'), ((8292, 8345), 'pyopteryx.utils.xml_utils.get_by_id', 'get_by_id', ([], {'element': 'file_root', 'element_id': 'component_id'}), '(element=file_root, element_id=component_id)\n', (8301, 8345), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type\n'), ((3359, 3448), 'pyopteryx.utils.xml_utils.get_linkage_id', 'get_linkage_id', ([], {'identifier': '"""assemblyContext_AllocationContext"""', 'element_tree': 'context'}), "(identifier='assemblyContext_AllocationContext', element_tree\n =context)\n", (3373, 3448), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type\n'), ((8381, 8402), 'pyopteryx.utils.xml_utils.get_xml_schema_type', 'get_xml_schema_type', ([], {}), '()\n', (8400, 8402), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type\n'), ((3733, 3836), 'pyopteryx.utils.xml_utils.get_linkage_id', 'get_linkage_id', ([], {'element_tree': 'assembly_context', 'identifier': '"""encapsulatedComponent__AssemblyContext"""'}), "(element_tree=assembly_context, identifier=\n 'encapsulatedComponent__AssemblyContext')\n", (3747, 3836), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type\n'), ((6723, 6778), 'pyopteryx.utils.xml_utils.get_by_id', 'get_by_id', ([], {'element': 'alloc_root', 'element_id': 'allocation_id'}), '(element=alloc_root, element_id=allocation_id)\n', (6732, 6778), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type\n'), ((8686, 8746), 'pyopteryx.utils.xml_utils.get_by_id', 'get_by_id', ([], {'element': 'file_root', 'element_id': 'assembly_context_id'}), '(element=file_root, element_id=assembly_context_id)\n', (8695, 8746), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type\n'), ((3652, 3673), 'pyopteryx.utils.xml_utils.get_xml_schema_type', 'get_xml_schema_type', ([], {}), '()\n', (3671, 3673), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, get_linkage_id, get_xml_schema_type\n'), ((6975, 7075), 'pyopteryx.utils.xml_utils.get_linkage_id', 'get_linkage_id', ([], {'identifier': '"""assemblyContext_AllocationContext"""', 'element_tree': 'allocation_context'}), "(identifier='assemblyContext_AllocationContext', element_tree\n =allocation_context)\n", (6989, 7075), False, 'from pyopteryx.utils.xml_utils import get_element_by_identifier, get_by_id, 
get_linkage_id, get_xml_schema_type\n')] |
from time import time
import logging
import io
import sys
from numpy import ndarray
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.datasets._lfw import _load_imgs
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from cloudmesh.common.Benchmark import Benchmark
from sklearn.datasets._base import get_data_home, _fetch_remote, RemoteFileMetadata
from os import listdir, makedirs, remove
from os.path import dirname, join, exists, isdir
from cloudmesh.common.Shell import Shell
from cloudmesh.common.util import path_expand
from joblib import dump, load
from typing import Tuple
# This example is taken from https://scikit-learn.org/stable/auto_examples/applications/plot_face_recognition.html#sphx-glr-auto-examples-applications-plot-face-recognition-py
TARGETS = (
RemoteFileMetadata(
filename='pairsDevTrain.txt',
url='https://ndownloader.figshare.com/files/5976012',
checksum=('1d454dada7dfeca0e7eab6f65dc4e97a'
'6312d44cf142207be28d688be92aabfa')),
RemoteFileMetadata(
filename='pairsDevTest.txt',
url='https://ndownloader.figshare.com/files/5976009',
checksum=('7cb06600ea8b2814ac26e946201cdb30'
'4296262aad67d046a16a7ec85d0ff87c')),
RemoteFileMetadata(
filename='pairs.txt',
url='https://ndownloader.figshare.com/files/5976006',
checksum=('ea42330c62c92989f9d7c03237ed5d59'
'1365e89b3e649747777b70e692dc1592')),
)
class EigenfacesSVM:
"""TODO"""
@classmethod
    def download_data(cls,
                      images_filename: str = 'lfw-funneled.tgz',
                      images_url: str = 'https://ndownloader.figshare.com/files/5976015',
                      images_checksum: str = 'b47c8422c8cded889dc5a13418c4bc2a'
                                             'bbda121092b3533a83306f90d900100a',
                      data_home: str = None,
                      data_subdir: str = "lfw_home",
                      image_subdir: str = "lfw_funneled",
                      target_filenames: list = [],
                      target_urls: list = [],
                      target_checksums: list = []):
        '''
        Download the LFW image archive (and any additional target files) into the
        scikit-learn data home directory.
        '''
# this function is based on SKLearn's _check_fetch_lfw function.
Benchmark.Start()
archive = RemoteFileMetadata(
images_filename,
images_url,
checksum=(images_checksum))
        targets = TARGETS
        if target_filenames != []:
target_attributes = zip(target_filenames, target_urls, target_checksums)
targets = ()
for target in target_attributes:
filename, url, checksum = target
                # A one-element tuple needs a trailing comma; without it the
                # parentheses are only grouping and the concatenation fails.
                targets = targets + (RemoteFileMetadata(filename, url, checksum),)
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, data_subdir)
if not exists(lfw_home):
makedirs(lfw_home)
        for target in targets:
target_filepath = join(lfw_home, target.filename)
_fetch_remote(target, dirname=lfw_home)
data_folder_path = join(lfw_home, image_subdir)
archive_path = join(lfw_home, archive.filename)
_fetch_remote(archive, dirname=lfw_home)
import tarfile
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
Benchmark.Stop()
return f'Data downloaded to {lfw_home}'
@classmethod
def train(cls) -> str:
"""
run eigenfaces_svm example
:return type: str
"""
#print(__doc__)
#Benchmark.Start()
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# #############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
        # for machine learning we use the raw pixel data directly (relative pixel
        # position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
result = "Total dataset size:\n"
result += "n_samples: %d\n" % n_samples
result += "n_features: %d\n" % n_features
result += "n_classes: %d\n" % n_classes
# #############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
# #############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
result += "Extracting the top %d eigenfaces from %d faces\n" \
% (n_components, X_train.shape[0])
t0 = time()
pca = PCA(n_components=n_components, svd_solver='randomized',
whiten=True).fit(X_train)
result += "done in %0.3fs\n" % (time() - t0)
eigenfaces = pca.components_.reshape((n_components, h, w))
result += "Projecting the input data on the eigenfaces orthonormal basis\n"
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
result += "done in %0.3fs\n" % (time() - t0)
# #############################################################################
# Train a SVM classification model
result += "Fitting the classifier to the training set\n"
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(
SVC(kernel='rbf', class_weight='balanced'), param_grid
)
clf = clf.fit(X_train_pca, y_train)
result += "done in %0.3fs\n" % (time() - t0)
result += "Best estimator found by grid search:\n"
result += "%s\n" % clf.best_estimator_
# #############################################################################
# Quantitative evaluation of the model quality on the test set
result += "Predicting people's names on the test set\n"
t0 = time()
y_pred = clf.predict(X_test_pca)
result += "done in %0.3fs\n" % (time() - t0)
result += "%s\n" % str(classification_report(y_test, y_pred, target_names=target_names))
result += "%s\n" % str(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
EigenfacesSVM.store_model("eigenfaces-svm", clf, pca, target_names)
#Benchmark.Stop()
old_stdout = sys.stdout
new_stdout = io.StringIO()
sys.stdout = new_stdout
#Benchmark.print()
result += new_stdout.getvalue()
sys.stdout = old_stdout
print(result)
return result
@classmethod
def predict(cls, image_file_paths: str, model_name: str = "eigenfaces-svm") -> str:
"""
Make a prediction based on training configuration
"""
image_file_paths = image_file_paths.split(",")
clf, pca, target_names = EigenfacesSVM.load_model(model_name)
slice_ = (slice(70, 195), slice(78, 172))
color = False
resize = 0.4
faces = _load_imgs(image_file_paths, slice_, color, resize)
X = faces.reshape(len(faces), -1)
X_pca = pca.transform(X)
y_pred = clf.predict(X_pca)
return str(target_names[y_pred])
@staticmethod
def store_model(name: str, model: GridSearchCV, pca: PCA, target_names: ndarray):
"""
Use joblib to dump the model into a .joblib file
Stored model can be found in
Can be found in ~/.cloudmesh/eigenfaces-svm
"""
model_dir = '~/.cloudmesh/eigenfaces-svm'
Shell.mkdir(path_expand(model_dir))
dump(model, path_expand(f'{model_dir}/{name}_model.joblib'))
dump(pca, path_expand(f'{model_dir}/{name}_pca.joblib'))
dump(target_names, path_expand(f'{model_dir}/{name}_target_names.joblib'))
@staticmethod
def load_model(name: str) -> Tuple[GridSearchCV, PCA, ndarray]:
return load(path_expand(f'~/.cloudmesh/eigenfaces-svm/{name}_model.joblib')), \
load(path_expand(f'~/.cloudmesh/eigenfaces-svm/{name}_pca.joblib')), \
load(path_expand(f'~/.cloudmesh/eigenfaces-svm/{name}_target_names.joblib'))
# Code for local testing & debug
#eigenfaces_svm = EigenfacesSVM()
#eigenfaces_svm.download_data()
#eigenfaces_svm.train()
#for i in range(100,531):
#print(eigenfaces_svm.predict([f"/home/anthony/scikit_learn_data/lfw_home/lfw_funneled/George_W_Bush/George_W_Bush_0{i}.jpg"]))
#print(eigenfaces_svm.predict(["/home/anthony/scikit_learn_data/lfw_home/lfw_funneled/Colin_Powell/Colin_Powell_0001.jpg","/home/anthony/scikit_learn_data/lfw_home/lfw_funneled/George_W_Bush/George_W_Bush_0002.jpg"]))
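# --- Minimal standalone sketch of the same PCA -> SVM flow on synthetic data, so
# the shape of train() can be sanity-checked without downloading LFW. The sample
# counts, component count and parameter grid below are illustrative assumptions,
# not the values used by EigenfacesSVM.train().
def _pipeline_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(60, 100)            # 60 fake "images", 100 features each
    y = rng.randint(0, 3, size=60)   # 3 fake classes
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=42)
    pca = PCA(n_components=10, whiten=True).fit(X_tr)
    clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'),
                       {'C': [1.0, 10.0], 'gamma': ['scale', 0.01]}, cv=3)
    clf.fit(pca.transform(X_tr), y_tr)
    return clf.score(pca.transform(X_te), y_te)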
| [
"tarfile.open",
"sklearn.metrics.classification_report",
"sklearn.datasets._base._fetch_remote",
"os.remove",
"sklearn.datasets._base.get_data_home",
"os.path.exists",
"sklearn.decomposition.PCA",
"io.StringIO",
"sklearn.datasets._lfw._load_imgs",
"sklearn.model_selection.train_test_split",
"cloudmesh.common.util.path_expand",
"cloudmesh.common.Benchmark.Benchmark.Stop",
"cloudmesh.common.Benchmark.Benchmark.Start",
"sklearn.datasets._base.RemoteFileMetadata",
"time.time",
"sklearn.svm.SVC",
"logging.basicConfig",
"os.makedirs",
"sklearn.datasets.fetch_lfw_people",
"os.path.join"
] | [((997, 1186), 'sklearn.datasets._base.RemoteFileMetadata', 'RemoteFileMetadata', ([], {'filename': '"""pairsDevTrain.txt"""', 'url': '"""https://ndownloader.figshare.com/files/5976012"""', 'checksum': '"""1d454dada7dfeca0e7eab6f65dc4e97a6312d44cf142207be28d688be92aabfa"""'}), "(filename='pairsDevTrain.txt', url=\n 'https://ndownloader.figshare.com/files/5976012', checksum=\n '1d454dada7dfeca0e7eab6f65dc4e97a6312d44cf142207be28d688be92aabfa')\n", (1015, 1186), False, 'from sklearn.datasets._base import get_data_home, _fetch_remote, RemoteFileMetadata\n'), ((1231, 1419), 'sklearn.datasets._base.RemoteFileMetadata', 'RemoteFileMetadata', ([], {'filename': '"""pairsDevTest.txt"""', 'url': '"""https://ndownloader.figshare.com/files/5976009"""', 'checksum': '"""7cb06600ea8b2814ac26e946201cdb304296262aad67d046a16a7ec85d0ff87c"""'}), "(filename='pairsDevTest.txt', url=\n 'https://ndownloader.figshare.com/files/5976009', checksum=\n '7cb06600ea8b2814ac26e946201cdb304296262aad67d046a16a7ec85d0ff87c')\n", (1249, 1419), False, 'from sklearn.datasets._base import get_data_home, _fetch_remote, RemoteFileMetadata\n'), ((1464, 1645), 'sklearn.datasets._base.RemoteFileMetadata', 'RemoteFileMetadata', ([], {'filename': '"""pairs.txt"""', 'url': '"""https://ndownloader.figshare.com/files/5976006"""', 'checksum': '"""ea42330c62c92989f9d7c03237ed5d591365e89b3e649747777b70e692dc1592"""'}), "(filename='pairs.txt', url=\n 'https://ndownloader.figshare.com/files/5976006', checksum=\n 'ea42330c62c92989f9d7c03237ed5d591365e89b3e649747777b70e692dc1592')\n", (1482, 1645), False, 'from sklearn.datasets._base import get_data_home, _fetch_remote, RemoteFileMetadata\n'), ((2265, 2282), 'cloudmesh.common.Benchmark.Benchmark.Start', 'Benchmark.Start', ([], {}), '()\n', (2280, 2282), False, 'from cloudmesh.common.Benchmark import Benchmark\n'), ((2301, 2374), 'sklearn.datasets._base.RemoteFileMetadata', 'RemoteFileMetadata', (['images_filename', 'images_url'], {'checksum': 'images_checksum'}), '(images_filename, images_url, checksum=images_checksum)\n', (2319, 2374), False, 'from sklearn.datasets._base import get_data_home, _fetch_remote, RemoteFileMetadata\n'), ((2757, 2791), 'sklearn.datasets._base.get_data_home', 'get_data_home', ([], {'data_home': 'data_home'}), '(data_home=data_home)\n', (2770, 2791), False, 'from sklearn.datasets._base import get_data_home, _fetch_remote, RemoteFileMetadata\n'), ((2811, 2839), 'os.path.join', 'join', (['data_home', 'data_subdir'], {}), '(data_home, data_subdir)\n', (2815, 2839), False, 'from os.path import dirname, join, exists, isdir\n'), ((3079, 3107), 'os.path.join', 'join', (['lfw_home', 'image_subdir'], {}), '(lfw_home, image_subdir)\n', (3083, 3107), False, 'from os.path import dirname, join, exists, isdir\n'), ((3131, 3163), 'os.path.join', 'join', (['lfw_home', 'archive.filename'], {}), '(lfw_home, archive.filename)\n', (3135, 3163), False, 'from os.path import dirname, join, exists, isdir\n'), ((3172, 3212), 'sklearn.datasets._base._fetch_remote', '_fetch_remote', (['archive'], {'dirname': 'lfw_home'}), '(archive, dirname=lfw_home)\n', (3185, 3212), False, 'from sklearn.datasets._base import get_data_home, _fetch_remote, RemoteFileMetadata\n'), ((3314, 3334), 'os.remove', 'remove', (['archive_path'], {}), '(archive_path)\n', (3320, 3334), False, 'from os import listdir, makedirs, remove\n'), ((3343, 3359), 'cloudmesh.common.Benchmark.Benchmark.Stop', 'Benchmark.Stop', ([], {}), '()\n', (3357, 3359), False, 'from cloudmesh.common.Benchmark import Benchmark\n'), ((3647, 
3720), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)s %(message)s')\n", (3666, 3720), False, 'import logging\n'), ((3912, 3965), 'sklearn.datasets.fetch_lfw_people', 'fetch_lfw_people', ([], {'min_faces_per_person': '(70)', 'resize': '(0.4)'}), '(min_faces_per_person=70, resize=0.4)\n', (3928, 3965), False, 'from sklearn.datasets import fetch_lfw_people\n'), ((4900, 4955), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(X, y, test_size=0.25, random_state=42)\n', (4916, 4955), False, 'from sklearn.model_selection import train_test_split\n'), ((5382, 5388), 'time.time', 'time', ([], {}), '()\n', (5386, 5388), False, 'from time import time\n'), ((5722, 5728), 'time.time', 'time', ([], {}), '()\n', (5726, 5728), False, 'from time import time\n'), ((6081, 6087), 'time.time', 'time', ([], {}), '()\n', (6085, 6087), False, 'from time import time\n'), ((6764, 6770), 'time.time', 'time', ([], {}), '()\n', (6768, 6770), False, 'from time import time\n'), ((7210, 7223), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (7221, 7223), False, 'import io\n'), ((7821, 7872), 'sklearn.datasets._lfw._load_imgs', '_load_imgs', (['image_file_paths', 'slice_', 'color', 'resize'], {}), '(image_file_paths, slice_, color, resize)\n', (7831, 7872), False, 'from sklearn.datasets._lfw import _load_imgs\n'), ((2856, 2872), 'os.path.exists', 'exists', (['lfw_home'], {}), '(lfw_home)\n', (2862, 2872), False, 'from os.path import dirname, join, exists, isdir\n'), ((2886, 2904), 'os.makedirs', 'makedirs', (['lfw_home'], {}), '(lfw_home)\n', (2894, 2904), False, 'from os import listdir, makedirs, remove\n'), ((2967, 2998), 'os.path.join', 'join', (['lfw_home', 'target.filename'], {}), '(lfw_home, target.filename)\n', (2971, 2998), False, 'from os.path import dirname, join, exists, isdir\n'), ((3011, 3050), 'sklearn.datasets._base._fetch_remote', '_fetch_remote', (['target'], {'dirname': 'lfw_home'}), '(target, dirname=lfw_home)\n', (3024, 3050), False, 'from sklearn.datasets._base import get_data_home, _fetch_remote, RemoteFileMetadata\n'), ((6258, 6300), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'class_weight': '"""balanced"""'}), "(kernel='rbf', class_weight='balanced')\n", (6261, 6300), False, 'from sklearn.svm import SVC\n'), ((8371, 8393), 'cloudmesh.common.util.path_expand', 'path_expand', (['model_dir'], {}), '(model_dir)\n', (8382, 8393), False, 'from cloudmesh.common.util import path_expand\n'), ((8416, 8463), 'cloudmesh.common.util.path_expand', 'path_expand', (['f"""{model_dir}/{name}_model.joblib"""'], {}), "(f'{model_dir}/{name}_model.joblib')\n", (8427, 8463), False, 'from cloudmesh.common.util import path_expand\n'), ((8483, 8528), 'cloudmesh.common.util.path_expand', 'path_expand', (['f"""{model_dir}/{name}_pca.joblib"""'], {}), "(f'{model_dir}/{name}_pca.joblib')\n", (8494, 8528), False, 'from cloudmesh.common.util import path_expand\n'), ((8557, 8611), 'cloudmesh.common.util.path_expand', 'path_expand', (['f"""{model_dir}/{name}_target_names.joblib"""'], {}), "(f'{model_dir}/{name}_target_names.joblib')\n", (8568, 8611), False, 'from cloudmesh.common.util import path_expand\n'), ((3245, 3279), 'tarfile.open', 'tarfile.open', (['archive_path', '"""r:gz"""'], {}), "(archive_path, 'r:gz')\n", (3257, 3279), False, 'import tarfile\n'), ((5403, 5471), 'sklearn.decomposition.PCA', 'PCA', ([], 
{'n_components': 'n_components', 'svd_solver': '"""randomized"""', 'whiten': '(True)'}), "(n_components=n_components, svd_solver='randomized', whiten=True)\n", (5406, 5471), False, 'from sklearn.decomposition import PCA\n'), ((5543, 5549), 'time.time', 'time', ([], {}), '()\n', (5547, 5549), False, 'from time import time\n'), ((5857, 5863), 'time.time', 'time', ([], {}), '()\n', (5861, 5863), False, 'from time import time\n'), ((6407, 6413), 'time.time', 'time', ([], {}), '()\n', (6411, 6413), False, 'from time import time\n'), ((6852, 6858), 'time.time', 'time', ([], {}), '()\n', (6856, 6858), False, 'from time import time\n'), ((6897, 6961), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {'target_names': 'target_names'}), '(y_test, y_pred, target_names=target_names)\n', (6918, 6961), False, 'from sklearn.metrics import classification_report\n'), ((8720, 8783), 'cloudmesh.common.util.path_expand', 'path_expand', (['f"""~/.cloudmesh/eigenfaces-svm/{name}_model.joblib"""'], {}), "(f'~/.cloudmesh/eigenfaces-svm/{name}_model.joblib')\n", (8731, 8783), False, 'from cloudmesh.common.util import path_expand\n'), ((8808, 8869), 'cloudmesh.common.util.path_expand', 'path_expand', (['f"""~/.cloudmesh/eigenfaces-svm/{name}_pca.joblib"""'], {}), "(f'~/.cloudmesh/eigenfaces-svm/{name}_pca.joblib')\n", (8819, 8869), False, 'from cloudmesh.common.util import path_expand\n'), ((8894, 8964), 'cloudmesh.common.util.path_expand', 'path_expand', (['f"""~/.cloudmesh/eigenfaces-svm/{name}_target_names.joblib"""'], {}), "(f'~/.cloudmesh/eigenfaces-svm/{name}_target_names.joblib')\n", (8905, 8964), False, 'from cloudmesh.common.util import path_expand\n'), ((2691, 2734), 'sklearn.datasets._base.RemoteFileMetadata', 'RemoteFileMetadata', (['filename', 'url', 'checksum'], {}), '(filename, url, checksum)\n', (2709, 2734), False, 'from sklearn.datasets._base import get_data_home, _fetch_remote, RemoteFileMetadata\n')] |
from django.conf import settings
from djeff import djeff
from django.core.exceptions import ImproperlyConfigured
class DjeffMiddleware(object):
def process_response(self, request, response):
try:
if settings.DJEFF:
response.content = djeff.djeffify_html(response.content.decode())
except AttributeError:
raise ImproperlyConfigured(
'DJEFF is not configured in django settings. Set "DJEFF = True" to enable djeffing'
)
return response
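# --- Illustrative configuration sketch (hypothetical settings, not from this repo) ---
# The middleware only rewrites responses when settings.DJEFF is truthy. With the
# legacy MIDDLEWARE_CLASSES-style registration that this old-style class fits, a
# minimal settings.py might contain something like (dotted path is an assumption):
#
#   DJEFF = True
#   MIDDLEWARE_CLASSES = [
#       'myproject.middleware.DjeffMiddleware',
#       ...
#   ]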
| [
"django.core.exceptions.ImproperlyConfigured"
] | [((373, 488), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""DJEFF is not configured in django settings. Set "DJEFF = True" to enable djeffing"""'], {}), '(\n \'DJEFF is not configured in django settings. Set "DJEFF = True" to enable djeffing\'\n )\n', (393, 488), False, 'from django.core.exceptions import ImproperlyConfigured\n')] |
"""
Implement Red–black tree
* https://en.wikipedia.org/wiki/Red%E2%80%93black_tree
* http://fujimura2.fiw-web.net/java/mutter/tree/red-black-tree.html
"""
import argparse
import logging
import random
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(levelname)s: %(message)s',
)
log = logging.getLogger(__file__)
BLACK = 'black'
RED = 'red'
def is_leaf(node):
return isinstance(node, Leaf)
def find_root(node):
if node.parent is None:
return node
else:
return find_root(node.parent)
def replace_node(node, child):
if child.key < node.parent.key:
node.parent.left = child
elif node.parent.key < child.key:
node.parent.right = child
node.key = child.key
node.left = child.left
node.right = child.right
return node, child
def grandparent(node):
parent = node.parent
if parent is None:
return None
return parent.parent
def sibling(node):
parent = node.parent
if parent is None:
return None
elif node is parent.left:
return parent.right
else:
return parent.left
def uncle(node):
parent = node.parent
if parent is None:
return None
return sibling(parent)
def rotate_left(node, tree):
log.debug('rotate_left: %s' % node)
new_node = node.right
assert new_node is not LEAF
node.right = new_node.left
node.right.parent = node
new_node.left = node
new_node.parent = node.parent
node.parent = new_node
if new_node.parent is None:
tree.root = new_node
else:
if new_node.key < new_node.parent.key:
new_node.parent.left = new_node
elif new_node.parent.key < new_node.key:
new_node.parent.right = new_node
else:
assert False, 'rotate left: %s' % node
def rotate_right(node, tree):
log.debug('rotate_right: %s' % node)
new_node = node.left
assert new_node is not LEAF
node.left = new_node.right
node.left.parent = node
new_node.right = node
new_node.parent = node.parent
node.parent = new_node
if new_node.parent is None:
tree.root = new_node
else:
if new_node.key < new_node.parent.key:
new_node.parent.left = new_node
elif new_node.parent.key < new_node.key:
new_node.parent.right = new_node
else:
            assert False, 'rotate right: %s' % node
def find_max_node(node):
max_node = node
while max_node.right is not LEAF:
max_node = max_node.right
return max_node
class Leaf:
def __init__(self):
self.left = None
self.right = None
self.color = BLACK
def __repr__(self):
return '%s [Leaf]' % self.color
LEAF = Leaf()
class Node:
def __init__(self, key, color='', parent=None):
self.key = key
self.color = color
self.left = LEAF
self.right = LEAF
self.parent = parent
def __iter__(self):
return iter(self.children)
def __repr__(self):
pkey = 'na' if self.parent is None else self.parent.key
return '%s [%s] p:%s' % (self.color, self.key, pkey)
@property
def children(self):
children = []
if self.left is not None:
children.append(self.left)
if self.right is not None:
children.append(self.right)
return children
@property
def number_of_children(self):
num = 0
if self.left is not LEAF:
num += 1
if self.right is not LEAF:
num += 1
return num
def search(self, key):
if self.key == key:
return self
if not is_leaf(self):
if key < self.key:
if self.left is not LEAF:
return self.left.search(key)
elif self.key < key:
if self.right is not LEAF:
return self.right.search(key)
def show(self, verbose):
from anytree import Node as AnyNode, RenderTree
verbose = False # TODO
def visit(node, parent, verbose):
any_node = AnyNode(repr(node), parent=parent)
for child in node:
if is_leaf(child):
if verbose:
AnyNode(repr(child), parent=any_node)
else:
visit(child, any_node, verbose)
root = self
root_any_node = AnyNode(repr(root))
for child in root:
if is_leaf(child):
if verbose:
AnyNode(repr(child), parent=root_any_node)
else:
visit(child, root_any_node, verbose)
s = ''
for pre, fill, node in RenderTree(root_any_node):
s += '%s%s\n' % (pre, node.name)
return s
class RedBlackTree:
def __init__(self, root):
self.root = root
def search(self, key):
return self.root.search(key)
def insert_recurse(self, root, node):
if node.key < root.key:
if is_leaf(root.left):
root.left = node
root.left.parent = root
else:
self.insert_recurse(root.left, node)
elif root.key <= node.key: # for duplicated key
if is_leaf(root.right):
root.right = node
root.right.parent = root
else:
self.insert_recurse(root.right, node)
else:
assert False
def insert_case1(self, node):
log.debug('insert_case1')
if node.parent is None:
node.color = BLACK
def insert_case2(self, node):
log.debug('insert_case2')
pass # do nothing
def insert_case3(self, node):
log.debug('insert_case3')
node.parent.color = BLACK
uncle(node).color = BLACK
g = grandparent(node)
g.color = RED
self.insert_repair_tree(g)
def insert_case4_step2(self, node):
log.debug('insert_case4 step2')
p = node.parent
g = grandparent(node)
if node is p.left:
rotate_right(g, self)
else:
rotate_left(g, self)
p.color = BLACK
g.color = RED
def insert_case4(self, node):
log.debug('insert_case4')
p = node.parent
g = grandparent(node)
if node is g.left.right:
rotate_left(p, self)
node = node.left
elif node is g.right.left:
rotate_right(p, self)
node = node.right
self.insert_case4_step2(node)
def insert_repair_tree(self, node):
if node.parent is None:
self.insert_case1(node)
elif node.parent.color is BLACK:
self.insert_case2(node)
elif uncle(node).color is RED:
self.insert_case3(node)
else:
self.insert_case4(node)
def insert(self, key):
node = Node(key, RED)
log.debug('insert: %s' % repr(node))
self.insert_recurse(self.root, node)
self.insert_repair_tree(node)
def delete_case6(self, node):
log.debug('delete_case6')
s = sibling(node)
s.color = node.parent.color
node.parent.color = BLACK
if node is node.parent.left:
s.right.color = BLACK
rotate_left(node.parent, self)
else:
s.left.color = BLACK
rotate_right(node.parent, self)
def delete_case5(self, node):
log.debug('delete_case5')
s = sibling(node)
if s.color is BLACK:
if (
node is node.parent.left and
s is not LEAF and
s.right.color is BLACK and
s.left.color is RED
):
s.color = RED
s.left.color = BLACK
rotate_right(s, self)
elif (
node is node.parent.right and
s is not LEAF and
s.left.color is BLACK and
s.right.color is RED
):
s.color = RED
s.right.color = BLACK
                rotate_left(s, self)
self.delete_case6(node)
def delete_case4(self, node):
log.debug('delete_case4')
s = sibling(node)
if (
node.parent.color is RED and
s is not LEAF and
s.color is BLACK and
s.left.color is BLACK and
s.right.color is BLACK
):
s.color = RED
node.parent.color = BLACK
else:
self.delete_case5(node)
def delete_case3(self, node):
log.debug('delete_case3')
s = sibling(node)
if (
node.parent.color is BLACK and
s.color is BLACK and
s.left.color is BLACK and
s.right.color is BLACK
):
s.color = RED
self.delete_case1(node.parent)
else:
self.delete_case4(node)
def delete_case2(self, node):
log.debug('delete_case2')
s = sibling(node)
if s.color is RED:
node.parent.color = RED
s.color = BLACK
if node is node.parent.left:
rotate_left(node.parent, self)
else:
rotate_right(node.parent, self)
self.delete_case3(node)
def delete_case1(self, node):
log.debug('delete_case1')
if node.parent is not None:
self.delete_case2(node)
def delete_one_child(self, node):
log.debug('delete_one_child')
child = node.left if is_leaf(node.right) else node.right
node, child = replace_node(node, child)
if node.color is BLACK:
if child.color is RED:
child.color = BLACK
else:
self.delete_case1(child)
def delete_no_child(self, node):
log.debug('delete_no_child')
p = node.parent
if node.color is BLACK:
if node is p.left:
rotate_left(p, self)
else:
rotate_right(p, self)
# FIXME: need condtion to set color
p.parent.color = RED
p.color = BLACK
s = sibling(p)
s.color = BLACK
for c in p.children:
if c is not LEAF:
c.color = RED
if node.key < node.parent.key:
node.parent.left = LEAF
elif node.parent.key < node.key:
node.parent.right = LEAF
elif node.key == node.parent.key:
node.parent.left = LEAF # with left_max_node
def _delete(self, key, start_node):
# TODO: work in progress
node = start_node.search(key)
if node.number_of_children == 0:
self.delete_no_child(node)
elif node.number_of_children <= 1:
self.delete_one_child(node)
elif node.number_of_children == 2:
left_max_node = find_max_node(node.left)
node.key = left_max_node.key
self._delete(left_max_node.key, node.left)
def delete(self, key):
self._delete(key, self.root)
def show(self, verbose):
print(self.root.show(verbose))
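# --- Illustrative sanity check (not part of the original implementation) ---
# Two of the red-black invariants are: a red node has no red child, and every
# root-to-leaf path contains the same number of black nodes. The helper below is a
# hedged sketch of a checker for those two properties (the caller can additionally
# assert that the root is black); it returns the black-height on success.
def check_red_black_invariants(node):
    if is_leaf(node):
        return 1  # leaves count as black
    if node.color is RED:
        assert node.left.color is BLACK and node.right.color is BLACK, \
            'red node %r has a red child' % node
    left_height = check_red_black_invariants(node.left)
    right_height = check_red_black_invariants(node.right)
    assert left_height == right_height, 'unequal black-height under %r' % node
    return left_height + (1 if node.color is BLACK else 0)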
def parse_argument():
parser = argparse.ArgumentParser()
parser.set_defaults(
delete_nums=[],
max_num=50,
verbose=False,
)
parser.add_argument(
'--max-num', dest='max_num', type=int,
help='set maximum number of key range',
)
parser.add_argument(
'--delete-num', dest='delete_nums', type=int, nargs='*',
help='set delete key',
)
parser.add_argument(
'-v', '--verbose', action='store_true',
help='set verbose mode',
)
args = parser.parse_args()
if args.verbose:
log.setLevel(logging.DEBUG)
return args
def main():
args = parse_argument()
log.debug(args)
if args.max_num == 3:
from test_red_black_tree import three_data as data
elif args.max_num == 5:
from test_red_black_tree import five_data as data
elif args.max_num == 8:
from test_red_black_tree import eight_data as data
elif args.max_num == 12:
from test_red_black_tree import twelve_data as data
elif args.max_num == 17:
from test_red_black_tree import seventeen_data as data
else:
data = list(random.sample(range(args.max_num), args.max_num))
log.info(data)
first = data.pop(0)
root = Node(first, BLACK)
rbt = RedBlackTree(root)
for i in data:
rbt.insert(i)
print('=== initial tree structure ===')
rbt.show(args.verbose)
if len(args.delete_nums) > 0:
for num in args.delete_nums:
rbt.delete(num)
print('=== deleted %d, after deleting ===' % num)
rbt.show(args.verbose)
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"logging.getLogger",
"test_red_black_tree.seventeen_data.pop",
"argparse.ArgumentParser",
"anytree.RenderTree"
] | [((205, 298), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s: %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s: %(message)s')\n", (224, 298), False, 'import logging\n'), ((311, 338), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (328, 338), False, 'import logging\n'), ((11317, 11342), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11340, 11342), False, 'import argparse\n'), ((12529, 12540), 'test_red_black_tree.seventeen_data.pop', 'data.pop', (['(0)'], {}), '(0)\n', (12537, 12540), True, 'from test_red_black_tree import seventeen_data as data\n'), ((4748, 4773), 'anytree.RenderTree', 'RenderTree', (['root_any_node'], {}), '(root_any_node)\n', (4758, 4773), False, 'from anytree import Node as AnyNode, RenderTree\n')] |
#!/usr/bin/env python3
'''
Yet Another Instant Messenger (YIM) server entry point.
'''
__VERSION__ = (0, 0, 0)
__VERSION_STRING__ = '.'.join(str(i) for i in __VERSION__)
__DEFAULT_HOST__ = '0.0.0.0'
__DEFAULT_PORT__ = 12345
import logging

log = logging.getLogger('YIM-SERVER' if __name__ == '__main__' else __name__)
import sys
import server
from utils import AP
arg = AP(
    description='Yet Another Instant Messenger Server',
).add(
'--version', action='version', version=__VERSION_STRING__,
help='Version ({})'.format(__VERSION_STRING__)
).add(
'-q', '--quiet', action='count', default=0
).add(
'-v', '--verbose', action='count', default=0
).add(
'-H', '--host', help='address to bind to',
default=__DEFAULT_HOST__
).add(
'-P', '--port', help='port to listen on',
default=__DEFAULT_PORT__, type=int
).end_of_args()
def main(logLevel=logging.DEBUG):
logging.basicConfig(
level = logLevel,
)
if arg.verbose > arg.quiet:
log.setLevel(logging.DEBUG)
elif arg.verbose < arg.quiet:
log.setLevel(logging.ERROR)
try:
log.info('START (%r:%r)', arg.host, arg.port)
server_environment = dict(
host = arg.host,
port = arg.port,
)
with server.ChatServer(**server_environment) as srv:
log.info('SERVING %r...', server_environment)
srv.serve()
finally:
log.info('STOP')
if __name__ == '__main__':
sys.exit(main())
# vim: set ft=python ai et ts=4 sts=4 sw=4 colorcolumn=80: #
| [
"logging.getLogger",
"utils.AP",
"logging.basicConfig",
"server.ChatServer"
] | [((194, 265), 'logging.getLogger', 'logging.getLogger', (["('YIM-SERVER' if __name__ == '__main__' else __name__)"], {}), "('YIM-SERVER' if __name__ == '__main__' else __name__)\n", (211, 265), False, 'import logging\n'), ((924, 959), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logLevel'}), '(level=logLevel)\n', (943, 959), False, 'import logging\n'), ((1294, 1333), 'server.ChatServer', 'server.ChatServer', ([], {}), '(**server_environment)\n', (1311, 1333), False, 'import server\n'), ((403, 457), 'utils.AP', 'AP', ([], {'description': '"""Yet Another Instant Messanger Server"""'}), "(description='Yet Another Instant Messanger Server')\n", (405, 457), False, 'from utils import AP\n')] |
import PoGoCLI
from PIL import Image
import pyautogui as pg
def Find_Button(button):
PoGoCLI.Update_Screenshot()
img = Image.open('phoneScreen.png')
# First Find Menu Button
base_width = 1080
wpercent = (base_width/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
pil_image = img.resize((base_width,hsize), Image.ANTIALIAS)
button_pos = pg.locate(button, pil_image, grayscale = False, confidence = .7)
button_center = pg.center(button_pos)
button_x, button_y = button_center
    # The screenshot was resized by wpercent before matching, so divide the
    # matched coordinates by wpercent to map them back to the original resolution.
corrected_x = button_x / wpercent
corrected_y = button_y / wpercent
PoGoCLI.Click(corrected_x, corrected_y)
return (corrected_x, corrected_y) | [
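# --- Worked example of the coordinate correction above (numbers are hypothetical) ---
# For a 1440x2960 screenshot, wpercent = 1080 / 1440 = 0.75, so a match centred at
# (405, 600) in the resized image maps back to (405 / 0.75, 600 / 0.75) = (540, 800)
# in the original screenshot, which is what PoGoCLI.Click() receives.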
"PoGoCLI.Click",
"PIL.Image.open",
"PoGoCLI.Update_Screenshot",
"pyautogui.locate",
"pyautogui.center"
] | [((91, 118), 'PoGoCLI.Update_Screenshot', 'PoGoCLI.Update_Screenshot', ([], {}), '()\n', (116, 118), False, 'import PoGoCLI\n'), ((130, 159), 'PIL.Image.open', 'Image.open', (['"""phoneScreen.png"""'], {}), "('phoneScreen.png')\n", (140, 159), False, 'from PIL import Image\n'), ((395, 456), 'pyautogui.locate', 'pg.locate', (['button', 'pil_image'], {'grayscale': '(False)', 'confidence': '(0.7)'}), '(button, pil_image, grayscale=False, confidence=0.7)\n', (404, 456), True, 'import pyautogui as pg\n'), ((481, 502), 'pyautogui.center', 'pg.center', (['button_pos'], {}), '(button_pos)\n', (490, 502), True, 'import pyautogui as pg\n'), ((690, 729), 'PoGoCLI.Click', 'PoGoCLI.Click', (['corrected_x', 'corrected_y'], {}), '(corrected_x, corrected_y)\n', (703, 729), False, 'import PoGoCLI\n')] |
from tokens import TOKEN
from info import *
from parse import Parser
from config import *
def cg_block(f, block):
for stat in block['stats']:
cg_stat(f, stat)
if block['ret_exps'] is not None:
cg_ret_exps(f, block['ret_exps'])
def is_vararg_or_call(exp):
return type(exp) is dict and (exp.get('exp_type', None) == TOKEN.VARARG or exp.get('op', None) == 'call')
def cg_ret_exps(f, ret_exps):
n = len(ret_exps)
if n == 0:
        f.emit('return', 0, 0)
        return
flag = is_vararg_or_call(ret_exps[-1])
for i in range(n):
r = f.alloc_reg()
if i == n - 1 and flag:
cg_exp(f, ret_exps[i], r, -1)
else:
cg_exp(f, ret_exps[i], r, 1)
f.free_regs(n)
a = f.used_regs
if flag:
f.emit('return', a, -1)
else:
f.emit('return', a, n)
def cg_stat(f, stat):
t = stat.get('type', stat.get('op'))
if t == 'call':
cg_func_call_stat(f, stat)
elif t == 'break':
cg_break_stat(f, stat)
elif t == 'do':
cg_do_stat(f, stat)
elif t == 'repeat':
cg_repeat_stat(f, stat)
elif t == 'while':
cg_while_stat(f, stat)
elif t == 'if':
cg_if_stat(f, stat)
elif t == 'for':
if stat['num']:
cg_for_num_stat(f, stat)
else:
cg_for_in_stat(f, stat)
elif t == 'assign':
cg_assign_stat(f, stat)
elif t == 'func':
if stat.get('local', False):
cg_local_func_def_stat(f, stat)
elif t == 'var':
if stat.get('local', False):
cg_local_var_stat(f, stat)
elif t == 'empty':
pass
# TODO: support goto label
else:
print('not support stat %s' % t)
def cg_local_func_def_stat(f, stat):
r = f.add_local_var(stat['name'])
cg_func_def_exp(f, stat['exp'], r)
def cg_func_call_stat(f, stat):
r = f.alloc_reg()
cg_func_call_exp(f, stat, r, 0)
f.free_reg()
def cg_break_stat(f, stat):
if stat is not None:
pc = f.emit('jump', 0, 0)
f.add_break_jump(pc)
def cg_do_stat(f, stat):
f.enter_scope(False)
cg_block(f, stat['block'])
f.close_open_up_values()
f.exit_scope()
def cg_while_stat(f, stat):
pc_before = f.pc()
r = f.alloc_reg()
cg_exp(f, stat['exp'], r, 1)
f.free_reg()
f.emit('test', r, 0)
pc_jump_to_end = f.emit('jump', 0, 0)
f.enter_scope(True)
cg_block(f, stat['block'])
f.close_open_up_values()
f.emit('jump', 0, pc_before - f.pc() - 1)
f.exit_scope()
f.fix_b(pc_jump_to_end, f.pc() - pc_jump_to_end)
def cg_repeat_stat(f, stat):
f.enter_scope(True)
pc_before = f.pc()
cg_block(f, stat['block'])
r = f.alloc_reg()
cg_exp(f, stat['exp'], r, 1)
f.free_reg()
f.emit('test', r, 0)
f.emit('jump', f.get_jump_arg(), pc_before - f.pc() - 1)
f.exit_scope()
def cg_if_stat(f, stat):
pc_jumps_to_ends = []
pc_jump_to_next = -1
for i in range(len(stat['exps'])):
exp = stat['exps'][i]
if pc_jump_to_next >= 0:
f.fix_b(pc_jump_to_next, f.pc() - pc_jump_to_next)
r = f.alloc_reg()
cg_exp(f, exp, r, 1)
f.free_reg()
f.emit('test', r, 0)
pc_jump_to_next = f.emit('jump', 0, 0)
f.enter_scope(False)
cg_block(f, stat['blocks'][i])
f.close_open_up_values()
f.exit_scope()
if i < len(stat['exps']) - 1:
pc_jumps_to_ends.append(f.emit('jump', 0, 0))
else:
pc_jumps_to_ends.append(pc_jump_to_next)
for pc in pc_jumps_to_ends:
f.fix_b(pc, f.pc() - pc)
def cg_for_num_stat(f, stat):
f.enter_scope(True)
cg_local_var_stat(f, {'names': ['(for idx)', '(for limit)', '(for step)'], 'exps': stat['exps']})
f.add_local_var(stat['name'])
a = f.used_regs - 4
pc_for_prep = f.emit('for_prep', a, 0)
cg_block(f, stat['block'])
f.close_open_up_values()
pc_for_loop = f.emit('for_loop', a, 0)
f.fix_b(pc_for_prep, pc_for_loop - pc_for_prep - 1)
f.fix_b(pc_for_loop, pc_for_prep - pc_for_loop)
f.exit_scope()
def cg_for_in_stat(f, stat):
f.enter_scope(True)
cg_local_var_stat(f, {'names': ['(for gen)', '(for state)', '(for ctrl)'], 'exps': stat['exps']})
for name in stat['names']:
f.add_local_var(name)
pc_jump_to_tfc = f.emit('jump', 0, 0)
cg_block(f, stat['block'])
f.close_open_up_values()
f.fix_b(pc_jump_to_tfc, f.pc() - pc_jump_to_tfc)
r_gen = f.slot_of_local_var('(for gen)')
f.emit('t_for_call', r_gen, len(stat['names']))
f.emit('t_for_loop', r_gen + 2, pc_jump_to_tfc - f.pc() - 1)
f.exit_scope()
def cg_local_var_stat(f, stat):
exps = stat['exps']
names = stat['names']
n_exps = len(exps)
n_names = len(names)
old_regs = f.used_regs
if n_exps == n_names:
for exp in exps:
r = f.alloc_reg()
cg_exp(f, exp, r, 1)
elif n_exps > n_names:
for i in range(n_exps):
a = f.alloc_reg()
if i == n_exps - 1 and is_vararg_or_call(exps[i]):
cg_exp(f, exps[i], a, 0)
else:
cg_exp(f, exps[i], a, 1)
else:
multi_ret = False
for i in range(n_exps):
a = f.alloc_reg()
if i == n_exps - 1 and is_vararg_or_call(exps[i]):
multi_ret = True
n = n_names - n_exps + 1
cg_exp(f, exps[i], a, n)
f.alloc_regs(n - 1)
else:
cg_exp(f, exps[i], a, 1)
if not multi_ret:
n = n_names - n_exps
a = f.alloc_regs(n)
f.emit('load_nil', a, n - 1)
f.used_regs = old_regs
for name in stat['names']:
f.add_local_var(name)
def cg_assign_stat(f, stat):
n_exps = len(stat['exps'])
n_vars = len(stat['vars'])
old_regs = f.used_regs
table_regs = [-1] * n_vars
k_regs = [-1] * n_vars
v_regs = [-1] * n_vars
for i in range(n_vars):
var = stat['vars'][i]
        if type(var) is dict and var.get('op', None) == 'access':
table_regs[i] = f.alloc_reg()
cg_exp(f, var['1'], table_regs[i], 1)
k_regs[i] = f.alloc_reg()
cg_exp(f, var['2'], k_regs[i], 1)
for i in range(n_vars):
v_regs[i] = f.used_regs + i
if n_exps >= n_vars:
for i in range(n_exps):
exp = stat['exps'][i]
a = f.alloc_reg()
if i >= n_vars and i == n_exps - 1 and is_vararg_or_call(exp):
cg_exp(f, exp, a, 0)
else:
cg_exp(f, exp, a, 1)
else:
multi_ret = False
for i in range(n_exps):
exp = stat['exps'][i]
a = f.alloc_reg()
if i == n_exps - 1 and is_vararg_or_call(exp):
multi_ret = True
n = n_vars - n_exps + 1
cg_exp(f, exp, a, n)
                f.alloc_regs(n - 1)
else:
cg_exp(f, exp, a, 1)
if not multi_ret:
n = n_vars - n_exps
a = f.alloc_regs(n)
f.emit('load_nil', a, n - 1)
for i in range(n_vars):
var = stat['vars'][i]
if type(var) is str:
a = f.slot_of_local_var(var)
if a >= 0:
f.emit('move', a, v_regs[i])
else:
b = f.index_of_up_value(var)
if b >= 0:
f.emit('set_up_val', v_regs[i], b)
else:
a = f.index_of_up_value('_ENV')
b = 0x100 + f.index_of_constant(var)
f.emit('set_tab_up', a, b, v_regs[i])
else:
f.emit('set_table', table_regs[i], k_regs[i], v_regs[i])
f.used_regs = old_regs
def cg_exp(f, exp, r, n):
if type(exp) is str:
cg_name(f, exp, r)
return
op = exp.get('op', exp.get('exp_type', None))
if op == TOKEN.NIL:
f.emit('load_nil', r, n - 1)
elif op == TOKEN.FALSE:
f.emit('load_bool', r, 0, 0)
elif op == TOKEN.TRUE:
f.emit('load_bool', r, 1, 0)
elif op == TOKEN.NUMBER or op == TOKEN.STRING:
f.emit('load_k', r, exp['content'])
elif op == 'parenthesis':
cg_exp(f, exp['1'], r, 1)
elif op == TOKEN.VARARG:
assert f.is_vararg
f.emit('vararg', r, n)
elif op == 'def':
cg_func_def_exp(f, exp, r)
elif op == 'table':
cg_table_exp(f, exp, r)
elif op == 'call':
cg_func_call_exp(f, exp, r, n)
elif op == 'access':
cg_table_access_exp(f, exp, r)
elif op == TOKEN.OP_CONCAT:
cg_concat_exp(f, exp, r)
elif '2' in exp:
cg_2op_exp(f, exp, r)
elif '1' in exp:
cg_1op_exp(f, exp, r)
else:
print('not support op', op)
def new_func_info(parent, fd):
info = FuncInfo()
info.breaks.append([])
info.parent = parent
info.is_vararg = fd['params']['var']
info.param_num = len(fd['params']['params'])
return info
def cg_func_def_exp(f, exp, a):
sub = new_func_info(f, exp)
f.sub_funcs.append(sub)
for name in exp['params']['params']:
sub.add_local_var(name)
cg_block(sub, exp['block'])
sub.exit_scope()
sub.emit('return', 0, 0)
bx = len(f.sub_funcs) - 1
f.emit('closure', a, bx)
def cg_table_exp(f, exp, a):
n_arr = 0
for k in exp['keys']:
if k is None:
n_arr += 1
n_exp = len(exp['keys'])
multi_ret = n_exp > 0 and is_vararg_or_call(exp['values'][-1])
f.emit('new_table', a, n_arr, n_exp - n_arr)
idx = 0
for i in range(n_exp):
k = exp['keys'][i]
v = exp['values'][i]
if k is None:
idx += 1
tmp = f.alloc_reg()
if i == n_exp - 1 and multi_ret:
cg_exp(f, v, tmp, -1)
else:
cg_exp(f, v, tmp, 1)
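            # array-style entries are flushed to the table in batches (Lua's LFIELDS_PER_FLUSH)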
if idx % FIELDS_PER_FLUSH == 0 or idx == n_arr:
n = idx % FIELDS_PER_FLUSH
if n == 0:
n = FIELDS_PER_FLUSH
c = (idx - 1) // FIELDS_PER_FLUSH + 1
f.free_regs(n)
if i == n_exp - 1 and multi_ret:
f.emit('set_list', a, 0, c)
else:
f.emit('set_list', a, n, c)
else:
b = f.alloc_reg()
cg_exp(f, k, b, 1)
c = f.alloc_reg()
cg_exp(f, v, c, 1)
f.free_regs(2)
f.emit('set_table', a, b, c)
def cg_1op_exp(f, exp, a):
b = f.alloc_reg()
cg_exp(f, exp['1'], b, 1)
f.emit(exp['op'], a, b)
f.free_reg()
def cg_2op_exp(f, exp, a):
if exp['op'] in [TOKEN.OP_AND, TOKEN.OP_OR]:
b = f.alloc_reg()
cg_exp(f, exp['1'], b, 1)
f.free_reg()
if exp['op'] == TOKEN.OP_AND:
f.emit('test_set', a, b, 0)
else:
f.emit('test_set', a, b, 1)
pc_jump = f.emit('jump', 0, 0)
b = f.alloc_reg()
cg_exp(f, exp['2'], b, 1)
f.free_reg()
f.emit('move', a, b)
f.fix_b(pc_jump, f.pc() - pc_jump)
else:
b = f.alloc_reg()
cg_exp(f, exp['1'], b, 1)
c = f.alloc_reg()
cg_exp(f, exp['2'], c, 1)
f.emit(exp['op'], a, b, c)
f.free_regs(2)
def cg_concat_exp(f, exp, a):
for sub in exp['1']:
r = f.alloc_reg()
cg_exp(f, sub, r, 1)
c = f.used_regs - 1
b = c - len(exp['1']) + 1
f.free_regs(c - b + 1)
f.emit(TOKEN.OP_CONCAT, a, b, c)
def cg_name(f, name, a):
r = f.slot_of_local_var(name)
if r >= 0:
f.emit('move', a, r)
else:
idx = f.index_of_up_value(name)
if idx >= 0:
f.emit('get_up_val', a, idx)
else:
cg_table_access_exp(f, {'op': 'access', '1': '_ENV',
'2': {'exp_type': TOKEN.STRING, 'content': name, 'line': -1}}, a)
def cg_table_access_exp(f, exp, a):
b = f.alloc_reg()
cg_exp(f, exp['1'], b, 1)
c = f.alloc_reg()
cg_exp(f, exp['2'], c, 1)
f.emit('get_table', a, b, c)
f.free_regs(2)
def cg_func_call_exp(f, exp, a, n):
n_args = len(exp['args'])
last_vararg_or_call = False
cg_exp(f, exp['exp'], a, 1)
if exp['name'] is not None:
c = 0x100 + f.index_of_constant(exp['name'])
f.emit('self', a, a, c)
for i in range(n_args):
tmp = f.alloc_reg()
arg = exp['args'][i]
if i == n_args - 1 and is_vararg_or_call(arg):
last_vararg_or_call = True
cg_exp(f, arg, tmp, -1)
else:
cg_exp(f, arg, tmp, 1)
f.free_regs(n_args)
if exp['name'] is not None:
n_args += 1
if last_vararg_or_call:
n_args = -1
f.emit('call', a, n_args, n)
def intermediate(code):
parser = Parser(code)
block = parser.parse()
# print(block)
fd = {'params': {'var': True, 'params': []}, 'block': block}
info = new_func_info(None, fd)
info.add_local_var('_ENV')
cg_func_def_exp(info, fd, 0)
# print(info.sub_funcs[0].ins)
return info
| [
"parse.Parser"
] | [((12900, 12912), 'parse.Parser', 'Parser', (['code'], {}), '(code)\n', (12906, 12912), False, 'from parse import Parser\n')] |
import datetime
import pytz
### Add the functionality of logging account creation details when creating the object
class Account:
""" Simple account class with balance """
__power = 'Tow'
@staticmethod
def _current_time():
utc_time = datetime.datetime.utcnow()
return pytz.utc.localize(utc_time)
def __init__(self, name, balance):
self._name = name
self.__balance = balance
self._transaction_list = [(Account._current_time(), balance)] # done
print("Account created for " + self._name)
self.__balance+=-2300
# logging this as deposit
def deposit(self, amount):
if amount > 0:
self.__balance += amount
self._transaction_list.append((Account._current_time(), amount))
def withdraw(self, amount):
if 0 < amount <= self.__balance:
self.__balance -= amount
self._transaction_list.append((Account._current_time(), -amount))
else:
print("The amount must be greater than zero and no more then your account balance")
def show_balance(self):
print("Balance is {}".format(self.__balance))
    def show_transactions(self):
        for date, amount in self._transaction_list:
            if amount > 0:
                tran_type = "deposited"
            else:
                tran_type = "withdrawn"
                amount *= -1
            print("{} {} on {}".format(amount, tran_type, date))
if __name__ == '__main__':
tim = Account("Tim", 0)
    # tim.__balance = 99 would be wrong, it would just create a new attribute
    tim._Account__balance = 99  # name mangling lets us reach the real __balance
    tim.show_balance()  # balance now shows 99
print(tim.__dict__)
| [
"pytz.utc.localize",
"datetime.datetime.utcnow"
] | [((263, 289), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (287, 289), False, 'import datetime\n'), ((305, 332), 'pytz.utc.localize', 'pytz.utc.localize', (['utc_time'], {}), '(utc_time)\n', (322, 332), False, 'import pytz\n')] |
import argparse
import os
from model import Model
import process_data
def is_dir(dir_name):
"""Checks if a path is an actual directory"""
if not os.path.isdir(dir_name):
msg = "{0} does not exist".format(dir_name)
raise argparse.ArgumentTypeError(msg)
else:
return dir_name
# creation of the parser
parser = argparse.ArgumentParser(description='Train the feed-foward layer')
# required arguments
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--data_dir', metavar='\b', required=True, type=is_dir, help='directory of the data')
# optional parameters
optional_args = parser.add_argument_group('optional arguments')
parser.add_argument('--save_dir', metavar='\b', action="store", default=os.getcwd()+'/', type=is_dir, help='directory to save checkpoints')
parser.add_argument('--arch', metavar='\b', action="store", default="vgg13", help='architecture of pretrained model [vgg13 vgg16 vgg19 alexnet]')
parser.add_argument('--learning_rate', metavar='\b', action="store", default=0.001, type=float, help='learning_rate')
parser.add_argument('--hidden_units', metavar='\b', action="store", default=2048, type=int, help='hidden units number')
parser.add_argument('--epochs', metavar='\b', action="store", default=2, type=int, help='epochs number')
parser.add_argument('--gpu', action="store_true", help='gpu')
args = parser.parse_args() # the parameters are now stocked in the namespace args
train_datasets, valid_datasets, test_datasets = process_data.load_datasets(args.data_dir)
print("Datasets loaded.")
train_loader, valid_loader, test_loader = process_data.get_dataloaders(train_datasets, valid_datasets, test_datasets)
print("Dataloaders defined.")
model = Model(train_loader, valid_loader, test_loader, args.arch, args.hidden_units, args.epochs, args.learning_rate, args.save_dir, args.gpu)
print("Download pre-trained model", args.arch, "..")
model.load_pretrained()
print("Pre-trained model loaded")
print("Training of the model starting..")
model.train()
print("Training of the model finished")
print("Test of the accuracy of the model on the test dataset..")
test_accuracy = model.test()
print("Test accuracy: {:.3f}".format(test_accuracy))
print("Saving of the model..")
model.include_mapping(train_datasets)
model.save()
print("Model saved.")
| [
"model.Model",
"argparse.ArgumentParser",
"argparse.ArgumentTypeError",
"os.getcwd",
"process_data.load_datasets",
"os.path.isdir",
"process_data.get_dataloaders"
] | [((346, 412), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train the feed-foward layer"""'}), "(description='Train the feed-foward layer')\n", (369, 412), False, 'import argparse\n'), ((1521, 1562), 'process_data.load_datasets', 'process_data.load_datasets', (['args.data_dir'], {}), '(args.data_dir)\n', (1547, 1562), False, 'import process_data\n'), ((1632, 1707), 'process_data.get_dataloaders', 'process_data.get_dataloaders', (['train_datasets', 'valid_datasets', 'test_datasets'], {}), '(train_datasets, valid_datasets, test_datasets)\n', (1660, 1707), False, 'import process_data\n'), ((1747, 1885), 'model.Model', 'Model', (['train_loader', 'valid_loader', 'test_loader', 'args.arch', 'args.hidden_units', 'args.epochs', 'args.learning_rate', 'args.save_dir', 'args.gpu'], {}), '(train_loader, valid_loader, test_loader, args.arch, args.hidden_units,\n args.epochs, args.learning_rate, args.save_dir, args.gpu)\n', (1752, 1885), False, 'from model import Model\n'), ((154, 177), 'os.path.isdir', 'os.path.isdir', (['dir_name'], {}), '(dir_name)\n', (167, 177), False, 'import os\n'), ((245, 276), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['msg'], {}), '(msg)\n', (271, 276), False, 'import argparse\n'), ((770, 781), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (779, 781), False, 'import os\n')] |
'''
Module gives user easy access to the information in .json file
'''
import json
import pprint
def read_data(path):
'''
Return data from .json file
'''
with open(path, mode='r', encoding='utf-8') as timeline:
data = json.load(timeline)
return data
def is_list(data):
'''
Print the information about list
'''
print('It is a list')
print('Choose one of the next function by printing its number:')
print('1. Get length of list')
print('2. Get the content of list')
print('3. Get the content of element by printing its index')
print('0. Exit')
while True:
        action = input('Your choice: ')
if action == '1':
print(len(data))
elif action == '2':
pprint.pprint(data)
elif action == '3':
el_index = input('Input index of element: ')
pprint.pprint(data[int(el_index)])
elif action == '0':
break
else:
print('Wrong function. Try again.')
def is_dict(data):
'''
Print the information about dict
'''
print('It is a dictionary')
print('Choose one of the next function by printing its number:')
print('1. Get length of dictionary')
print('2. Get keys of dictionary')
print('3. Get the value of key by printing its name')
print('4. Get the whole content of dictionary')
print('0. Exit')
while True:
        action = input('Your choice: ')
if action == '1':
print(len(data))
elif action == '2':
print(data.keys())
elif action == '3':
el_name = input('Input the key: ')
pprint.pprint(data[el_name])
elif action == '4':
pprint.pprint(data)
elif action == '0':
break
else:
print('Wrong function. Try again.')
def main():
path_to_file = input('Please, print path to the file: ')
data = read_data(path_to_file)
if type(data) == list:
is_list(data)
elif type(data) == dict:
is_dict(data)
else:
print('It is neither list nor dictionary')
if __name__ == "__main__":
main() | [
"json.load",
"pprint.pprint"
] | [((244, 263), 'json.load', 'json.load', (['timeline'], {}), '(timeline)\n', (253, 263), False, 'import json\n'), ((761, 780), 'pprint.pprint', 'pprint.pprint', (['data'], {}), '(data)\n', (774, 780), False, 'import pprint\n'), ((1664, 1692), 'pprint.pprint', 'pprint.pprint', (['data[el_name]'], {}), '(data[el_name])\n', (1677, 1692), False, 'import pprint\n'), ((1733, 1752), 'pprint.pprint', 'pprint.pprint', (['data'], {}), '(data)\n', (1746, 1752), False, 'import pprint\n')] |
import pathlib
import numpy as np
import tensorflow as tf
from PIL import Image
from object_detection.utils import ops as utils_ops
# Methods
def load_model(model_name):
# This url may change in the future
# Go to https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md
# for further information
base_url = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/'
model_file = model_name + '.tar.gz'
model_dir = tf.keras.utils.get_file(
fname=model_name,
origin=base_url + model_file,
untar=True
)
model_dir = pathlib.Path(model_dir) / "saved_model"
model = tf.saved_model.load(str(model_dir))
return model
def run_inference_for_single_image(model, image):
image = np.asarray(image)
# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
input_tensor = tf.convert_to_tensor(image)
# The model expects a batch of images, so add an axis with `tf.newaxis`.
input_tensor = input_tensor[tf.newaxis, ...]
# Run inference
model_fn = model.signatures['serving_default']
output_dict = model_fn(input_tensor)
# All outputs are batches tensors.
# Convert to numpy arrays, and take index [0] to remove the batch dimension.
# We're only interested in the first num_detections.
num_detections = int(output_dict.pop('num_detections'))
output_dict = {key: value[0, :num_detections].numpy() for key, value in output_dict.items()}
output_dict['num_detections'] = num_detections
# detection_classes should be ints.
output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
# Handle models with masks:
if 'detection_masks' in output_dict:
# Reframe the the bbox mask to the image size.
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
output_dict['detection_masks'], output_dict['detection_boxes'], image.shape[0], image.shape[1]
)
detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5, tf.uint8)
output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
return output_dict
def show_inference(model, path_image):
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = np.array(Image.open(path_image))
# Actual detection.
output_dict = run_inference_for_single_image(model, image_np)
return output_dict
def filter_vehicles(output_dict, width, height, threshold):
# x_min, y_min, x_max, y_max
boxes = list()
for detection_box, detection_class, detection_score in zip(output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores']):
if detection_score > threshold:
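            # COCO class ids: 3 = car, 6 = bus, 8 = truck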
if detection_class in [3, 6, 8]:
boxes.append([detection_box[1] * width,
detection_box[0] * height,
detection_box[3] * width,
detection_box[2] * height])
boxes.sort(key=lambda y: y[0])
return boxes
def gap_detection(boxes, width, draw, car_color):
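    # Walk the boxes (already sorted by x_min) left to right, draw each vehicle,
    # and record the empty x-ranges between consecutive vehicles and at the road edges.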
gaps = list()
# If there is no vehicle, there is a gap
if not boxes:
gaps.append((0, width))
else:
gaps = list() # x_min, x_max, long car1, long car2
if boxes[0][0] > 0:
gaps.append((0, boxes[0][0], boxes[0][2] - boxes[0][0]))
for x in range(0, len(boxes) - 1):
x_min, y_min, x_max, y_max = boxes[x]
draw.line(
[(x_min, y_min), (x_max, y_min), (x_max, y_max), (x_min, y_max), (x_min, y_min)],
fill=car_color,
width=5
)
if boxes[x][2] < boxes[x + 1][0]:
gaps.append(
[boxes[x][2], boxes[x + 1][0], boxes[x][2] - boxes[x][0], boxes[x + 1][2] - boxes[x + 1][0]]
)
x_min, y_min, x_max, y_max = boxes[len(boxes) - 1]
draw.line([(x_min, y_min), (x_max, y_min), (x_max, y_max), (x_min, y_max), (x_min, y_min)], fill=car_color,
width=5)
if boxes[len(boxes) - 1][2] < width:
gaps.append([boxes[len(boxes) - 1][2], width, boxes[len(boxes) - 1][2] - boxes[len(boxes) - 1][0]])
return gaps
def valid_gaps(gaps, height, width, draw, gap_color):
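    # Keep only the gaps that are wide enough relative to the neighbouring car lengths
    # and draw the accepted gaps onto the image in gap_color.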
solution = list()
for gap in range(0, len(gaps)):
gap_size = gaps[gap][1] - gaps[gap][0]
if len(gaps[gap]) == 2:
draw.line([(0, height / 2), (width, height / 2)], fill=gap_color, width=5)
solution.append([gaps[gap][0], gaps[gap][1]])
elif len(gaps[gap]) == 3:
if gaps[gap][0] == 0:
if gap_size >= (gaps[gap][2] / 3):
solution.append([gaps[gap][0], gaps[gap][1]])
draw.line([(0, height / 2), (gaps[gap][1], height / 2)], fill=gap_color, width=5)
elif gaps[gap][1] == width:
if gap_size >= (5 / 3) * gaps[gap][2]:
solution.append([gaps[gap][0], gaps[gap][1]])
draw.line([(gaps[gap][0], height / 2), (width, height / 2)], fill=gap_color, width=5)
else:
if gap_size >= (gaps[gap][2] + gaps[gap][3]) / 6:
solution.append([gaps[gap][0], gaps[gap][1]])
draw.line([(gaps[gap][0], height / 2), (gaps[gap][1], height / 2)], fill=gap_color, width=5)
return solution
| [
"PIL.Image.open",
"pathlib.Path",
"object_detection.utils.ops.reframe_box_masks_to_image_masks",
"numpy.asarray",
"tensorflow.keras.utils.get_file",
"tensorflow.convert_to_tensor",
"tensorflow.cast"
] | [((499, 586), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', ([], {'fname': 'model_name', 'origin': '(base_url + model_file)', 'untar': '(True)'}), '(fname=model_name, origin=base_url + model_file,\n untar=True)\n', (522, 586), True, 'import tensorflow as tf\n'), ((801, 818), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (811, 818), True, 'import numpy as np\n'), ((917, 944), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (937, 944), True, 'import tensorflow as tf\n'), ((630, 653), 'pathlib.Path', 'pathlib.Path', (['model_dir'], {}), '(model_dir)\n', (642, 653), False, 'import pathlib\n'), ((1864, 2006), 'object_detection.utils.ops.reframe_box_masks_to_image_masks', 'utils_ops.reframe_box_masks_to_image_masks', (["output_dict['detection_masks']", "output_dict['detection_boxes']", 'image.shape[0]', 'image.shape[1]'], {}), "(output_dict['detection_masks'],\n output_dict['detection_boxes'], image.shape[0], image.shape[1])\n", (1906, 2006), True, 'from object_detection.utils import ops as utils_ops\n'), ((2060, 2109), 'tensorflow.cast', 'tf.cast', (['(detection_masks_reframed > 0.5)', 'tf.uint8'], {}), '(detection_masks_reframed > 0.5, tf.uint8)\n', (2067, 2109), True, 'import tensorflow as tf\n'), ((2423, 2445), 'PIL.Image.open', 'Image.open', (['path_image'], {}), '(path_image)\n', (2433, 2445), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
"""
This script contains the transformations between world and different sensors.
"""
# Credit to https://github.com/MukhlasAdib/CARLA-2DBBox/blob/master/carla_vehicle_annotator.py
# Author: <NAME> <<EMAIL>>
# License: MIT
import numpy as np
from matplotlib import cm
from opencda.opencda_carla import Transform
VIRIDIS = np.array(cm.get_cmap('viridis').colors)
VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0])
def get_camera_intrinsic(sensor):
"""
Retrieve the camera intrinsic matrix
Args:
-sensor (carla.sensor.camera.rgb): The CARLA sensor object.
Returns:
-matrix_k (np.ndarray): The 2D intrinsic matrix.
"""
VIEW_WIDTH = int(sensor.attributes['image_size_x'])
VIEW_HEIGHT = int(sensor.attributes['image_size_y'])
VIEW_FOV = int(float(sensor.attributes['fov']))
matrix_k = np.identity(3)
matrix_k[0, 2] = VIEW_WIDTH / 2.0
matrix_k[1, 2] = VIEW_HEIGHT / 2.0
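    # focal length in pixels: f = width / (2 * tan(FOV / 2)), shared by fx and fy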
matrix_k[0, 0] = matrix_k[1, 1] = VIEW_WIDTH / (2.0 * np.tan(VIEW_FOV * np.pi / 360.0))
return matrix_k
def create_bb_points(vehicle):
"""
Extract the eight vertices of the bounding box from the vehicle.
Args:
-vehicle (carla.Vehicle or ObstacleVehicle): The object vehicle.
Returns:
- bbx(np.ndarray): 3d bounding box.
"""
bbx = np.zeros((8, 4))
extent = vehicle.bounding_box.extent
bbx[0, :] = np.array([extent.x, extent.y, -extent.z, 1])
bbx[1, :] = np.array([-extent.x, extent.y, -extent.z, 1])
bbx[2, :] = np.array([-extent.x, -extent.y, -extent.z, 1])
bbx[3, :] = np.array([extent.x, -extent.y, -extent.z, 1])
bbx[4, :] = np.array([extent.x, extent.y, extent.z, 1])
bbx[5, :] = np.array([-extent.x, extent.y, extent.z, 1])
bbx[6, :] = np.array([-extent.x, -extent.y, extent.z, 1])
bbx[7, :] = np.array([extent.x, -extent.y, extent.z, 1])
return bbx
def x_to_world_transformation(transform):
"""
Get the transformation matrix from x(it can be vehicle or sensor) coordinates to world coordinate.
Args:
-transform (carla.Transform): The transform that contains location and rotation.
Returns:
-matrix (np.ndarray): The transformation matrix
"""
rotation = transform.rotation
location = transform.location
# used for rotation matrix
c_y = np.cos(np.radians(rotation.yaw))
s_y = np.sin(np.radians(rotation.yaw))
c_r = np.cos(np.radians(rotation.roll))
s_r = np.sin(np.radians(rotation.roll))
c_p = np.cos(np.radians(rotation.pitch))
s_p = np.sin(np.radians(rotation.pitch))
matrix = np.identity(4)
# translation matrix
matrix[0, 3] = location.x
matrix[1, 3] = location.y
matrix[2, 3] = location.z
# rotation matrix
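    # yaw rotates about Z, pitch about Y, roll about X (the convention carla.Transform uses)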
matrix[0, 0] = c_p * c_y
matrix[0, 1] = c_y * s_p * s_r - s_y * c_r
matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r
matrix[1, 0] = s_y * c_p
matrix[1, 1] = s_y * s_p * s_r + c_y * c_r
matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r
matrix[2, 0] = s_p
matrix[2, 1] = -c_p * s_r
matrix[2, 2] = c_p * c_r
return matrix
def bbx_to_world(cords, vehicle):
"""
Convert bounding box coordinate at vehicle reference to world reference.
Args:
-cords (np.ndarray): Bounding box coordinates with 8 vertices, shape (n, 4).
-vehicle (carla.vehicle or ObstacleVehicle): vehicle object.
Returns:
-bb_world_cords (np.ndarray): Bounding box coordinates under word reference.
"""
bb_transform = Transform(vehicle.bounding_box.location)
# bounding box to vehicle transformation matrix
bb_vehicle_matrix = x_to_world_transformation(bb_transform)
# vehicle to world transformation matrix
vehicle_world_matrix = x_to_world_transformation(vehicle.get_transform())
# bounding box to world transformation matrix
bb_world_matrix = np.dot(vehicle_world_matrix, bb_vehicle_matrix)
# 8 vertices are relative to bbx center, thus multiply with bbx_2_world to get the world coords.
bb_world_cords = np.dot(bb_world_matrix, np.transpose(cords))
return bb_world_cords
def world_to_sensor(cords, sensor_transform):
"""
Transform coordinate from world reference to sensor reference.
Args:
-cords (np.ndarray): Coordinates under world reference, shape:(4, n).
-sensor_transform (carla.Transform): sensor position in the world, shape:(3, 1).
Returns:
-sensor_cords(np.ndarray): Coordinates in sensor reference.
"""
sensor_world_matrix = x_to_world_transformation(sensor_transform)
world_sensor_matrix = np.linalg.inv(sensor_world_matrix)
sensor_cords = np.dot(world_sensor_matrix, cords)
return sensor_cords
def sensor_to_world(cords, sensor_transform):
"""
    Transform coordinate from sensor reference to world reference.
Args:
-cords (np.ndarray): Coordinates under sensor reference.
-sensor_transform (carla.Transform): sensor position in the world
Returns:
-world_cords (np.ndarray): Coordinates projected to world space.
"""
sensor_world_matrix = x_to_world_transformation(sensor_transform)
world_cords = np.dot(sensor_world_matrix, cords)
return world_cords
def vehicle_to_sensor(cords, vehicle, sensor_transform):
"""
Transform coordinates from vehicle reference to sensor reference
Args:
-cords (np.ndarray): Coordinates under vehicle reference, shape (n, 4).
-vehicle (carla.vehicle or ObstacleVehicle): vehicle object.
-sensor_transform (carla.Transform): sensor position in the world, shape(3, 1).
Returns:
-(np.ndarray): Coordinates in sensor reference, shape(4, n).
"""
world_cord = bbx_to_world(cords, vehicle)
sensor_cord = world_to_sensor(world_cord, sensor_transform)
return sensor_cord
def get_bounding_box(vehicle, camera, sensor_transform):
"""
Get vehicle bounding box and project to sensor image.
Args:
-vehicle (carla.vehicle or ObstacleVehicle): vehicle object.
-camera (carla.sensor.camera.rgb): The CARLA sensor object.
-sensor_transform (carla.Transform): sensor position in the world
Returns:
-camera_bbox (np.ndarray): Bounding box coordinates in sensor image.
"""
camera_k_matrix = get_camera_intrinsic(camera)
# bb_cords is relative to bbx center(approximate the vehicle center)
bb_cords = create_bb_points(vehicle)
# bbx coordinates in sensor coordinate system. shape: (3, 8)
cords_x_y_z = vehicle_to_sensor(bb_cords, vehicle, sensor_transform)[:3, :]
# refer to https://github.com/carla-simulator/carla/issues/553
cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :].reshape(1, 8),
-cords_x_y_z[2, :].reshape(1, 8),
cords_x_y_z[0, :].reshape(1, 8)])
# bounding box in sensor image. Shape:(8, 3)
bbox = np.transpose(np.dot(camera_k_matrix, cords_y_minus_z_x))
new_x = (bbox[:, 0] / bbox[:, 2]).reshape(8, 1)
new_y = (bbox[:, 1] / bbox[:, 2]).reshape(8, 1)
new_z = bbox[:, 2].reshape(8, 1)
camera_bbox = np.concatenate([new_x, new_y, new_z], axis=1)
return camera_bbox
def p3d_to_p2d_bb(p3d_bb):
"""
Draw 2D bounding box (4 vertices) from 3D bounding box (8 vertices) in image.
2D bounding box is represented by two corner points
Args:
-p3d_bb (np.array): The objective 3D bounding box.
Returns:
"""
min_x = np.amin(p3d_bb[:, 0])
min_y = np.amin(p3d_bb[:, 1])
max_x = np.amax(p3d_bb[:, 0])
max_y = np.amax(p3d_bb[:, 1])
p2d_bb = np.array([[min_x, min_y], [max_x, max_y]])
return p2d_bb
def get_2d_bb(vehicle, sensor, senosr_transform):
"""
Summarize 2D bounding box creation
Args:
-vehicle (carla.vehicle or ObstacleVehicle): vehicle object.
-sensor (carla.sensor.camera.rgb): The CARLA sensor object.
-senosr_transform (carla.Transform): sensor position in the world
Returns:
-p2d_bb (np.ndarray): 2d bounding box in camera image.
"""
p3d_bb = get_bounding_box(vehicle, sensor, senosr_transform)
p2d_bb = p3d_to_p2d_bb(p3d_bb)
return p2d_bb
def project_lidar_to_camera(lidar, camera, point_cloud, rgb_image):
"""
Project lidar to camera space.
Args:
-lidar (carla.Sensor): Lidar sensor.
        -camera (carla.Sensor): Camera sensor.
-point_cloud (np.ndarray): cloud points, (x, y, z, intensity).
-rgb_image (np.ndarray): rgb image from camera.
Returns:
-rgb_image (np.ndarray): new rgb image with lidar points projected.
-points_2d (np.ndarray): point clouds projected to camera space.
"""
# Lidar intensity array of shape (p_cloud_size,) but, for now, let's
# focus on the 3D points.
intensity = np.array(point_cloud[:, 3])
# Point cloud in lidar sensor space array of shape (3, p_cloud_size).
local_lidar_points = np.array(point_cloud[:, :3]).T
# Add an extra 1.0 at the end of each 3d point so it becomes of
# shape (4, p_cloud_size) and it can be multiplied by a (4, 4) matrix.
local_lidar_points = np.r_[
local_lidar_points, [np.ones(local_lidar_points.shape[1])]]
# This (4, 4) matrix transforms the points from lidar space to world space.
lidar_2_world = x_to_world_transformation(lidar.get_transform())
# transform lidar points from lidar space to world space
world_points = np.dot(lidar_2_world, local_lidar_points)
# project world points to camera space
sensor_points = world_to_sensor(world_points, camera.get_transform())
# (x, y ,z) -> (y, -z, x)
point_in_camera_coords = np.array([
sensor_points[1],
sensor_points[2] * -1,
sensor_points[0]])
# retrieve camera intrinsic
K = get_camera_intrinsic(camera)
# project the 3d points in camera space to image space
points_2d = np.dot(K, point_in_camera_coords)
# normalize x,y,z
points_2d = np.array([
points_2d[0, :] / points_2d[2, :],
points_2d[1, :] / points_2d[2, :],
points_2d[2, :]])
image_w = int(camera.attributes['image_size_x'])
image_h = int(camera.attributes['image_size_y'])
# remove points out the camera scope
points_2d = points_2d.T
intensity = intensity.T
points_in_canvas_mask = \
(points_2d[:, 0] > 0.0) & (points_2d[:, 0] < image_w) & \
(points_2d[:, 1] > 0.0) & (points_2d[:, 1] < image_h) & \
(points_2d[:, 2] > 0.0)
new_points_2d = points_2d[points_in_canvas_mask]
new_intensity = intensity[points_in_canvas_mask]
# Extract the screen coords (uv) as integers.
    u_coord = new_points_2d[:, 0].astype(int)
    v_coord = new_points_2d[:, 1].astype(int)
# Since at the time of the creation of this script, the intensity function
# is returning high values, these are adjusted to be nicely visualized.
new_intensity = 4 * new_intensity - 3
color_map = np.array([
np.interp(new_intensity, VID_RANGE, VIRIDIS[:, 0]) * 255.0,
np.interp(new_intensity, VID_RANGE, VIRIDIS[:, 1]) * 255.0,
        np.interp(new_intensity, VID_RANGE, VIRIDIS[:, 2]) * 255.0]).astype(int).T
for i in range(len(new_points_2d)):
rgb_image[v_coord[i] - 1: v_coord[i] + 1, u_coord[i] - 1: u_coord[i] + 1] = color_map[i]
return rgb_image, points_2d | [
"numpy.identity",
"numpy.radians",
"numpy.amin",
"numpy.tan",
"numpy.ones",
"opencda.opencda_carla.Transform",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.dot",
"numpy.linalg.inv",
"numpy.concatenate",
"numpy.interp",
"numpy.transpose",
"numpy.amax",
"matplotlib.cm.get_cmap"
] | [((401, 440), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'VIRIDIS.shape[0]'], {}), '(0.0, 1.0, VIRIDIS.shape[0])\n', (412, 440), True, 'import numpy as np\n'), ((865, 879), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (876, 879), True, 'import numpy as np\n'), ((1340, 1356), 'numpy.zeros', 'np.zeros', (['(8, 4)'], {}), '((8, 4))\n', (1348, 1356), True, 'import numpy as np\n'), ((1415, 1459), 'numpy.array', 'np.array', (['[extent.x, extent.y, -extent.z, 1]'], {}), '([extent.x, extent.y, -extent.z, 1])\n', (1423, 1459), True, 'import numpy as np\n'), ((1476, 1521), 'numpy.array', 'np.array', (['[-extent.x, extent.y, -extent.z, 1]'], {}), '([-extent.x, extent.y, -extent.z, 1])\n', (1484, 1521), True, 'import numpy as np\n'), ((1538, 1584), 'numpy.array', 'np.array', (['[-extent.x, -extent.y, -extent.z, 1]'], {}), '([-extent.x, -extent.y, -extent.z, 1])\n', (1546, 1584), True, 'import numpy as np\n'), ((1601, 1646), 'numpy.array', 'np.array', (['[extent.x, -extent.y, -extent.z, 1]'], {}), '([extent.x, -extent.y, -extent.z, 1])\n', (1609, 1646), True, 'import numpy as np\n'), ((1663, 1706), 'numpy.array', 'np.array', (['[extent.x, extent.y, extent.z, 1]'], {}), '([extent.x, extent.y, extent.z, 1])\n', (1671, 1706), True, 'import numpy as np\n'), ((1723, 1767), 'numpy.array', 'np.array', (['[-extent.x, extent.y, extent.z, 1]'], {}), '([-extent.x, extent.y, extent.z, 1])\n', (1731, 1767), True, 'import numpy as np\n'), ((1784, 1829), 'numpy.array', 'np.array', (['[-extent.x, -extent.y, extent.z, 1]'], {}), '([-extent.x, -extent.y, extent.z, 1])\n', (1792, 1829), True, 'import numpy as np\n'), ((1846, 1890), 'numpy.array', 'np.array', (['[extent.x, -extent.y, extent.z, 1]'], {}), '([extent.x, -extent.y, extent.z, 1])\n', (1854, 1890), True, 'import numpy as np\n'), ((2617, 2631), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (2628, 2631), True, 'import numpy as np\n'), ((3531, 3571), 'opencda.opencda_carla.Transform', 'Transform', (['vehicle.bounding_box.location'], {}), '(vehicle.bounding_box.location)\n', (3540, 3571), False, 'from opencda.opencda_carla import Transform\n'), ((3885, 3932), 'numpy.dot', 'np.dot', (['vehicle_world_matrix', 'bb_vehicle_matrix'], {}), '(vehicle_world_matrix, bb_vehicle_matrix)\n', (3891, 3932), True, 'import numpy as np\n'), ((4614, 4648), 'numpy.linalg.inv', 'np.linalg.inv', (['sensor_world_matrix'], {}), '(sensor_world_matrix)\n', (4627, 4648), True, 'import numpy as np\n'), ((4668, 4702), 'numpy.dot', 'np.dot', (['world_sensor_matrix', 'cords'], {}), '(world_sensor_matrix, cords)\n', (4674, 4702), True, 'import numpy as np\n'), ((5127, 5161), 'numpy.dot', 'np.dot', (['sensor_world_matrix', 'cords'], {}), '(sensor_world_matrix, cords)\n', (5133, 5161), True, 'import numpy as np\n'), ((7115, 7160), 'numpy.concatenate', 'np.concatenate', (['[new_x, new_y, new_z]'], {'axis': '(1)'}), '([new_x, new_y, new_z], axis=1)\n', (7129, 7160), True, 'import numpy as np\n'), ((7465, 7486), 'numpy.amin', 'np.amin', (['p3d_bb[:, 0]'], {}), '(p3d_bb[:, 0])\n', (7472, 7486), True, 'import numpy as np\n'), ((7499, 7520), 'numpy.amin', 'np.amin', (['p3d_bb[:, 1]'], {}), '(p3d_bb[:, 1])\n', (7506, 7520), True, 'import numpy as np\n'), ((7533, 7554), 'numpy.amax', 'np.amax', (['p3d_bb[:, 0]'], {}), '(p3d_bb[:, 0])\n', (7540, 7554), True, 'import numpy as np\n'), ((7567, 7588), 'numpy.amax', 'np.amax', (['p3d_bb[:, 1]'], {}), '(p3d_bb[:, 1])\n', (7574, 7588), True, 'import numpy as np\n'), ((7602, 7644), 'numpy.array', 'np.array', (['[[min_x, min_y], 
[max_x, max_y]]'], {}), '([[min_x, min_y], [max_x, max_y]])\n', (7610, 7644), True, 'import numpy as np\n'), ((8821, 8848), 'numpy.array', 'np.array', (['point_cloud[:, 3]'], {}), '(point_cloud[:, 3])\n', (8829, 8848), True, 'import numpy as np\n'), ((9455, 9496), 'numpy.dot', 'np.dot', (['lidar_2_world', 'local_lidar_points'], {}), '(lidar_2_world, local_lidar_points)\n', (9461, 9496), True, 'import numpy as np\n'), ((9675, 9744), 'numpy.array', 'np.array', (['[sensor_points[1], sensor_points[2] * -1, sensor_points[0]]'], {}), '([sensor_points[1], sensor_points[2] * -1, sensor_points[0]])\n', (9683, 9744), True, 'import numpy as np\n'), ((9915, 9948), 'numpy.dot', 'np.dot', (['K', 'point_in_camera_coords'], {}), '(K, point_in_camera_coords)\n', (9921, 9948), True, 'import numpy as np\n'), ((9988, 10089), 'numpy.array', 'np.array', (['[points_2d[0, :] / points_2d[2, :], points_2d[1, :] / points_2d[2, :],\n points_2d[2, :]]'], {}), '([points_2d[0, :] / points_2d[2, :], points_2d[1, :] / points_2d[2,\n :], points_2d[2, :]])\n', (9996, 10089), True, 'import numpy as np\n'), ((358, 380), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (369, 380), False, 'from matplotlib import cm\n'), ((2356, 2380), 'numpy.radians', 'np.radians', (['rotation.yaw'], {}), '(rotation.yaw)\n', (2366, 2380), True, 'import numpy as np\n'), ((2399, 2423), 'numpy.radians', 'np.radians', (['rotation.yaw'], {}), '(rotation.yaw)\n', (2409, 2423), True, 'import numpy as np\n'), ((2442, 2467), 'numpy.radians', 'np.radians', (['rotation.roll'], {}), '(rotation.roll)\n', (2452, 2467), True, 'import numpy as np\n'), ((2486, 2511), 'numpy.radians', 'np.radians', (['rotation.roll'], {}), '(rotation.roll)\n', (2496, 2511), True, 'import numpy as np\n'), ((2530, 2556), 'numpy.radians', 'np.radians', (['rotation.pitch'], {}), '(rotation.pitch)\n', (2540, 2556), True, 'import numpy as np\n'), ((2575, 2601), 'numpy.radians', 'np.radians', (['rotation.pitch'], {}), '(rotation.pitch)\n', (2585, 2601), True, 'import numpy as np\n'), ((4080, 4099), 'numpy.transpose', 'np.transpose', (['cords'], {}), '(cords)\n', (4092, 4099), True, 'import numpy as np\n'), ((6911, 6953), 'numpy.dot', 'np.dot', (['camera_k_matrix', 'cords_y_minus_z_x'], {}), '(camera_k_matrix, cords_y_minus_z_x)\n', (6917, 6953), True, 'import numpy as np\n'), ((8949, 8977), 'numpy.array', 'np.array', (['point_cloud[:, :3]'], {}), '(point_cloud[:, :3])\n', (8957, 8977), True, 'import numpy as np\n'), ((1015, 1047), 'numpy.tan', 'np.tan', (['(VIEW_FOV * np.pi / 360.0)'], {}), '(VIEW_FOV * np.pi / 360.0)\n', (1021, 1047), True, 'import numpy as np\n'), ((9185, 9221), 'numpy.ones', 'np.ones', (['local_lidar_points.shape[1]'], {}), '(local_lidar_points.shape[1])\n', (9192, 9221), True, 'import numpy as np\n'), ((10998, 11048), 'numpy.interp', 'np.interp', (['new_intensity', 'VID_RANGE', 'VIRIDIS[:, 0]'], {}), '(new_intensity, VID_RANGE, VIRIDIS[:, 0])\n', (11007, 11048), True, 'import numpy as np\n'), ((11066, 11116), 'numpy.interp', 'np.interp', (['new_intensity', 'VID_RANGE', 'VIRIDIS[:, 1]'], {}), '(new_intensity, VID_RANGE, VIRIDIS[:, 1])\n', (11075, 11116), True, 'import numpy as np\n'), ((11134, 11184), 'numpy.interp', 'np.interp', (['new_intensity', 'VID_RANGE', 'VIRIDIS[:, 2]'], {}), '(new_intensity, VID_RANGE, VIRIDIS[:, 2])\n', (11143, 11184), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, print_function, absolute_import
import unittest
from pint.util import string_preprocessor
class TestStringProcessor(unittest.TestCase):
def _test(self, bef, aft):
for pattern in ('{}', '+{}+'):
b = pattern.format(bef)
a = pattern.format(aft)
self.assertEqual(string_preprocessor(b), a)
def test_rules(self):
self._test('bcd^3', 'bcd**3')
self._test('bcd squared', 'bcd**2')
self._test('bcd cubed', 'bcd**3')
self._test('sq bcd', 'bcd**2')
self._test('square bcd', 'bcd**2')
self._test('cubic bcd', 'bcd**3')
self._test('bcd efg', 'bcd*efg')
self._test('bcd efg', 'bcd*efg')
self._test('miles per hour', 'miles/hour')
self._test('1,234,567', '1234567')
self._test('1hour', '1*hour')
self._test('1.1hour', '1.1*hour')
self._test('1e-24', '1e-24')
self._test('1e+24', '1e+24')
self._test('1e24', '1e24')
self._test('1E-24', '1E-24')
self._test('1E+24', '1E+24')
self._test('1E24', '1E24')
self._test('g_0', 'g_0')
self._test('1g_0', '1*g_0')
self._test('g0', 'g0')
self._test('1g0', '1*g0')
self._test('g', 'g')
self._test('1g', '1*g')
| [
"pint.util.string_preprocessor"
] | [((388, 410), 'pint.util.string_preprocessor', 'string_preprocessor', (['b'], {}), '(b)\n', (407, 410), False, 'from pint.util import string_preprocessor\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-05-18 13:36:33
# @Author : guangqiang_xu (<EMAIL>)
# @Version : $Id$
from __future__ import unicode_literals
import youtube_dl
import json
import requests
from lxml import etree
import os
from selenium import webdriver
from lxml import etree
import time
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
sess = requests.session()
def _get_youtube_video_info(url):
ydl_opts = {
'quiet': True,
'skip_download': True,
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
return ydl.extract_info(url)
def get_youtube_video_data(urls, keyword):
for url in urls:
item = {}
if "watch?" not in url:
continue
video_url = 'https://www.youtube.com' + url
data = _get_youtube_video_info(video_url)
stringdate = data['upload_date']
timeArray = time.strptime(stringdate, "%Y%m%d")
timestamp = int(time.mktime(timeArray))
if timestamp < 1506787200 or timestamp > 1520438400:
continue
# browser = webdriver.PhantomJS(desired_capabilities=cap)
browser = webdriver.Firefox()
try:
browser.get(video_url)
except:
continue
time.sleep(10)
start = int(time.time())
length=100
for i in range(0,30):
js="var q=document.documentElement.scrollTop="+str(length)
browser.execute_script(js)
print(js)
time.sleep(2)
length += 1800
txt = browser.page_source
html = etree.HTML(txt)
stringdate = data['upload_date']
timeArray = time.strptime(stringdate, "%Y%m%d")
timestamp = int(time.mktime(timeArray))
strDate = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
divs = html.xpath('//div[@id="contents"]/ytd-comment-thread-renderer')
comments = []
for div in divs:
            try:
                info = {}
                info['user_name'] = div.xpath('.//a[@id="author-text"]/span/text()')[0].strip()
                info['content'] = div.xpath('.//div[@id="content"]/yt-formatted-string[@id="content-text"]/text()')[0].strip()
                strdate = div.xpath('.//yt-formatted-string[@id="published-time-text"]/a/text()')[0].strip()
                # convert the relative date string ("N 个月前", "N 周前", ...) into an absolute time
                timea = int(time.time())
                if '个月前' in strdate:
                    num = int(strdate.replace(' 个月前', ''))
                    info['timestamp'] = timea - num * 30 * 86400
                    info['time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(info['timestamp']))
                elif '周前' in strdate:
                    num = int(strdate.replace('周前', ''))
                    info['time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timea - num * 7 * 86400))
                elif '天前' in strdate:
                    num = int(strdate.replace('天前', ''))
                    info['time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timea - num * 86400))
                elif '小时前' in strdate:
                    num = int(strdate.replace('小时前', ''))
                    info['time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timea - num * 3600))
                elif '分钟前' in strdate:
                    num = int(strdate.replace('分钟前', ''))
                    info['time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timea - num * 60))
                comments.append(info)
            except:
                continue
item['title'] = data['title']
try:
item['summary'] = data['description']
except:
item['summary'] = ''
item['view_count'] = data['view_count']
item['like_count'] = data['like_count']
item['dislike_count'] = data['dislike_count']
item['time'] = strDate
item['timestamp'] = timestamp
item['comments'] = comments
item['id'] = data['id']
with open(keyword + '.json', 'a') as f:
f.write(json.dumps(item)+'\n')
browser.close()
def get_url(keyword):
page = 1
while 1:
url = "https://www.youtube.com/results?search_query={}&sp=CAI%253D&page={}".format(keyword, page)
response = sess.get(url,headers=headers)
txt = response.text
html = etree.HTML(txt)
urls = html.xpath('//h3/a/@href')
if len(urls) <= 0:
break
get_youtube_video_data(urls, keyword)
if __name__ == '__main__':
keywords = ["Влади́<NAME>", "Ксе́ния Анато́льевна Собча́к", "<NAME>", "<NAME>́вский", "Ал<NAME>"]
for i in keywords:
get_url(i)
| [
"time.strptime",
"requests.session",
"sys.setdefaultencoding",
"time.mktime",
"selenium.webdriver.Firefox",
"json.dumps",
"time.sleep",
"youtube_dl.YoutubeDL",
"lxml.etree.HTML",
"time.localtime",
"time.time"
] | [((345, 376), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (367, 376), False, 'import sys\n'), ((524, 542), 'requests.session', 'requests.session', ([], {}), '()\n', (540, 542), False, 'import requests\n'), ((665, 695), 'youtube_dl.YoutubeDL', 'youtube_dl.YoutubeDL', (['ydl_opts'], {}), '(ydl_opts)\n', (685, 695), False, 'import youtube_dl\n'), ((1041, 1076), 'time.strptime', 'time.strptime', (['stringdate', '"""%Y%m%d"""'], {}), "(stringdate, '%Y%m%d')\n", (1054, 1076), False, 'import time\n'), ((1291, 1310), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (1308, 1310), False, 'from selenium import webdriver\n'), ((1404, 1418), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1414, 1418), False, 'import time\n'), ((1736, 1751), 'lxml.etree.HTML', 'etree.HTML', (['txt'], {}), '(txt)\n', (1746, 1751), False, 'from lxml import etree\n'), ((1813, 1848), 'time.strptime', 'time.strptime', (['stringdate', '"""%Y%m%d"""'], {}), "(stringdate, '%Y%m%d')\n", (1826, 1848), False, 'import time\n'), ((4755, 4770), 'lxml.etree.HTML', 'etree.HTML', (['txt'], {}), '(txt)\n', (4765, 4770), False, 'from lxml import etree\n'), ((1101, 1123), 'time.mktime', 'time.mktime', (['timeArray'], {}), '(timeArray)\n', (1112, 1123), False, 'import time\n'), ((1439, 1450), 'time.time', 'time.time', ([], {}), '()\n', (1448, 1450), False, 'import time\n'), ((1645, 1658), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1655, 1658), False, 'import time\n'), ((1873, 1895), 'time.mktime', 'time.mktime', (['timeArray'], {}), '(timeArray)\n', (1884, 1895), False, 'import time\n'), ((1951, 1976), 'time.localtime', 'time.localtime', (['timestamp'], {}), '(timestamp)\n', (1965, 1976), False, 'import time\n'), ((2646, 2657), 'time.time', 'time.time', ([], {}), '()\n', (2655, 2657), False, 'import time\n'), ((4462, 4478), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (4472, 4478), False, 'import json\n'), ((2801, 2834), 'time.localtime', 'time.localtime', (["info['timestamp']"], {}), "(info['timestamp'])\n", (2815, 2834), False, 'import time\n'), ((2998, 3009), 'time.time', 'time.time', ([], {}), '()\n', (3007, 3009), False, 'import time\n'), ((3094, 3130), 'time.localtime', 'time.localtime', (['(timea - date * 86400)'], {}), '(timea - date * 86400)\n', (3108, 3130), False, 'import time\n'), ((3347, 3375), 'time.localtime', 'time.localtime', (['(timea - date)'], {}), '(timea - date)\n', (3361, 3375), False, 'import time\n'), ((3595, 3623), 'time.localtime', 'time.localtime', (['(timea - date)'], {}), '(timea - date)\n', (3609, 3623), False, 'import time\n'), ((3841, 3869), 'time.localtime', 'time.localtime', (['(timea - date)'], {}), '(timea - date)\n', (3855, 3869), False, 'import time\n')] |
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from numpy import array
from concurrent.futures import ThreadPoolExecutor, wait
import fruitvectors
def bar_chart(praw_client, start_limit=0, end_limit=-1, recent=True):
fruit_vectors = fruitvectors.get_fruit_vectors(praw_client, recent, start_limit, end_limit)
scores, image_urls = fruitvectors.get_scores_and_urls(fruit_vectors)
_save_bar_chart(scores, image_urls)
def _save_bar_chart(scores: list, urls: list):
score_nums = []
for score_num in range(1, len(scores)+1):
score_nums.append(str(score_num))
vector_figure = plt.figure(figsize=[19.2, 10.8])
axes = plt.axes()
axes.set_title("Fruit Vectors")
bars = plt.bar(score_nums, scores)
axes.bar = bars
vector_figure.add_axes(axes)
vector_figure.savefig("./bar.png")
| [
"fruitvectors.get_fruit_vectors",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.axes",
"fruitvectors.get_scores_and_urls"
] | [((285, 360), 'fruitvectors.get_fruit_vectors', 'fruitvectors.get_fruit_vectors', (['praw_client', 'recent', 'start_limit', 'end_limit'], {}), '(praw_client, recent, start_limit, end_limit)\n', (315, 360), False, 'import fruitvectors\n'), ((386, 433), 'fruitvectors.get_scores_and_urls', 'fruitvectors.get_scores_and_urls', (['fruit_vectors'], {}), '(fruit_vectors)\n', (418, 433), False, 'import fruitvectors\n'), ((651, 683), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[19.2, 10.8]'}), '(figsize=[19.2, 10.8])\n', (661, 683), True, 'import matplotlib.pyplot as plt\n'), ((695, 705), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (703, 705), True, 'import matplotlib.pyplot as plt\n'), ((753, 780), 'matplotlib.pyplot.bar', 'plt.bar', (['score_nums', 'scores'], {}), '(score_nums, scores)\n', (760, 780), True, 'import matplotlib.pyplot as plt\n')] |
import unittest
import context
from TheNounProjectAPI.api import API
from TheNounProjectAPI.exceptions import IncorrectType
class CollectionCustomURLs(unittest.TestCase):
def setUp(self):
key = "mock api key to satisfy type check in api._get_oauth()"
secret = "mock secret key to satisfy type check in api._get_oauth()"
self.api = API(key, secret, testing=True)
def _test_get_collection(self, identifier):
"""
Helper function to call api's get_collection in such a way that we only get the URL
and don't actually make the request.
"""
return self.api.get_collection(identifier).url
def test_get_collection_int(self):
"""
Test URL for get_collection with identifier 12
"""
identifier = 12
expected = "http://api.thenounproject.com/collection/12"
result = self._test_get_collection(identifier)
self.assertEqual(expected, result)
def test_get_collection_str(self):
"""
Test URL for get_collection with identifier "goat"
"""
identifier = "goat"
expected = "http://api.thenounproject.com/collection/goat"
result = self._test_get_collection(identifier)
self.assertEqual(expected, result)
def test_get_collection_none(self):
"""
Test URL for get_collection with illegal identifier None
"""
identifier = None
with self.assertRaises(IncorrectType):
self._test_get_collection(identifier)
def test_get_collection_bytes(self):
"""
Test URL for get_collection with identifier b"goat"
"""
identifier = b"goat"
with self.assertRaises(IncorrectType):
self._test_get_collection(identifier)
if __name__ == "__main__":
unittest.main() | [
"unittest.main",
"TheNounProjectAPI.api.API"
] | [((1823, 1838), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1836, 1838), False, 'import unittest\n'), ((364, 394), 'TheNounProjectAPI.api.API', 'API', (['key', 'secret'], {'testing': '(True)'}), '(key, secret, testing=True)\n', (367, 394), False, 'from TheNounProjectAPI.api import API\n')] |
import librosa
def to_fast_fourier(initial_melody):
n = len(initial_melody)
n_fft = 2048
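    # pad the signal so the STFT can be inverted back to exactly n samples by istft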
padded_melody = librosa.util.fix_length(initial_melody, n + n_fft // 2)
transformed_melody = librosa.stft(padded_melody, n_fft=n_fft)
return transformed_melody
def from_fast_fourier(fourier, n):
initial_melody = librosa.istft(fourier, length=n)
return initial_melody
| [
"librosa.stft",
"librosa.util.fix_length",
"librosa.istft"
] | [((110, 165), 'librosa.util.fix_length', 'librosa.util.fix_length', (['initial_melody', '(n + n_fft // 2)'], {}), '(initial_melody, n + n_fft // 2)\n', (133, 165), False, 'import librosa\n'), ((188, 228), 'librosa.stft', 'librosa.stft', (['padded_melody'], {'n_fft': 'n_fft'}), '(padded_melody, n_fft=n_fft)\n', (200, 228), False, 'import librosa\n'), ((312, 344), 'librosa.istft', 'librosa.istft', (['fourier'], {'length': 'n'}), '(fourier, length=n)\n', (325, 344), False, 'import librosa\n')] |
import torch
from ..utility.linspace import *
from ..math.constant import *
from ..math.cos import *
from ..math.sin import *
def equilateral_polygon(n, device='cuda:0'):
"""
Creates the vertices of an equilateral n-gon
Parameters
----------
n : int
the number of vertices of the n-gon
device : str or torch.device (optional)
the device the tensors will be stored to (default is 'cuda:0')
Returns
-------
Tensor
the vertices of the n-gon
"""
t = linspace(0, PI2, n+1, device=device)[:-1]
return torch.cat((cos(t), sin(t), torch.zeros_like(t, device=device)), dim=1)
| [
"torch.zeros_like"
] | [((618, 652), 'torch.zeros_like', 'torch.zeros_like', (['t'], {'device': 'device'}), '(t, device=device)\n', (634, 652), False, 'import torch\n')] |
import pytest
from jubox import CodeCell, MarkdownCell, RawCell, JupyterCell
from nbformat.notebooknode import NotebookNode
from nbformat.v4 import new_code_cell, new_markdown_cell, new_raw_cell
test_classes = [(CodeCell,), (MarkdownCell,), (RawCell,)]
def get_test_id(tpl):
return tpl[0].__name__
@pytest.mark.parametrize("cls", test_classes, ids=get_test_id)
def test_initiation_from_empty(cls):
cls = cls[0]
cell = cls()
assert isinstance(cell._node, NotebookNode)
assert cell._node.source == ""
assert cell._node.cell_type == cls.cell_type
@pytest.mark.parametrize("cls", test_classes, ids=get_test_id)
def test_initiation_from_string(cls):
cls = cls[0]
cell = cls("This is a test cell")
assert isinstance(cell._node, NotebookNode)
assert cell._node.source == "This is a test cell"
assert cell._node.cell_type == cls.cell_type
@pytest.mark.parametrize("cls", test_classes, ids=get_test_id)
def test_initiation_from_string_with_tags(cls):
cls = cls[0]
cell = cls("This is a test cell", tags=["tagged"])
assert isinstance(cell._node, NotebookNode)
assert cell._node.source == "This is a test cell"
assert cell._node.cell_type == cls.cell_type
assert cell._node.metadata.tags == ["tagged"]
| [
"pytest.mark.parametrize"
] | [((309, 370), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', 'test_classes'], {'ids': 'get_test_id'}), "('cls', test_classes, ids=get_test_id)\n", (332, 370), False, 'import pytest\n'), ((577, 638), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', 'test_classes'], {'ids': 'get_test_id'}), "('cls', test_classes, ids=get_test_id)\n", (600, 638), False, 'import pytest\n'), ((886, 947), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', 'test_classes'], {'ids': 'get_test_id'}), "('cls', test_classes, ids=get_test_id)\n", (909, 947), False, 'import pytest\n')] |
import random
arr_ml_topics = [
"Bagging",
"Bias Variance Tradeoff",
"Boosting",
"Bootstrap Resampling",
"Curse of Dimensionality",
"Cross Validation Strategies",
"Decision Trees",
"Eigenvalues and Eigenvectors",
"Gaussian/Normal Distribution",
"Gradient Descent",
"Hyperparameter Optimization",
"Hypothesis Testing",
"kNN",
"Linear Model Selection - Best Subset, Stepwise Selection",
"Linear Model - Dimensionality Reduction Methods",
"Logistic Regression",
"Loss, Cost and Object Function - Regression, Classification",
"MLE - Maximum Likelihood Estimator",
"Normal Distribution - Univariate & Multivariate",
"Parametric vs Non-parametric Models",
"Principal Component Analysis (PCA)",
"Random Forest",
"Simple Linear Regression",
"Support Vector Machines"
]
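# note: random.choices samples with replacement, so the same topic can appear more than once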
print(random.choices(arr_ml_topics, k=3))
| [
"random.choices"
] | [((865, 899), 'random.choices', 'random.choices', (['arr_ml_topics'], {'k': '(3)'}), '(arr_ml_topics, k=3)\n', (879, 899), False, 'import random\n')] |
"""
Definitions of the tier 2 pets: Crab, Dodo, Elephant, Flamingo, Hedgehog,
Peacock, Rat, Shrimp, Spider, and Swan.
"""
__all__ = ['Crab', 'Dodo', 'Elephant', 'Flamingo', 'Hedgehog', 'Peacock',
'Rat', 'Shrimp', 'Spider', 'Swan']
# Standard library imports
import math
# Local application imports
from gym_snape.game import pets
from gym_snape.game.pets import Pet
from gym_snape.game.pets import tokens
from gym_snape.game.pets.pet import capture_action, duplicate_action
# Third party imports
import numpy as np
class Crab(Pet):
def __init__(self):
super().__init__()
self._name = 'CRAB'
self.attack = 3
self.health = 3
def on_buy(self):
"""Copy the health of the healthiest friend."""
super().on_buy()
health = max([f.health for f in self._friends if f])
self.health = health
class Dodo(Pet):
def __init__(self):
super().__init__()
self._name = 'DODO'
self.attack = 2
self.health = 3
@capture_action
def on_battle_start(self):
"""Give attack to friend ahead, scaling with level."""
i = self._friends.index(self) - 1
ability_modifier = self.level / 2
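        # floor keeps the bonus integral: +50%/+100%/+150% of the Dodo's attack at levels 1/2/3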
bonus_attack = math.floor(self.attack * ability_modifier)
        while i >= 0:  # the front-most slot (index 0) is also a valid target
if self._friends[i]:
self._friends[i].attack += bonus_attack
break
i -= 1
super().on_battle_start()
class Elephant(Pet):
def __init__(self):
super().__init__()
self._name = 'ELEPHANT'
self.attack = 3
self.health = 5
@capture_action
def before_attack(self):
"""Deal 1 damage to 1/2/3 friends behind."""
super().before_attack()
i = self._friends.index(self) + 1
friends_hit = 0
        # damage up to self.level distinct friends behind, skipping empty slots
        while i < len(self._friends) and friends_hit < self.level:
            if self._friends[i]:
                self._friends[i].health -= 1
                friends_hit += 1
            i += 1
class Flamingo(Pet):
def __init__(self):
super().__init__()
self._name = 'FLAMINGO'
self.attack = 3
self.health = 1
@capture_action
def on_faint(self):
"""Give the two friends behind +(1*level)/+(1*level)."""
i = self._friends.index(self) + 1
ability_casts = 0
while i < len(self._friends) and ability_casts < 2:
if self._friends[i]:
self._friends[i].attack += 1 * self.level
self._friends[i].health += 1 * self.level
ability_casts += 1
i += 1
super().on_faint()
class Hedgehog(Pet):
def __init__(self):
super().__init__()
self._name = 'HEDGEHOG'
self.attack = 3
self.health = 2
@capture_action
def on_faint(self):
"""Deal 2*level damage to all."""
super().on_faint()
dmg = 2 * self.level
for friend in self._friends:
if friend:
friend.health -= dmg
if self._enemies: # might not have enemies if fainted in shop
for enemy in self._enemies:
if enemy:
enemy.health -= dmg
class Peacock(Pet):
def __init__(self):
super().__init__()
self._name = 'PEACOCK'
self.attack = 1
self.health = 5
@capture_action
def on_hurt(self):
"""Gain 2*level attack."""
super().on_hurt()
self.attack += 2 * self.level
class Rat(Pet):
def __init__(self):
super().__init__()
self._name = 'RAT'
self.attack = 4
self.health = 5
@capture_action
def on_faint(self):
"""Summon a dirty rat on the enemy team."""
super().on_faint()
self._enemies.append(tokens.DirtyRat())
class Shrimp(Pet):
def __init__(self):
super().__init__()
self._name = 'SHRIMP'
self.attack = 2
self.health = 3
def on_friend_sold(self):
"""Give a random friend +(1*level) health."""
super().on_friend_sold()
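        # eligible targets are every friend slot that is occupied and is not this shrimp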
choices = []
for friend in self._friends:
if friend and id(friend) != id(self):
choices.append(friend)
n_chosen = min(len(choices), 1)
if n_chosen == 1:
chosen = np.random.choice(choices, n_chosen, replace=False)
for c in chosen:
c.health += 1 * self.level
class Spider(Pet):
def __init__(self):
super().__init__()
self._name = 'SPIDER'
self.attack = 2
self.health = 2
@capture_action
def on_faint(self):
"""Summon a level 1/2/3 tier 3 pet as a 2/2."""
i = self._friends.index(self)
super().on_faint()
choices = [
pets.tier3.Badger,
pets.tier3.BlowFish,
pets.tier3.Camel,
pets.tier3.Dog,
pets.tier3.Giraffe,
pets.tier3.Kangaroo,
pets.tier3.Ox,
pets.tier3.Rabbit,
pets.tier3.Sheep,
pets.tier3.Snail,
pets.tier3.Turtle
]
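        # pick a random tier 3 class, instantiate it, and give it the 2/2 stat line described in the docstring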
spawn = np.random.choice(choices, 1)[0]()
spawn.zombify(2, 2)
spawn._level = self.level
spawn.assign_friends(self._friends)
spawn.assign_enemies(self._enemies)
self._friends.insert(i, spawn)
class Swan(Pet):
def __init__(self):
super().__init__()
self._name = 'SWAN'
self.attack = 3
self.health = 3
def on_turn_start(self):
"""Gain +(1*level) gold."""
self._game.gold += 1 * self.level
| [
"numpy.random.choice",
"gym_snape.game.pets.tokens.DirtyRat",
"math.floor"
] | [((1239, 1281), 'math.floor', 'math.floor', (['(self.attack * ability_modifier)'], {}), '(self.attack * ability_modifier)\n', (1249, 1281), False, 'import math\n'), ((3826, 3843), 'gym_snape.game.pets.tokens.DirtyRat', 'tokens.DirtyRat', ([], {}), '()\n', (3841, 3843), False, 'from gym_snape.game.pets import tokens\n'), ((4347, 4397), 'numpy.random.choice', 'np.random.choice', (['choices', 'n_chosen'], {'replace': '(False)'}), '(choices, n_chosen, replace=False)\n', (4363, 4397), True, 'import numpy as np\n'), ((5167, 5195), 'numpy.random.choice', 'np.random.choice', (['choices', '(1)'], {}), '(choices, 1)\n', (5183, 5195), True, 'import numpy as np\n')] |
# coding=utf-8
#
# Copyright 2008 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Urls configuration for the application
See http://www.djangoproject.com/documentation/url_dispatch/
"""
import logging
logging.info("Urls loading")
from django.conf.urls.defaults import *
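# django.conf.urls.defaults and patterns() are legacy APIs (removed in later Django releases)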
import views as v
urlpatterns = patterns('',
url(r'^$', v.home, name='home'),
url(r'^article/(?P<object_id>[\w-]+)/$', v.Article_detail, name='Article_detail'),
url(r'^p/(?P<object_id>[\w-]+)/$', v.Page_detail, name='Page_detail'),
url(r'^t/(?P<object_id>[\w-]+)/$', v.Topic_detail, name='Topic_detail'),
url(r'^file/(?P<object_id>[\w-]+)/$', v.file_full, name='file_full'),
url(r'^fthumb/(?P<object_id>[\w-]+)$', v.file_full, name='file_thumb'),
)
logging.info("Urls loaded")
| [
"logging.info"
] | [((739, 767), 'logging.info', 'logging.info', (['"""Urls loading"""'], {}), "('Urls loading')\n", (751, 767), False, 'import logging\n'), ((1300, 1327), 'logging.info', 'logging.info', (['"""Urls loaded"""'], {}), "('Urls loaded')\n", (1312, 1327), False, 'import logging\n')] |