max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
pe_tree/__main__.py | lybtongji/pe_tree | 1,271 | 11124947 | #
# Copyright (c) 2020 BlackBerry Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PE Tree standalone application"""
# Standard imports
import os
import sys
from argparse import ArgumentParser
# Qt imports
from PyQt5 import QtWidgets
# QDarkStyle
try:
import qdarkstyle
HAVE_DARKSTYLE = True
except ImportError:
HAVE_DARKSTYLE = False
# PE Tree imports
import pe_tree.window
import pe_tree.runtime
class ApplicationRuntime(pe_tree.runtime.Runtime):
"""Standalone application runtime"""
def __init__(self, widget, args):
# Load configuration
self.config_file = os.path.join(self.get_temp_dir(), "pe_tree.ini")
super(ApplicationRuntime, self).__init__(widget, args)
self.pe_tree_form = None
def main(args=None):
"""Create PE Tree Qt standalone application"""
# Check command line arguments
parser = ArgumentParser(description="PE-Tree")
parser.add_argument("filenames", nargs="*", help="Path(s) to file/folder/zip")
args = parser.parse_args()
# Create PE Tree application
application = QtWidgets.QApplication(sys.argv)
window = pe_tree.window.PETreeWindow(application, ApplicationRuntime, args)
sys.exit(application.exec_())
if __name__ == "__main__":
main()
|
model_to_onnx.py | David-zaiwang/RGBD_segmentation | 128 | 11124953 | # -*- coding: utf-8 -*-
"""
.. codeauthor:: <NAME> <<EMAIL>>
.. codeauthor:: <NAME> <<EMAIL>>
"""
import argparse
import os
import numpy as np
import torch
from src.args import ArgumentParserRGBDSegmentation
from src.build_model import build_model
from src.prepare_data import prepare_data
if __name__ == '__main__':
# arguments
parser = ArgumentParserRGBDSegmentation(
description='Efficient RGBD Indoor Semantic Segmentation (ONNX Export)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.set_common_args()
parser.add_argument('--onnx_opset_version', type=int, default=11,
help='Different versions lead to different results but '
'not all versions are supported for a following '
'TensorRT conversion.')
parser.add_argument('--model_output_name', type=str, default='model',
help='Name for the onnx model that will be saved.')
args = parser.parse_args()
args.pretrained_on_imagenet = False
dataset, _ = prepare_data(args, with_input_orig=True)
model, device = build_model(args, dataset.n_classes_without_void)
os.makedirs('./onnx_models', exist_ok=True)
# load weights
if args.last_ckpt:
checkpoint = torch.load(args.last_ckpt,
map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'], strict=True)
model.eval()
model.to(device)
rgb = np.random.random(size=(1, 3, args.height, args.width))
rgb = rgb.astype(np.float32)
depth = np.random.random(size=(1, 1, args.height, args.width))
depth = depth.astype(np.float32)
onnx_file_path = os.path.join('onnx_models',
f'{args.model_output_name}.onnx')
rgb_torch = torch.from_numpy(rgb)
depth_torch = torch.from_numpy(depth)
rgb_torch = rgb_torch.to(device)
depth_torch = depth_torch.to(device)
if args.modality == 'rgbd':
# rgbd
inp = (rgb_torch, depth_torch)
input_names = ['rgb', 'depth']
elif args.modality == 'rgb':
# rgb
inp = rgb_torch
input_names = ['rgb']
else:
# depth
inp = depth_torch
input_names = ['depth']
torch.onnx.export(model,
inp,
onnx_file_path,
export_params=True,
input_names=input_names,
output_names=['output'],
do_constant_folding=True,
verbose=False,
opset_version=args.onnx_opset_version)
|
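The script above exports the network with input names `rgb` and `depth` and a single `output`. A minimal sketch of how the exported file could be sanity-checked with onnxruntime follows; the 480x640 input size and the default file name `model.onnx` are assumptions, not taken from the repository.

```python
# Hypothetical smoke test for the exported graph (input resolution is assumed).
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('onnx_models/model.onnx')
rgb = np.random.random((1, 3, 480, 640)).astype(np.float32)
depth = np.random.random((1, 1, 480, 640)).astype(np.float32)
# Input names correspond to those passed to torch.onnx.export above.
outputs = session.run(['output'], {'rgb': rgb, 'depth': depth})
print(outputs[0].shape)
```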
gym_collision_avoidance/envs/policies/CADRL/scripts/multi/global_var.py | echtesmikkelbeer21/gym-collision-avoidance | 128 | 11124986 | <reponame>echtesmikkelbeer21/gym-collision-avoidance<filename>gym_collision_avoidance/envs/policies/CADRL/scripts/multi/global_var.py
#!/usr/bin/env python
# global variables
import numpy as np
COLLISION_COST = -0.25
DIST_2_GOAL_THRES = 0.05
GETTING_CLOSE_PENALTY = -0.05
GETTING_CLOSE_RANGE = 0.2
EPS = 1e-5
SMOOTH_COST = -0.5
# terminal states
NON_TERMINAL=0
COLLIDED=1
REACHED_GOAL=2
TRAINING_DT=1.0
# plotting colors
plt_colors = []
plt_colors.append([0.8500, 0.3250, 0.0980]) # red
plt_colors.append([0.0, 0.4470, 0.7410]) # blue
plt_colors.append([0.4660, 0.6740, 0.1880]) # green
plt_colors.append([0.4940, 0.1840, 0.5560]) # purple
plt_colors.append([0.9290, 0.6940, 0.1250]) # orange
plt_colors.append([0.3010, 0.7450, 0.9330]) # cyan
plt_colors.append([0.6350, 0.0780, 0.1840]) # chocolate
# more plotting purpose
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
# plt_colors.append([0.0, 0.0, 0.0]) # black
RL_gamma = 0.97
RL_dt_normal = 0.5
# neural network input/output vectors
# input_avg_vec = [np.array([10.0, 0.8, 0.7, 0.0, 0.65, 0.0, 0.35]), \
# np.array([0.0, 0.0, 0.0, 0.0, 0.35, 1.50, 8.0, 0.0])]
# input_std_vec = [np.array([8.0, 0.4, 0.4, 0.7, 0.5, 0.3, 0.2]), \
# np.array([0.8, 0.8, 8.0, 8.0, 0.2, 1.50, 6.0, 0.5])]
# output_avg_vec = np.array([0.5])
# output_std_vec = np.array([0.4])
input_avg_vec = [np.array([7.0, 0.8, 0.7, 0.0, 0.65, 0.0, 0.35]), \
np.array([0.0, 0.0, 0.0, 0.0, 0.35, 0.7, 4.0, 0.5])]
input_std_vec = [np.array([5.0, 0.4, 0.4, 0.7, 0.5, 0.3, 0.2]), \
np.array([0.8, 0.8, 4.0, 4.0, 0.2, 0.4, 4.0, 0.5])]
output_avg_vec = np.array([0.5])
output_std_vec = np.array([0.4])
NN_ranges = list()
NN_ranges.append(input_avg_vec); NN_ranges.append(input_std_vec)
NN_ranges.append(output_avg_vec); NN_ranges.append(output_std_vec)
# param computed from data
# input_avg_vec [ 3.6982656 0.83099974 0.7252522 0.0514733 -0.13318091 0.10315574
# 0.26505782 -0.11795232 0.39575324 0.40368014 0.79943338 0.65456946
# 0.02955785 2.90838236]
# input_std_vec [ 2.91446845 0.34856427 0.34699031 0.64742148 0.55879675 0.45500179
# 3.59396867 2.08934841 0.0561011 0.05553664 0.0812654 0.4022663
# 0.23505923 2.642269 ]
# out_avg_vec [ 0.43739441]
# output_std_vec [ 0.3618484]
|
PyGithub_examples/get_rate_limit_info.py | DazEB2/SimplePyScripts | 117 | 11125018 | <reponame>DazEB2/SimplePyScripts<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install pygithub
from github import Github
from config import LOGIN, PASSWORD
gh = Github(LOGIN, PASSWORD)
print('With auth:')
print(' rate_limiting:', gh.rate_limiting)
print(' rate_limiting_resettime:', gh.rate_limiting_resettime)
print(' gh.get_rate_limit():', gh.get_rate_limit())
print()
gh = Github()
print('Without auth:')
print(' rate_limiting:', gh.rate_limiting)
print(' rate_limiting_resettime:', gh.rate_limiting_resettime)
print(' gh.get_rate_limit():', gh.get_rate_limit())
|
torchmultimodal/modules/encoders/clip_resnet_encoder.py | facebookresearch/multimodal | 128 | 11125036 | <filename>torchmultimodal/modules/encoders/clip_resnet_encoder.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# This code is based on https://github.com/openai/CLIP/blob/main/clip/model.py
from collections import OrderedDict
from typing import Tuple
import torch
import torch.nn.functional as F
from torch import nn
EXPANSION = 4
class ResNetForCLIPBottleneck(nn.Module):
def __init__(self, inplanes: int, planes: int, stride: int = 1):
super().__init__()
# all conv layers have stride 1.
# an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * EXPANSION, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * EXPANSION)
self.relu3 = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * EXPANSION:
# downsampling layer is prepended with an avgpool,
# and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict(
[
("-1", nn.AvgPool2d(stride)),
(
"0",
nn.Conv2d(
inplanes,
planes * EXPANSION,
1,
stride=1,
bias=False,
),
),
("1", nn.BatchNorm2d(planes * EXPANSION)),
]
)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x
out = self.relu1(self.bn1(self.conv1(x)))
out = self.relu2(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu3(out)
return out
class AttentionPool2d(nn.Module):
def __init__(
self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x.reshape(x.shape[0], x.shape[1], -1).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False,
)
return x[0]
class ResNetForCLIP(nn.Module):
"""Modified ResNet used by CLIP.
Based on https://github.com/openai/CLIP/blob/main/clip/model.py#L93, this class
differs from Torchvision's ResNet in the following ways:
- There are now 3 "stem" convolutions as opposed to 1, with an
average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is
prepended to convolutions with stride > 1.
- The final pooling layer is a QKV attention instead of an average pool.
Args:
layers (Tuple[int]):
output_dim (int): dimension of output tensor
heads (int): number of heads in the attention pooling layer
input_resolution (int): resolution of image input to encoder
width (int): ResNet width
use_clip_init (bool): Whether to use CLIP-specific initialization.
Inputs:
x (Tensor): Tensor containing image features
"""
def __init__(
self,
layers: Tuple[int, int, int, int] = (3, 4, 6, 3),
output_dim: int = 512,
heads: int = 1024,
input_resolution: int = 224,
width: int = 64,
use_clip_init: bool = True,
):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(
3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(width // 2)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
width // 2, width // 2, kernel_size=3, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(width // 2)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.relu3 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(2)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(
input_resolution // 32, embed_dim, heads, output_dim
)
if use_clip_init:
self.initialize_parameters()
def _make_layer(self, planes: int, blocks: int, stride=1) -> nn.Module:
layers = [ResNetForCLIPBottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * EXPANSION
for _ in range(1, blocks):
layers.append(ResNetForCLIPBottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def initialize_parameters(self):
if self.attnpool is not None:
std = self.attnpool.c_proj.in_features**-0.5
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
# Zero-initialize each block's third batch normalization weights
# Based on CLIP initialization in https://git.io/JDbGX
for resnet_block in [
self.layer1,
self.layer2,
self.layer3,
self.layer4,
]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def forward(self, x: torch.Tensor) -> torch.Tensor:
def stem(x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
|
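As a rough orientation for the encoder above, a minimal usage sketch; the constructor arguments simply restate the defaults from the class signature, and the batch size is arbitrary.

```python
# Illustrative sketch only: embedding a batch of images with ResNetForCLIP.
import torch

encoder = ResNetForCLIP(layers=(3, 4, 6, 3), output_dim=512, heads=1024,
                        input_resolution=224, width=64)
images = torch.randn(2, 3, 224, 224)   # NCHW; H and W must match input_resolution
embeddings = encoder(images)           # attention-pooled features of shape (2, 512)
```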
survae/transforms/bijections/permute_axes.py | alisiahkoohi/survae_flows | 262 | 11125083 | import torch
from collections.abc import Iterable
from survae.transforms.bijections import Bijection
class PermuteAxes(Bijection):
def __init__(self, permutation):
super(PermuteAxes, self).__init__()
assert isinstance(permutation, Iterable), 'permutation must be an Iterable'
assert permutation[0] == 0, 'First element of permutation must be 0 (such that batch dimension stays intact)'
self.permutation = permutation
self.inverse_permutation = torch.argsort(torch.tensor(self.permutation)).tolist()
def forward(self, x):
z = x.permute(self.permutation).contiguous()
ldj = torch.zeros((x.shape[0],), device=x.device, dtype=x.dtype)
return z, ldj
def inverse(self, z):
x = z.permute(self.inverse_permutation).contiguous()
return x
|
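A short round-trip sketch for the PermuteAxes bijection above: permuting axes is volume-preserving, so the returned log-determinant is zero and inverse() recovers the input exactly.

```python
# Illustrative sketch only: forward/inverse round trip with PermuteAxes.
import torch

x = torch.randn(8, 3, 32, 32)          # (batch, C, H, W)
bijection = PermuteAxes((0, 2, 3, 1))  # move channels last -> (batch, H, W, C)
z, ldj = bijection.forward(x)
assert z.shape == (8, 32, 32, 3)
assert torch.all(ldj == 0)             # a permutation of axes changes no volume
assert torch.equal(bijection.inverse(z), x)
```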
tests/util_test.py | ale180192/googleads-python-lib | 601 | 11125086 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover the errors module."""
from contextlib import contextmanager
import logging
import os
import unittest
import googleads.ad_manager
import googleads.adwords
import googleads.errors
import googleads.util
from lxml import etree
import mock
class PatchesTest(unittest.TestCase):
"""Tests for the PatchHelper utility."""
def setUp(self):
oauth_header = {'Authorization': 'header'}
oauth2_client = mock.Mock()
oauth2_client.CreateHttpHeader.return_value = dict(oauth_header)
# AdWordsClient setup
client_customer_id = 'client customer id'
dev_token = '<PASSWORD>'
user_agent = '4M153rAb13p1l30F53CR37s'
self.adwords_client = googleads.adwords.AdWordsClient(
dev_token, oauth2_client, user_agent,
client_customer_id=client_customer_id)
# AdWordsClient setup (compression enabled)
self.adwords_client_with_compression = googleads.adwords.AdWordsClient(
dev_token, oauth2_client, user_agent,
client_customer_id=client_customer_id,
enable_compression=True)
self.adwords_version = sorted(googleads.adwords._SERVICE_MAP.keys())[-1]
self.adwords_namespace_partial = (
'https://adwords.google.com/api/adwords/%s/' + self.adwords_version)
# AdManagerClient setup
network_code = '12345'
application_name = 'application name'
self.ad_manager_client = googleads.ad_manager.AdManagerClient(
oauth2_client, application_name, network_code)
self.ad_manager_version = sorted(
googleads.ad_manager._SERVICE_MAP.keys())[-1]
self.ad_manager_namespace = (
'https://www.google.com/apis/ads/publisher/%s'
% self.ad_manager_version)
@contextmanager
def mock_fault_response(self, response_file, version):
test_dir = os.path.dirname(__file__)
with open(os.path.join(test_dir, response_file), 'rb') as handler:
response_data = handler.read()
response_data = response_data.replace(b'{VERSION}', bytes(version, 'utf-8'))
with mock.patch('zeep.transports.Transport.post') as mock_post:
reply_instance = mock.Mock()
reply_instance.status_code = 500
reply_instance.headers = {}
reply_instance.content = response_data
mock_post.return_value = reply_instance
yield
def testSingleErrorListIssue90(self):
"""Verifies that issue 90 has been resolved with the patch."""
query = 'WHERE 1 = 0'
statement = googleads.ad_manager.FilterStatement(query)
line_item_service = self.ad_manager_client.GetService('LineItemService')
line_item_action = {'xsi_type': 'ActivateLineItems'}
st = statement.ToStatement()
with self.mock_fault_response(
'test_data/fault_response_envelope.txt', self.ad_manager_version):
try:
line_item_service.performLineItemAction(line_item_action, st)
except googleads.errors.GoogleAdsServerFault as e:
self.assertEqual(1, len(e.errors))
def testSingleErrorListIssue90_emptyErrors(self):
"""Verifies that issue 90 has been resolved with the patch."""
query = 'WHERE 1 = 0'
statement = googleads.ad_manager.FilterStatement(query)
line_item_service = self.ad_manager_client.GetService('LineItemService')
line_item_action = {'xsi_type': 'ActivateLineItems'}
st = statement.ToStatement()
with self.mock_fault_response(
'test_data/empty_fault_response_envelope.txt', self.ad_manager_version):
try:
line_item_service.performLineItemAction(line_item_action, st)
except googleads.errors.GoogleAdsServerFault as e:
self.assertEqual(0, len(e.errors))
def testSingleErrorListIssue90_multipleErrors(self):
"""Verifies that issue 90 has been resolved with the patch."""
query = 'WHERE 1 = 0'
statement = googleads.ad_manager.FilterStatement(query)
line_item_service = self.ad_manager_client.GetService('LineItemService')
line_item_action = {'xsi_type': 'ActivateLineItems'}
st = statement.ToStatement()
with self.mock_fault_response(
'test_data/multi_errors_fault_response_envelope.txt',
self.ad_manager_version):
try:
line_item_service.performLineItemAction(line_item_action, st)
except googleads.errors.GoogleAdsServerFault as e:
self.assertEqual(2, len(e.errors))
class GoogleAdsCommonFilterTest(unittest.TestCase):
"""Tests for the GoogleAdsCommonFilter utility."""
def setUp(self):
self.filter = googleads.util.GetGoogleAdsCommonFilter()
self.redacted_text = 'REDACTED'
self.dev_token_template = ('<tns:developerToken>%s</tns:developerToken>')
def testGetGoogleAdsCommonFilter(self):
self.assertIs(self.filter, googleads.util.GetGoogleAdsCommonFilter())
def testFilterAtInfoLevel(self):
record = mock.Mock()
record.levelno = logging.INFO
doc = mock.Mock()
doc.str.return_value = self.dev_token_template % 'test'
record.args = [doc]
self.filter.filter(record)
self.assertEqual(
record.args, (self.dev_token_template % self.redacted_text,))
def testFilterAtUnfilteredLevel(self):
expected_args = (1, 2, 3, 4, 5)
record = mock.Mock()
record.levelno = logging.DEBUG
record.args = expected_args
self.filter.filter(record)
self.assertEqual(record.args, expected_args)
XML_WITH_DEV_TOKEN = (
'<abc xmlns:ns0="http://abc">'
'<ns0:developerToken>a token</ns0:developerToken>'
'<sdf>hi</sdf>'
'</abc>')
XML_WITH_DEV_TOKEN_SAFE = etree.fromstring(
XML_WITH_DEV_TOKEN.replace('a token', 'REDACTED'))
XML_WITH_DEV_TOKEN = etree.fromstring(XML_WITH_DEV_TOKEN)
XML_PRETTY = etree.tostring(XML_WITH_DEV_TOKEN, pretty_print=True)
XML_PRETTY_SAFE = etree.tostring(XML_WITH_DEV_TOKEN_SAFE, pretty_print=True)
XML_PRETTY_SAFE = XML_PRETTY_SAFE.decode('utf-8')
XML_WITH_FAULT = etree.fromstring(
'<abc xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope/">'
'<ns0:Header>'
'<child><key>value</key></child>'
'</ns0:Header>'
'<ns0:Fault>'
'<faultstring>hi</faultstring>'
'</ns0:Fault>'
'</abc>')
ZEEP_HEADER = {'abc': 'hi', 'authorization': 'secret'}
ZEEP_HEADER_SAFE = ZEEP_HEADER.copy()
ZEEP_HEADER_SAFE['authorization'] = 'REDACTED'
BINDING_OPTIONS = {'address': 'myaddress'}
class ZeepLoggerTest(unittest.TestCase):
def setUp(self):
self.logger = mock.Mock()
self.zeep_logger = googleads.util.ZeepLogger()
self.zeep_logger._logger = self.logger
self.operation = mock.Mock()
self.operation.name = 'opname'
self.operation.binding.wsdl.services.keys.return_value = ['service_name']
def enable_log_levels(self, *levels):
self.logger.isEnabledFor.side_effect = lambda lvl: lvl in levels
def testIngressPassesThroughArguments(self):
self.enable_log_levels()
# This result isn't related to logging, just confirming that the plugin
# is passing along the data unmodified.
result = self.zeep_logger.ingress(
XML_WITH_DEV_TOKEN, ZEEP_HEADER, self.operation)
self.assertEqual(result, (XML_WITH_DEV_TOKEN, ZEEP_HEADER))
def testIngressNoLoggingByDefault(self):
self.enable_log_levels()
self.zeep_logger.ingress(
XML_WITH_DEV_TOKEN, ZEEP_HEADER, self.operation)
self.logger.debug.assert_not_called()
def testIngressDebugLogging(self):
self.enable_log_levels(logging.DEBUG)
# Re-using the XML with <developerToken> for convenience, but this
# is testing inbound data, which would not have anything sensitive in it.
self.zeep_logger.ingress(
XML_WITH_DEV_TOKEN, ZEEP_HEADER, self.operation)
self.logger.debug.assert_called_once_with(
googleads.util._RESPONSE_XML_LOG_LINE, XML_PRETTY)
def testIngressFaultLogging(self):
self.enable_log_levels(logging.WARN)
self.zeep_logger.ingress(
XML_WITH_FAULT, ZEEP_HEADER, self.operation)
self.assertEqual({'faultMessage': 'hi',
'key': 'value',
'methodName': 'opname',
'serviceName': 'service_name'},
self.logger.warn.mock_calls[0][1][1])
def testEgressPassesThroughArguments(self):
self.enable_log_levels()
self.enable_log_levels(logging.DEBUG)
# This result isn't related to logging, just confirming that the plugin
# is passing along the data unmodified.
result = self.zeep_logger.egress(
XML_WITH_DEV_TOKEN, ZEEP_HEADER, self.operation, BINDING_OPTIONS)
self.assertEqual(result, (XML_WITH_DEV_TOKEN, ZEEP_HEADER))
def testEgressNoLoggingByDefault(self):
self.enable_log_levels()
self.zeep_logger.egress(
XML_WITH_DEV_TOKEN, ZEEP_HEADER, self.operation, BINDING_OPTIONS)
self.logger.debug.assert_not_called()
def testEgressDebugLogging(self):
self.enable_log_levels(logging.DEBUG)
self.zeep_logger.egress(
XML_WITH_DEV_TOKEN, ZEEP_HEADER, self.operation, BINDING_OPTIONS)
# With egress, they should be redacted.
self.logger.debug.assert_called_once_with(
googleads.util._REQUEST_XML_LOG_LINE, ZEEP_HEADER_SAFE, XML_PRETTY_SAFE)
def testEgressInfoLogging(self):
self.enable_log_levels(logging.INFO)
self.zeep_logger.egress(
XML_WITH_DEV_TOKEN, ZEEP_HEADER, self.operation, BINDING_OPTIONS)
self.logger.info.assert_called_once_with(
googleads.util._REQUEST_LOG_LINE, 'service_name', 'opname', 'myaddress')
if __name__ == '__main__':
unittest.main()
|
tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_norm_fw_output.mod.py | juitem/ONE | 255 | 11125090 | <reponame>juitem/ONE
#
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Bidirectional Sequence LSTM Test:
# FLOAT32, Layer Normalization, No Cifg, Peephole, Projection, and No Clipping.
# Verifies forward output only.
n_batch = 2
n_input = 5
n_cell = 4
n_output = 3
max_time = 3
input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
fw_input_to_input_weights = Input(
"fw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
fw_input_to_forget_weights = Input(
"fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
fw_input_to_cell_weights = Input(
"fw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
fw_input_to_output_weights = Input(
"fw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
fw_recurrent_to_input_weights = Input(
"fw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
fw_recurrent_to_forget_weights = Input(
"fw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
fw_recurrent_to_cell_weights = Input(
"fw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
fw_recurrent_to_output_weights = Input(
"fw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
fw_cell_to_input_weights = Input(
"fw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
fw_cell_to_forget_weights = Input(
"fw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
fw_cell_to_output_weights = Input(
"fw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
fw_input_gate_bias = Input(
"fw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
fw_forget_gate_bias = Input(
"fw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
fw_cell_bias = Input(
"fw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
fw_output_gate_bias = Input(
"fw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
fw_projection_weights = Input(
"fw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
fw_projection_bias = Input(
"fw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
bw_input_to_input_weights = Input(
"bw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
bw_input_to_forget_weights = Input(
"bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
bw_input_to_cell_weights = Input(
"bw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
bw_input_to_output_weights = Input(
"bw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
bw_recurrent_to_input_weights = Input(
"bw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
bw_recurrent_to_forget_weights = Input(
"bw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
bw_recurrent_to_cell_weights = Input(
"bw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
bw_recurrent_to_output_weights = Input(
"bw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
bw_cell_to_input_weights = Input(
"bw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
bw_cell_to_forget_weights = Input(
"bw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
bw_cell_to_output_weights = Input(
"bw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
bw_input_gate_bias = Input(
"bw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
bw_forget_gate_bias = Input(
"bw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
bw_cell_bias = Input(
"bw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
bw_output_gate_bias = Input(
"bw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
bw_projection_weights = Input(
"bw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
bw_projection_bias = Input(
"bw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
fw_activation_state = Input(
"fw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
fw_cell_state = Input(
"fw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
bw_activation_state = Input(
"bw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
bw_cell_state = Input(
"bw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
aux_input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
fw_aux_input_to_input_weights = Input(
"fw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
fw_aux_input_to_forget_weights = Input(
"fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
fw_aux_input_to_cell_weights = Input(
"fw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
fw_aux_input_to_output_weights = Input(
"fw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
bw_aux_input_to_input_weights = Input(
"bw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
bw_aux_input_to_forget_weights = Input(
"bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
bw_aux_input_to_cell_weights = Input(
"bw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
bw_aux_input_to_output_weights = Input(
"bw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
fw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
fw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
fw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
fw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
bw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
bw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
bw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
bw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
fw_output=Output("fw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
bw_output=IgnoredOutput("bw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
def test(
name,
input_data=[],
fw_input_to_input_weights_data=[],
fw_input_to_forget_weights_data=[],
fw_input_to_cell_weights_data=[],
fw_input_to_output_weights_data=[],
fw_recurrent_to_input_weights_data=[],
fw_recurrent_to_forget_weights_data=[],
fw_recurrent_to_cell_weights_data=[],
fw_recurrent_to_output_weights_data=[],
fw_cell_to_input_weights_data=[],
fw_cell_to_forget_weights_data=[],
fw_cell_to_output_weights_data=[],
fw_input_gate_bias_data=[],
fw_forget_gate_bias_data=[],
fw_cell_bias_data=[],
fw_output_gate_bias_data=[],
fw_projection_weights_data=[],
fw_projection_bias_data=[],
bw_input_to_input_weights_data=[],
bw_input_to_forget_weights_data=[],
bw_input_to_cell_weights_data=[],
bw_input_to_output_weights_data=[],
bw_recurrent_to_input_weights_data=[],
bw_recurrent_to_forget_weights_data=[],
bw_recurrent_to_cell_weights_data=[],
bw_recurrent_to_output_weights_data=[],
bw_cell_to_input_weights_data=[],
bw_cell_to_forget_weights_data=[],
bw_cell_to_output_weights_data=[],
bw_input_gate_bias_data=[],
bw_forget_gate_bias_data=[],
bw_cell_bias_data=[],
bw_output_gate_bias_data=[],
bw_projection_weights_data=[],
bw_projection_bias_data=[],
fw_activation_state_data=[],
fw_cell_state_data=[],
bw_activation_state_data=[],
bw_cell_state_data=[],
aux_input_data=[],
fw_aux_input_to_input_weights_data=[],
fw_aux_input_to_forget_weights_data=[],
fw_aux_input_to_cell_weights_data=[],
fw_aux_input_to_output_weights_data=[],
bw_aux_input_to_input_weights_data=[],
bw_aux_input_to_forget_weights_data=[],
bw_aux_input_to_cell_weights_data=[],
bw_aux_input_to_output_weights_data=[],
fw_input_layer_norm_weights_data=[],
fw_forget_layer_norm_weights_data=[],
fw_cell_layer_norm_weights_data=[],
fw_output_layer_norm_weights_data=[],
bw_input_layer_norm_weights_data=[],
bw_forget_layer_norm_weights_data=[],
bw_cell_layer_norm_weights_data=[],
bw_output_layer_norm_weights_data=[],
fw_output_data=[],
bw_output_data=[],):
activation = Int32Scalar("activation", 4)
cell_clip = Float32Scalar("cell_clip", 0.0)
proj_clip = Float32Scalar("proj_clip", 0.0)
merge_outputs = BoolScalar("merge_outputs", False)
time_major = BoolScalar("time_major", True)
model = Model().Operation(
"BIDIRECTIONAL_SEQUENCE_LSTM",
input,
fw_input_to_input_weights,
fw_input_to_forget_weights,
fw_input_to_cell_weights,
fw_input_to_output_weights,
fw_recurrent_to_input_weights,
fw_recurrent_to_forget_weights,
fw_recurrent_to_cell_weights,
fw_recurrent_to_output_weights,
fw_cell_to_input_weights,
fw_cell_to_forget_weights,
fw_cell_to_output_weights,
fw_input_gate_bias,
fw_forget_gate_bias,
fw_cell_bias,
fw_output_gate_bias,
fw_projection_weights,
fw_projection_bias,
bw_input_to_input_weights,
bw_input_to_forget_weights,
bw_input_to_cell_weights,
bw_input_to_output_weights,
bw_recurrent_to_input_weights,
bw_recurrent_to_forget_weights,
bw_recurrent_to_cell_weights,
bw_recurrent_to_output_weights,
bw_cell_to_input_weights,
bw_cell_to_forget_weights,
bw_cell_to_output_weights,
bw_input_gate_bias,
bw_forget_gate_bias,
bw_cell_bias,
bw_output_gate_bias,
bw_projection_weights,
bw_projection_bias,
fw_activation_state,
fw_cell_state,
bw_activation_state,
bw_cell_state,
aux_input,
fw_aux_input_to_input_weights,
fw_aux_input_to_forget_weights,
fw_aux_input_to_cell_weights,
fw_aux_input_to_output_weights,
bw_aux_input_to_input_weights,
bw_aux_input_to_forget_weights,
bw_aux_input_to_cell_weights,
bw_aux_input_to_output_weights,
activation, cell_clip, proj_clip, merge_outputs, time_major,
fw_input_layer_norm_weights,
fw_forget_layer_norm_weights,
fw_cell_layer_norm_weights,
fw_output_layer_norm_weights,
bw_input_layer_norm_weights,
bw_forget_layer_norm_weights,
bw_cell_layer_norm_weights,
bw_output_layer_norm_weights,).To(fw_output, bw_output)
example = Example(
{
input: input_data,
fw_input_to_input_weights: fw_input_to_input_weights_data,
fw_input_to_forget_weights: fw_input_to_forget_weights_data,
fw_input_to_cell_weights: fw_input_to_cell_weights_data,
fw_input_to_output_weights: fw_input_to_output_weights_data,
fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
fw_cell_to_input_weights: fw_cell_to_input_weights_data,
fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
fw_cell_to_output_weights: fw_cell_to_output_weights_data,
fw_input_gate_bias: fw_input_gate_bias_data,
fw_forget_gate_bias: fw_forget_gate_bias_data,
fw_cell_bias: fw_cell_bias_data,
fw_output_gate_bias: fw_output_gate_bias_data,
fw_projection_weights: fw_projection_weights_data,
fw_projection_bias: fw_projection_bias_data,
bw_input_to_input_weights: bw_input_to_input_weights_data,
bw_input_to_forget_weights: bw_input_to_forget_weights_data,
bw_input_to_cell_weights: bw_input_to_cell_weights_data,
bw_input_to_output_weights: bw_input_to_output_weights_data,
bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
bw_cell_to_input_weights: bw_cell_to_input_weights_data,
bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
bw_cell_to_output_weights: bw_cell_to_output_weights_data,
bw_input_gate_bias: bw_input_gate_bias_data,
bw_forget_gate_bias: bw_forget_gate_bias_data,
bw_cell_bias: bw_cell_bias_data,
bw_output_gate_bias: bw_output_gate_bias_data,
bw_projection_weights: bw_projection_weights_data,
bw_projection_bias: bw_projection_bias_data,
fw_activation_state: fw_activation_state_data,
fw_cell_state: fw_cell_state_data,
bw_activation_state: bw_activation_state_data,
bw_cell_state: bw_cell_state_data,
aux_input: aux_input_data,
fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
fw_output: fw_output_data,
bw_output: bw_output_data,
},
model=model, name=name)
fw_input_to_input_weights_data = [
0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5, -0.8, 0.7, -0.6,
0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1
]
bw_input_to_input_weights_data = fw_input_to_input_weights_data
fw_input_to_forget_weights_data = [
-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5,
-0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5
]
bw_input_to_forget_weights_data = fw_input_to_forget_weights_data
fw_input_to_cell_weights_data = [
-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1,
-0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6
]
bw_input_to_cell_weights_data = fw_input_to_cell_weights_data
fw_input_to_output_weights_data = [
-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2,
0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4
]
bw_input_to_output_weights_data = fw_input_to_output_weights_data
fw_recurrent_to_input_weights_data = [
-0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6
]
bw_recurrent_to_input_weights_data = fw_recurrent_to_input_weights_data
fw_recurrent_to_forget_weights_data = [
-0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2
]
bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data
fw_recurrent_to_cell_weights_data = [
-0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2
]
bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data
fw_recurrent_to_output_weights_data = [
0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2
]
bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data
fw_cell_to_input_weights_data = [0.05, 0.1, 0.25, 0.15]
bw_cell_to_input_weights_data = fw_cell_to_input_weights_data
fw_cell_to_forget_weights_data = [-0.02, -0.15, -0.25, -0.03]
bw_cell_to_forget_weights_data = fw_cell_to_forget_weights_data
fw_cell_to_output_weights_data = [0.1, -0.1, -0.5, 0.05]
bw_cell_to_output_weights_data = fw_cell_to_output_weights_data
fw_projection_weights_data = [
-0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2
]
bw_projection_weights_data = fw_projection_weights_data
fw_input_gate_bias_data = [0.03, 0.15, 0.22, 0.38]
bw_input_gate_bias_data = fw_input_gate_bias_data
fw_forget_gate_bias_data = [0.1, -0.3, -0.2, 0.1]
bw_forget_gate_bias_data = fw_forget_gate_bias_data
fw_cell_bias_data = [-0.05, 0.72, 0.25, 0.08]
bw_cell_bias_data = fw_cell_bias_data
fw_output_gate_bias_data = [0.05, -0.01, 0.2, 0.1]
bw_output_gate_bias_data = fw_output_gate_bias_data
input_layer_norm_weights_data = [0.1, 0.2, 0.3, 0.5]
forget_layer_norm_weights_data = [0.2, 0.2, 0.4, 0.3]
cell_layer_norm_weights_data = [0.7, 0.2, 0.3, 0.8]
output_layer_norm_weights_data = [0.6, 0.2, 0.2, 0.5]
input_data = [0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1,
0.8, 0.1, 0.2, 0.4, 0.5, 0.1, 0.5, 0.2, 0.4, 0.2,
0.2, 0.7, 0.7, 0.1, 0.7, 0.6, 0.9, 0.2, 0.5, 0.7]
fw_activation_state_data = [0 for _ in range(n_batch * n_output)]
bw_activation_state_data = [0 for _ in range(n_batch * n_output)]
fw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
bw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
fw_golden_output_data = [
0.0244077, 0.128027, -0.00170918, -0.00692428, 0.0848741, 0.063445,
0.0137642, 0.140751, 0.0395835, -0.00403912, 0.139963, 0.072681,
-0.00459231, 0.155278, 0.0837377, 0.00752706, 0.161903, 0.0561371,
]
bw_golden_output_data = [0 for _ in range(n_batch * max_time * n_output)]
test(
name="blackbox",
input_data=input_data,
fw_input_to_input_weights_data=fw_input_to_input_weights_data,
fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
fw_input_to_output_weights_data=fw_input_to_output_weights_data,
fw_recurrent_to_input_weights_data=fw_recurrent_to_input_weights_data,
fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
fw_cell_to_input_weights_data=fw_cell_to_input_weights_data,
fw_cell_to_forget_weights_data=fw_cell_to_forget_weights_data,
fw_cell_to_output_weights_data=fw_cell_to_output_weights_data,
fw_input_gate_bias_data=fw_input_gate_bias_data,
fw_forget_gate_bias_data=fw_forget_gate_bias_data,
fw_cell_bias_data=fw_cell_bias_data,
fw_output_gate_bias_data=fw_output_gate_bias_data,
fw_projection_weights_data=fw_projection_weights_data,
bw_input_to_input_weights_data=bw_input_to_input_weights_data,
bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
bw_input_to_output_weights_data=bw_input_to_output_weights_data,
bw_recurrent_to_input_weights_data=bw_recurrent_to_input_weights_data,
bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
bw_cell_to_input_weights_data=bw_cell_to_input_weights_data,
bw_cell_to_forget_weights_data=bw_cell_to_forget_weights_data,
bw_cell_to_output_weights_data=bw_cell_to_output_weights_data,
bw_input_gate_bias_data=bw_input_gate_bias_data,
bw_forget_gate_bias_data=bw_forget_gate_bias_data,
bw_cell_bias_data=bw_cell_bias_data,
bw_output_gate_bias_data=bw_output_gate_bias_data,
bw_projection_weights_data=bw_projection_weights_data,
fw_activation_state_data = fw_activation_state_data,
bw_activation_state_data = bw_activation_state_data,
fw_cell_state_data = fw_cell_state_data,
bw_cell_state_data = bw_cell_state_data,
fw_input_layer_norm_weights_data = input_layer_norm_weights_data,
fw_forget_layer_norm_weights_data = forget_layer_norm_weights_data,
fw_cell_layer_norm_weights_data = cell_layer_norm_weights_data,
fw_output_layer_norm_weights_data = output_layer_norm_weights_data,
bw_input_layer_norm_weights_data = input_layer_norm_weights_data,
bw_forget_layer_norm_weights_data = forget_layer_norm_weights_data,
bw_cell_layer_norm_weights_data = cell_layer_norm_weights_data,
bw_output_layer_norm_weights_data = output_layer_norm_weights_data,
fw_output_data=fw_golden_output_data,
bw_output_data=bw_golden_output_data
)
|
visual_inspector/figure_custom/cloud_figures_custom.py | m4ta1l/deep-neuroevolution | 883 | 11125094 | <reponame>m4ta1l/deep-neuroevolution<gh_stars>100-1000
"""Customerized Cloud Figures"""
from figure_base.cloud_figures import CloudPlot
class CloudPlotHDBC(CloudPlot):
"""Cloud plot to show trajectory as Hi Dim BCs"""
def __init__(self, *args, **kwargs):
CloudPlot.__init__(self, *args, **kwargs)
self.hd_bc, = self.main_ax.plot([], [], color='k', linewidth=3)
def show_new_labels_dp(self, thisData):
CloudPlot.show_new_labels_dp(self, thisData)
self.hd_bc.set_data(thisData.x, thisData.y)
def clear_labels(self):
CloudPlot.clear_labels(self)
self.hd_bc.set_data([], [])
class CloudPlotRollout(CloudPlot):
"""Cloud plot with policy rollout"""
def __init__(self, *args, **kwargs):
CloudPlot.__init__(self, *args, **kwargs)
self.traj_plots = []
def button_3(self, artist, ind):
from figure_custom.rollout_trajectory import rolloutMaker
print("rolling out!!")
gen = self.artist2gen[artist]
this_data = self.fetch_data_point(artist, ind)
if self.get_policy_file(gen) is not None:
self.traj_plots.append(rolloutMaker(gen, this_data, self))
class CloudPlotRolloutAtari(CloudPlot):
"""Cloud plot with policy rollout"""
def button_3(self, artist, ind):
from figure_custom.rollout_custom import RolloutAtari
print("rolling out!!")
gen = self.artist2gen[artist]
print(gen)
this_data = self.fetch_data_point(artist, ind)
policy_file = self.get_policy_file(gen)
if policy_file is None:
return
noise_stdev = self.get_parent_op_data(gen)[-1]
if this_data.parentOrNot:
seed = int(self.get_parent_op_data(gen)[-2])
print(self.get_parent_op_data(gen))
else:
seed = int(this_data.child_op_data[-2])
print(this_data.child_op_data)
x, y, f = this_data.x[-1], this_data.y[-1], this_data.fitness
record = "snapshots/snapshot_gen_{:04}/clips/x_{:.2f}_y_{:.2f}_f{:.2f}".format(
this_data.gen, x, y, f)
RolloutAtari.setup_and_rollout_policy(policy_file, this_data,
noise_stdev=noise_stdev, fixed_seed=seed,
render=True, path=self.path, record=record)
import subprocess
subprocess.call(["open {}/*.mp4".format(self.path+record)], shell=True)
|
tacker/tests/unit/sol_refactored/objects/test_base.py | h1r0mu/tacker | 116 | 11125107 | <gh_stars>100-1000
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from unittest import mock
from oslo_serialization import jsonutils
from tacker.sol_refactored.objects import base
from tacker.sol_refactored.objects import fields
from tacker.tests import base as tests_base
@base.TackerObjectRegistry.register
class MyObj(base.TackerObject):
VERSION = '1.0'
fields = {
'id': fields.StringField(nullable=False),
'data': fields.StringField(nullable=False),
'listData': fields.ListOfObjectsField(
'MySubObj', nullable=True),
'createdAt': fields.DateTimeField(nullable=False),
}
@base.TackerObjectRegistry.register
class MySubObj(base.TackerObject):
VERSION = '1.0'
fields = {
'id': fields.StringField(nullable=False),
'data': fields.StringField(nullable=False),
}
@base.TackerObjectRegistry.register
class MyDBObj(MyObj, base.TackerPersistentObject):
pass
class TestTackerObject(tests_base.BaseTestCase):
def test_tacker_obj_get_changes(self):
o = MyObj.from_dict({'id': 'foo',
'data': 'abcde',
'createdAt': '2021-09-01T12:34:56+09:00'})
o.obj_reset_changes()
self.assertEqual(o.tacker_obj_get_changes(), {})
o.data = '12345'
o.createdAt = datetime.datetime(2021, 8, 7, 6, 5, 44,
tzinfo=datetime.timezone(datetime.timedelta(hours=-9)))
changes = o.tacker_obj_get_changes()
self.assertEqual(len(changes), 2)
self.assertIn('data', changes)
self.assertIsNone(changes['createdAt'].tzinfo)
self.assertEqual(changes['createdAt'].hour, o.createdAt.hour + 9)
def test_from_dict(self):
o = MyObj.from_dict({'id': 'foo',
'data': 'abcde',
'listData': [
{'id': 'foo1', 'data': 'bar1'},
{'id': 'foo2', 'data': 'bar2'},
],
'createdAt': '2021-09-01T12:34:56+09:00'})
self.assertEqual(o.id, 'foo')
self.assertIsInstance(o.createdAt, datetime.datetime)
self.assertEqual(len(o.listData), 2)
self.assertEqual(o.listData[1].data, 'bar2')
class TestTackerObjectSerializer(tests_base.BaseTestCase):
def test_serialize_entity(self):
serializer = base.TackerObjectSerializer()
o = MyObj.from_dict({'id': 'foo',
'data': 'abcde',
'listData': [
{'id': 'foo1', 'data': 'bar1'},
{'id': 'foo2', 'data': 'bar2'},
],
'createdAt': '2021-09-01T12:34:56+09:00'})
entity = serializer.serialize_entity(mock.Mock(), o)
self.assertEqual(entity['tacker_sol_refactored_object.name'], 'MyObj')
self.assertEqual(entity['tacker_sol_refactored_object.namespace'],
'tacker_sol_refactored')
data = entity['tacker_sol_refactored_object.data']
self.assertEqual(set(data.keys()),
set(['id', 'data', 'listData', 'createdAt']))
o2 = serializer.deserialize_entity(mock.Mock(), entity)
self.assertEqual(o2.listData[1].id, o.listData[1].id)
self.assertEqual(o2.createdAt, o.createdAt)
class TestTackerPersistentObject(tests_base.BaseTestCase):
def test_from_db_obj(self):
o = MyDBObj.from_db_obj(
{'id': 'foo', 'data': 'abcde',
'listData': '[{"id": "foo1", "data": "bar1"},'
'{"id": "foo2", "data": "bar2"}]',
'createdAt': datetime.datetime(2021, 9, 1, 12, 34, 56)})
self.assertEqual(o.id, 'foo')
self.assertEqual(len(o.listData), 2)
self.assertEqual(o.listData[0].data, 'bar1')
def test_to_db_obj(self):
o = MyDBObj.from_dict({'id': 'foo',
'data': 'abcde',
'listData': [
{'id': 'foo1', 'data': 'bar1'},
{'id': 'foo2', 'data': 'bar2'},
],
'createdAt': '2021-09-01T12:34:56'})
dbobj = o.to_db_obj()
self.assertEqual(jsonutils.loads(dbobj['listData']),
[{"id": "foo1", "data": "bar1"}, {"id": "foo2", "data": "bar2"}])
|
functions/FillRaster.py | mmfink/raster-functions | 173 | 11125109 | import numpy as np
class FillRaster():
def __init__(self):
self.name = "Fill Raster Function"
self.description = ("")
self.fillValue = 0.
def getParameterInfo(self):
return [
{
'name': 'raster',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': "Input Raster",
'description': ""
},
{
'name': 'value',
'dataType': 'numeric',
'value': 0,
'required': True,
'displayName': "Fill Value",
'description': ("")
},
]
def updateRasterInfo(self, **kwargs):
b = kwargs['raster_info']['bandCount']
self.fillValue = kwargs.get('value', 0.)
kwargs['output_info']['statistics'] = b * ({'minimum': self.fillValue, 'maximum': self.fillValue}, )
kwargs['output_info']['histogram'] = ()
return kwargs
def updatePixels(self, tlc, shape, props, **pixelBlocks):
pixelBlocks['output_pixels'] = np.full(shape, self.fillValue, dtype=props['pixelType'])
return pixelBlocks
|
tests/test_issues.py | cstoltze/fhir.resources | 144 | 11125148 | <gh_stars>100-1000
# _*_ coding: utf-8 _*_
from pydantic import ValidationError
from fhir.resources.patient import Patient
__author__ = "<NAME><<EMAIL>>"
def test_issue_74():
"""When are Falsy values evaluated as None?"""
patient = Patient(active=True, address=[])
assert "address" not in patient.dict()
assert patient.dict(exclude_none=False)["address"] == []
def test_issue_64():
"""Allow empty string """
try:
Patient(
active=True,
address=[
{
"use": "old",
"line": ["100 LaSalle St", ""],
"city": "Chicago",
"district": "Cook",
"state": "IL",
"postalCode": "60606",
"country": "USA",
}
],
)
assert 1 == 2, "Code should not come here, because of empty string validation"
except ValidationError:
# should raise validation error
assert 1 == 1
from fhir.resources.fhirtypes import String
String.configure_empty_str(allow=True)
try:
Patient(
active=True,
address=[
{
"use": "old",
"line": ["100 LaSalle St", ""],
"city": "Chicago",
"district": "Cook",
"state": "IL",
"postalCode": "60606",
"country": "USA",
}
],
)
assert 1 == 1
except ValidationError:
# should not raise validation error
assert 1 == 2, "Code should not come here, we allow empty string"
String.configure_empty_str(allow=False)
|
lib/bindings/samples/server/utils/cpp_callback.py | tlalexander/stitchEm | 182 | 11125162 | import utils.async
import vs
class CppCallback(vs.CppCallback):
def __init__(self, to_call):
super(CppCallback, self).__init__()
self.to_call = to_call
def __call__(self, payload):
#defer to avoid deadlock
utils.async.defer(self.to_call, payload)
|
tests/test_static_graph.py | zbmain/PGL | 1,389 | 11125183 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import numpy as np
import paddle
import pgl
import paddle.nn as nn
import pgl.nn as gnn
import pgl.nn.functional as F
import paddle.static as static
class GNNModel(nn.Layer):
def __init__(self, input_size, output_size, num_layers=3):
super(GNNModel, self).__init__()
self.conv_fn = nn.LayerList()
self.conv_fn.append(gnn.GCNConv(input_size, output_size))
for i in range(num_layers - 1):
self.conv_fn.append(gnn.GCNConv(output_size, output_size))
self.pool_fn = gnn.GraphPool("sum")
def forward(self, num_nodes, edges, feature):
graph = pgl.Graph(num_nodes=num_nodes, edges=edges)
for fn in self.conv_fn:
feature = fn(graph, feature)
output = self.pool_fn(graph, feature)
return output
class StaticGraphOpTest(unittest.TestCase):
def test_static_graph(self):
path = './tmp'
dim = 100
# Load DyGraph Model
paddle.disable_static()
num_nodes = 5
edges = [(0, 1), (1, 2), (3, 4)]
nfeat = np.random.randn(num_nodes, dim).astype("float32")
model = GNNModel(dim, 10)
out = model(
paddle.to_tensor(num_nodes),
paddle.to_tensor(edges), paddle.to_tensor(nfeat))
out = out.numpy()
paddle.save(model.state_dict(),
os.path.join(path, "static_gnn.pdparam"))
paddle.enable_static()
# Run Static Fisrt
model2 = GNNModel(dim, 10)
input_num_nodes = static.data(
name='num_nodes', shape=[-1], dtype='int32')
input_edges = static.data(name='edges', shape=[-1, 2], dtype='int32')
input_feature = static.data(
name="feature", shape=[-1, dim], dtype="float32")
output = model2(input_num_nodes, input_edges, input_feature)
place = paddle.CPUPlace()
exe = static.Executor(place)
exe.run(static.default_startup_program())
prog = static.default_main_program()
state_dict = paddle.load(os.path.join(path, "static_gnn.pdparam"))
model2.set_state_dict(state_dict)
feed_dict = {
"num_nodes": num_nodes,
"edges": np.array(
edges, dtype="int32"),
"feature": nfeat.astype("float32"),
}
out2 = exe.run(prog, feed=feed_dict, fetch_list=[output])[0]
eps = np.sum((out2 - out)**2)
self.assertTrue(eps < 1e-5)
import shutil
shutil.rmtree(path)
if __name__ == "__main__":
unittest.main()
|
tests/system/test__helpers.py | Arushi201296/python-firestore | 140 | 11125201 | import os
import re
from google.cloud.firestore_v1.base_client import _FIRESTORE_EMULATOR_HOST
from test_utils.system import unique_resource_id
from test_utils.system import EmulatorCreds
FIRESTORE_CREDS = os.environ.get("FIRESTORE_APPLICATION_CREDENTIALS")
FIRESTORE_PROJECT = os.environ.get("GCLOUD_PROJECT")
RANDOM_ID_REGEX = re.compile("^[a-zA-Z0-9]{20}$")
MISSING_DOCUMENT = "No document to update: "
DOCUMENT_EXISTS = "Document already exists: "
UNIQUE_RESOURCE_ID = unique_resource_id("-")
EMULATOR_CREDS = EmulatorCreds()
FIRESTORE_EMULATOR = os.environ.get(_FIRESTORE_EMULATOR_HOST) is not None
|
RecoBTag/Combined/python/candidateChargeBTagComputer_cfi.py | ckamtsikis/cmssw | 852 | 11125230 | import FWCore.ParameterSet.Config as cms
candidateChargeBTagComputer = cms.ESProducer("CandidateChargeBTagESProducer",
useCondDB = cms.bool(False),
gbrForestLabel = cms.string(""),
weightFile = cms.FileInPath('RecoBTag/Combined/data/ChargeBTag_4sep_2016.weights.xml.gz'),
useAdaBoost = cms.bool(True),
jetChargeExp = cms.double(0.8),
svChargeExp = cms.double(0.5)
)
|
bcs-ui/backend/bcs_web/apis/authentication.py | laodiu/bk-bcs | 599 | 11125231 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from rest_framework.authentication import BaseAuthentication
from backend.utils.authentication import JWTClient, JWTUser
from backend.utils.whitelist import is_app_open_api_trusted
from .constants import APIGW_JWT_KEY_NAME, BCS_APP_APIGW_PUBLIC_KEY, USERNAME_KEY_NAME
class JWTAuthentication(BaseAuthentication):
def authenticate(self, request):
client = JWTClient(request.META.get(APIGW_JWT_KEY_NAME, ""))
if not client.is_valid(BCS_APP_APIGW_PUBLIC_KEY):
return None
username = client.user.username
if not username and is_app_open_api_trusted(client.app.app_code):
username = request.META.get(USERNAME_KEY_NAME, "")
user = JWTUser(username=username)
user.client = client
return (user, None)
|
tests/unit/output/test_row_formatter.py | tttgm/basketball_reference_web_scraper | 325 | 11125232 | <reponame>tttgm/basketball_reference_web_scraper<filename>tests/unit/output/test_row_formatter.py
from unittest import TestCase
from basketball_reference_web_scraper.data import Team, Location, Outcome, Position
from basketball_reference_web_scraper.output.fields import format_value
class TestRowFormatter(TestCase):
def test_team_enum_value(self):
self.assertEqual(format_value(Team.BOSTON_CELTICS), "BOSTON CELTICS")
def test_location_enum_value(self):
self.assertEqual(format_value(Location.HOME), "HOME")
def test_outcome_enum_value(self):
self.assertEqual(format_value(Outcome.LOSS), "LOSS")
def test_empty_array(self):
self.assertEqual(format_value([]), "")
def test_empty_set(self):
self.assertEqual(format_value(set()), "")
def test_position_enum_value(self):
self.assertEqual(format_value(Position.POINT_GUARD), "POINT GUARD")
def test_positions_array_with_single_position(self):
self.assertEqual(format_value([Position.POINT_GUARD]), "POINT GUARD")
def test_positions_array_with_multiple_positions(self):
self.assertEqual(format_value([Position.POINT_GUARD, Position.SHOOTING_GUARD]), "POINT GUARD-SHOOTING GUARD")
def test_positions_set_with_single_position(self):
self.assertEqual(format_value({Position.POINT_GUARD}), "POINT GUARD")
def test_string_value(self):
self.assertEqual(format_value("jaebaebae"), "jaebaebae")
|
lib/pare/pare/models/backbone/utils.py | YuliangXiu/ICON | 486 | 11125248 | <reponame>YuliangXiu/ICON
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: <EMAIL>
def get_backbone_info(backbone):
info = {
'resnet18': {
'n_output_channels': 512,
'downsample_rate': 4
},
'resnet34': {
'n_output_channels': 512,
'downsample_rate': 4
},
'resnet50': {
'n_output_channels': 2048,
'downsample_rate': 4
},
'resnet50_adf_dropout': {
'n_output_channels': 2048,
'downsample_rate': 4
},
'resnet50_dropout': {
'n_output_channels': 2048,
'downsample_rate': 4
},
'resnet101': {
'n_output_channels': 2048,
'downsample_rate': 4
},
'resnet152': {
'n_output_channels': 2048,
'downsample_rate': 4
},
'resnext50_32x4d': {
'n_output_channels': 2048,
'downsample_rate': 4
},
'resnext101_32x8d': {
'n_output_channels': 2048,
'downsample_rate': 4
},
'wide_resnet50_2': {
'n_output_channels': 2048,
'downsample_rate': 4
},
'wide_resnet101_2': {
'n_output_channels': 2048,
'downsample_rate': 4
},
'mobilenet_v2': {
'n_output_channels': 1280,
'downsample_rate': 4
},
'hrnet_w32': {
'n_output_channels': 480,
'downsample_rate': 4
},
'hrnet_w48': {
'n_output_channels': 720,
'downsample_rate': 4
},
# 'hrnet_w64': {'n_output_channels': 2048, 'downsample_rate': 4},
'dla34': {
'n_output_channels': 512,
'downsample_rate': 4
},
}
return info[backbone]
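# A minimal, hedged usage sketch (not part of the original module): look up the
# feature dimensions for one of the backbone names defined in the table above.
# The assertions simply mirror the 'resnet50' entry.
if __name__ == "__main__":
    info = get_backbone_info('resnet50')
    assert info['n_output_channels'] == 2048
    assert info['downsample_rate'] == 4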
|
checkov/terraform/checks/data/__init__.py | cclauss/checkov | 4,013 | 11125306 | from checkov.terraform.checks.data.aws import *
|
itchatmp/views/crypto.py | yf-ftd/itchatmp | 1,504 | 11125340 | import os, logging, traceback
import hashlib, struct
from base64 import b64decode, b64encode
try:
from Crypto.Cipher import AES
except ImportError:
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
def aes_encode(key, data):
cryptor = Cipher(algorithms.AES(key), modes.CBC(key[:16]),
backend=default_backend()).encryptor()
return b64encode(cryptor.update(data) + cryptor.finalize())
def aes_decode(key, data):
cryptor = Cipher(algorithms.AES(key), modes.CBC(key[:16]),
backend=default_backend()).decryptor()
return cryptor.update(b64decode(data)) + cryptor.finalize()
else:
def aes_encode(key, data):
cryptor = AES.new(key, AES.MODE_CBC, key[:16])
return b64encode(cryptor.encrypt(data))
def aes_decode(key, data):
cryptor = AES.new(key, AES.MODE_CBC, key[:16])
return cryptor.decrypt(b64decode(data))
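# Hedged note (illustrative, not part of the original module): both backends
# above implement the same AES-CBC scheme with the first 16 bytes of the key
# reused as the IV, so the round trip
#     aes_decode(key, aes_encode(key, data)) == data
# holds for a 16/24/32-byte key and data whose length is a multiple of 16.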
from itchatmp.content import ENCRYPT
from itchatmp.views import deconstruct_msg, construct_msg
logger = logging.getLogger('itchatmp')
def decrypt_msg(timestamp, nonce, signature, config, msgDict):
    ''' decrypt msg from wechat, using AES_CBC decryption
        returns a dict containing the decrypted information
        if decryption fails, an empty dict is returned
        pass {'echostr': ECHOSTR} into msgDict to decrypt Cop mp oauth
    '''
if 'echostr' in msgDict:
msgDict['Encrypt'] = msgDict['echostr']
elif msgDict.get('MsgType') != ENCRYPT:
return msgDict
try:
text = aes_decode(config._encodingAesKey, msgDict['Encrypt'])
text = text[16:-(text[-1] if isinstance(text[-1], int) else ord(text[-1]))]
xmlLen = struct.unpack('>I', text[:4])[0]
xmlContent = text[4:xmlLen + 4].decode('utf8')
fromAppid = text[xmlLen + 4:].decode('utf8')
except:
logger.debug(traceback.format_exc())
return {}
# Check appId
if fromAppid not in (config.appId, config.copId):
logger.debug('A message from wrong appid is filtered when decrypt: %s' % fromAppid)
return {}
if 'echostr' in msgDict:
return {'echostr': xmlContent}
else:
return deconstruct_msg(xmlContent)
def encrypt_msg(timestamp, nonce, signature, config, replyDict):
''' encrypt msg for sending to wechat
* use AES_CBC encryption
* return a string ready for sending
* as in construct_msg, string in replyDict should be unicode
'''
text = construct_msg(replyDict).encode('utf8')
text = os.urandom(16) + struct.pack('>I', len(text)) +\
text + config.appId.encode('utf8')
paddingAmount = 32 - (len(text) % 32)
text += chr(paddingAmount).encode('utf8') * paddingAmount
text = aes_encode(config._encodingAesKey, text)
# Encrypt generated
s = [i.encode('utf8') for i in (timestamp, nonce, config.token)]
s += [text]; s.sort(); s = b''.join(s)
# Signature generated
return construct_msg({
'FromUserName': replyDict['FromUserName'],
'ToUserName': replyDict['ToUserName'],
'MsgType': ENCRYPT,
'Encrypt': text.decode('utf8'),
'MsgSignature': hashlib.sha1(s).hexdigest(),
'TimeStamp': timestamp,
'Nonce': nonce,
}, )
def oauth(timestamp, nonce, signature, token, echostr=None):
''' determine whether signature of request is right
* both get and post functions need oauth
* msgs we generate for sending don't match this fn
* for Cop mp, we need echostr as well
'''
s = [timestamp, nonce, token]
if echostr is not None: s.append(echostr)
s.sort(); s = ''.join(s).encode('utf8')
return hashlib.sha1(s).hexdigest() == signature
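if __name__ == '__main__':
    # Hedged self-check of the signature scheme used by oauth() above; the
    # token/timestamp/nonce values are made up for illustration only.
    _token, _timestamp, _nonce = 'testtoken', '1500000000', '12345'
    _sig = hashlib.sha1(
        ''.join(sorted([_timestamp, _nonce, _token])).encode('utf8')).hexdigest()
    assert oauth(_timestamp, _nonce, _sig, _token)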
|
pythonFiles/tests/unittestadapter/conftest.py | Kelvin-Gamer/vscode-python | 1,476 | 11125376 | <reponame>Kelvin-Gamer/vscode-python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import sys
# Ignore the contents of this folder for Python 2 tests.
if sys.version_info[0] < 3:
collect_ignore_glob = ["*.py"]
|
.test-infra/jenkins/metrics_report/dashboards_parser.py | eyal0/beam | 5,279 | 11125382 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import unittest
class Dashboard:
def __init__(self, file):
self.file = file
self.uid, self.queries = self.get_dashboard_uid_and_queries(file)
self.regexes = set(
self.parse_query_to_regex(query) for query in self.queries)
@staticmethod
def get_dashboard_uid_and_queries(file):
queries = []
with open(file, "r") as f:
data = json.load(f)
uid = data.get("uid")
for panel in data.get("panels", []):
for target in panel.get("targets", []):
query = target.get("query")
queries.append(query)
return uid, queries
@staticmethod
def parse_query_to_regex(query):
select_pattern = r"(.*FROM\s)(.*)(\sWHERE.*)"
match = re.match(select_pattern, query)
if match:
from_ = match.group(2)
without_quotes = re.sub(r"\"", "", from_)
without_retention_policy = without_quotes
if re.match(r"(\w+.\.)(.*)", without_quotes):
without_retention_policy = re.sub(r"(\w+.)(.*)", r"\2", without_quotes)
replaced_parameters = re.sub(
r"\$\{\w+\}", r"[\\w\\d]*", without_retention_policy)
return replaced_parameters
@staticmethod
def _get_json_files_from_directory(directory):
return [
os.path.join(directory, i) for i in os.listdir(directory)
if i.endswith(".json")
]
@classmethod
def get_dashboards_from_directory(cls, directory):
for file in cls._get_json_files_from_directory(directory):
yield cls(file)
def guess_dashboard_by_measurement(
measurement, directory, additional_query_substrings=None):
"""
Guesses dashboard by measurement name by parsing queries and matching it with measurement.
It is done by using regular expressions obtained from queries.
Additionally query can be checked for presence of any of the substrings.
"""
dashboards = list(Dashboard.get_dashboards_from_directory(directory))
ret = []
for dashboard in dashboards:
for regex in dashboard.regexes:
if additional_query_substrings and not any(
substring.lower() in query.lower()
for substring in additional_query_substrings
for query in dashboard.queries):
continue
if regex and re.match(regex, measurement):
ret.append(dashboard)
return list(set(ret))
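# Hedged usage sketch (the directory path and measurement name below are
# illustrative assumptions, not part of this module): given a directory of
# Grafana dashboard JSON files, find dashboards whose queries could have
# produced a measurement.
#
#   matches = guess_dashboard_by_measurement(
#       "nexmark_0_batch", "/path/to/dashboards",
#       additional_query_substrings=["runtimeMs"])
#   print([d.uid for d in matches])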
class TestParseQueryToRegex(unittest.TestCase):
def test_parse_query_to_regex_1(self):
query = (
'SELECT "runtimeMs" FROM "forever"."nexmark_${ID}_${processingType}" WHERE '
'"runner" =~ /^$runner$/ AND $timeFilter GROUP BY "runner"')
expected = r"nexmark_[\w\d]*_[\w\d]*"
result = Dashboard.parse_query_to_regex(query)
self.assertEqual(expected, result)
def test_parse_query_to_regex_2(self):
query = (
'SELECT mean("value") FROM "python_bqio_read_10GB_results" WHERE "metric" '
'=~ /runtime/ AND $timeFilter GROUP BY time($__interval), "metric"')
expected = "python_bqio_read_10GB_results"
result = Dashboard.parse_query_to_regex(query)
self.assertEqual(expected, result)
def test_parse_query_to_regex_3(self):
query = (
'SELECT mean("value") FROM "${sdk}_${processingType}_cogbk_3" WHERE '
'"metric" =~ /runtime/ AND $timeFilter GROUP BY time($__interval), "metric"'
)
expected = "[\w\d]*_[\w\d]*_cogbk_3"
result = Dashboard.parse_query_to_regex(query)
self.assertEqual(expected, result)
|
qmlt/numerical/plot.py | stjordanis/QMLT | 117 | 11125413 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments
"""
Plotting functions
========================================================
**Module name:** :mod:`qmlt.numerical.plot`
.. currentmodule:: qmlt.numerical.plot
.. codeauthor:: <NAME> <<EMAIL>>
This module contains the functions required to plot the parameters
of the numeric learner.
These are auxillary functions, it is recommended you instead use the
plotting method available in the numeric learner, which will provide
live plots of the training progress and monitored parameters. This can be
turned on by passing the ``plot`` key to the hyperparameters dictionary.
For example,
>>> hyperparams = {'circuit': circuit,
'log_every': 10,
'plot': True
}
Here, the integer value of ``log_every`` specifies at how many global steps
the live plots should be updated. When the training is complete, the terminal
will show the message
.. code-block:: console
Training complete. Close the live plot window to exit.
To use auxillary plotting functions on a logfile:
>>> from qmlt.numerical import plot
>>> plot.plot_parameter(numerical, y='loss')
You can also chain together plots by passing through the returned
axes, to display multiple parameters on one plot:
>>> ax = plot.plot_parameter(numerical, y='loss')
>>> ax = plot.plot_parameter(numerical, y='cost', ax=ax)
>>> ax = plot.plot_parameter(numerical, y='regul', ax=ax,
... legend=True, save_filename="test.png")
Finally, you can also automatically plot all parameters against the global
step, on one figure as multiple subplots:
>>> plot.plot_all("numerical/logsNUM/log.csv")
Plotting functions
------------------
.. autosummary::
plot_parameter
plot_all
Auxillary functions
-------------------
.. autosummary::
_squareish
_plot
Code details
------------
"""
import os
from itertools import zip_longest
import numpy as np
try:
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
from matplotlib import pyplot as plt
except:
raise ImportError("To use the plotting functions, matplotlib must be installed")
def _squareish(n):
"""Factors an integer to two integers that closesly approximates a square
Args:
n (int): integer to factor.
Returns:
tuple(int, int): the squareish integers.
"""
if n == 1:
return (1, 1)
if n == 2:
return (2, 1)
nsqrt = np.ceil(np.sqrt(n))
solution = False
x = int(nsqrt)
while not solution:
y = int(n/x)
if y * x == float(n):
solution = True
else:
x -= 1
if n > 1:
if x == 1 or y == 1:
x = 3
y = int(np.ceil(n/3))
return x, y
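# Worked examples of the factorisation above (values derived by tracing the code):
#   _squareish(12) -> (4, 3)   # 4 * 3 == 12, close to square
#   _squareish(7)  -> (3, 3)   # primes fall back to a 3-wide grid with spare cells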
def _plot(x, y, ax=None, xlabel=None, ylabel=None, **plot_kw):
r"""Produces a line plot visualizing settings, training progress, or monitored parameters.
Args:
x (array): the data to plot on the x-axis.
y (array): the data to plot on the y-axis.
xlabel (str): the x-axis label.
ylabel (str): the y-axis label.
**plot_kw: additional keyword arguments to be passed to ``plt.plot``.
Returns:
        matplotlib.axes.Axes: the plotting axes.
"""
if ax is None:
ax = plt.gca()
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_title(ylabel)
ax.plot(x, y, **plot_kw)
return ax
def plot_parameter(logfile, x='global_step', y='loss', save_filename=None,
ax=None, legend=False, style='ggplot', legend_kw=None,
fig_kw=None, savefig_kw=None, **plot_kw): # pragma: no cover
r"""Produces a line plot visualizing settings, training progress, or monitored parameters.
Args:
logfile (str): the location of the logfile containing the training progress
and parameter values for each global step.
x (str): the parameter to plot on the x-axis. By default the global step.
y (str): the parameter to plot on the y-axis. By default the loss.
save_filename (str): string containing the output image filename, including
all relevant paths and file extensions. All image filetypes supported
by matplotlib are supported here. By default, the plot is *not* saved.
ax (matplotlib.axes.Axes): a matplotlib axes object. If none is provided,
this is created automatically.
legend (bool): If True, a legend is added containing the y parameter names.
style (str): a supported matplotlib style sheet. To see the available
styles on your system, please refer to the output of
``matplotlib.pyplot.style.available``.
legend_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.pyplot.legend``.
fig_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.figure.Figure``.
savefig_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.figure.Figure.savefig``.
**plot_kw: additional keyword arguments to be passed to ``matplotlib.pyplot.plot``.
Returns:
matplotlib.axes.Axes: returns the plotting axes.
"""
# pragma: no cover
if fig_kw is None:
fig_kw = {'figsize': (12, 8)}
if savefig_kw is None:
savefig_kw = {}
if legend_kw is None:
legend_kw = {}
data = np.genfromtxt(logfile, dtype=float, delimiter=',', names=True)
params = data.dtype.names
if x not in params:
raise ValueError("The x-axis parameter {} does not exist.".format(x))
if y not in params:
raise ValueError("The y-axis parameter {} does not exist.".format(y))
plt.style.use(style)
if ax is None:
_, ax = plt.subplots(1, 1, squeeze=True, **fig_kw)
if legend:
ax = _plot(data[x], data[y], label=y, ylabel="", ax=ax, **plot_kw)
plt.legend()
else:
ax = _plot(data[x], data[y], label=y, ylabel=y, xlabel=x, ax=ax, **plot_kw)
if save_filename is not None:
plt.savefig(save_filename, **savefig_kw)
return ax
def plot_all(logfile, x='global_step', y=None, save_filename=None,
figax=None, style='ggplot', fig_kw=None, savefig_kw=None, **plot_kw): # pragma: no cover
r"""Produces a figure containing line plots visualizing settings,
training progress, or monitored parameters.
Args:
logfile (str): the location of the logfile containing the training progress
and parameter values for each global step.
x (str): the parameter to plot on the x-axes. By default the global step.
        y (Sequence[str]): the parameters to plot on the figure. By default, all will be plotted.
save_filename (str): string containing the output image filename, including
all relevant paths and file extensions. All image filetypes supported
by matplotlib are supported here. By default, the plot is *not* saved.
figax (tuple): a tuple containing the figure and the plotting axes. Created
by default if not provided.
style (str): a supported matplotlib style sheet. To see the available
styles on your system, please refer to the output of
``matplotlib.pyplot.style.available``.
fig_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.figure.Figure``.
savefig_kw (dict): dictionary of additional matplotlib keyword arguments
            to pass to ``matplotlib.figure.Figure.savefig``.
**plot_kw: additional keyword arguments to be passed to ``matplotlib.pyplot.plot``.
Returns:
tuple: returns a tuple containing the figure and the plotting axes.
"""
if fig_kw is None:
fig_kw = {'figsize': (12, 8)}
if savefig_kw is None:
savefig_kw = {}
data = np.genfromtxt(logfile, dtype=float, delimiter=',', names=True)
params = data.dtype.names
if x not in params:
raise ValueError("The x-axis parameter {} does not exist.".format(x))
xdata = data[x]
if y is None:
ydata = [data[p] for p in params if p != x]
ylabels = [p for p in params if p != x]
else:
try:
ydata = [data[p] for p in y]
except ValueError:
raise ValueError("parameter name does not exist in logfile.")
ylabels = y
rows, cols = _squareish(len(ydata))
plt.style.use(style)
if figax is None:
fig, ax = plt.subplots(rows, cols, sharex=True, sharey=False, **fig_kw)
else:
fig, ax = figax
for idx, (yd, yl, a) in enumerate(zip_longest(ydata, ylabels, ax.ravel())):
# get 2D grid location
loc = np.array(np.unravel_index([idx], (rows, cols))).flatten()
# only label x-axis if on the bottom row
if yd is not None:
if loc[0] == rows - 1:
a = _plot(xdata, yd, xlabel=x, ylabel=yl, ax=a, **plot_kw)
else:
a = _plot(xdata, yd, ylabel=yl, ax=a, **plot_kw)
else:
a.axis('off')
plt.tight_layout()
if save_filename is not None:
fig.savefig(save_filename, **savefig_kw)
return fig, ax
|
lldb/test/API/tools/lldb-server/TestGdbRemoteSingleStep.py | mkinsner/llvm | 2,338 | 11125430 | import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteSingleStep(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfWindows # No pty support to test any inferior std -i/e/o
@skipIf(triple='^mips')
def test_single_step_only_steps_one_instruction_with_s(self):
self.build()
self.set_inferior_startup_launch()
self.single_step_only_steps_one_instruction(
use_Hc_packet=True, step_instruction="s")
|
utilities/diskimage_unittesting/tests/empty_directory_test.py | timgates42/pymacadmin | 112 | 11125493 | <filename>utilities/diskimage_unittesting/tests/empty_directory_test.py
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure directories that are supposed to be empty actually are."""
__author__ = '<EMAIL> (<NAME>)'
import os
import macdmgtest
class TestEmptyDirectories(macdmgtest.DMGUnitTest):
def setUp(self):
self.empty_directories = ['var/vm',
'/private/tmp',
'Volumes',
'Library/Logs']
def DirectoryEmpty(self, dirname):
"""Make sure dirname is empty."""
path = self.PathOnDMG(dirname)
if os.listdir(path):
return False
else:
return True
def testEmptyDirectories(self):
"""Ensure every directory that is supposed to be empty on the image, is."""
full_dirs = []
for d in self.empty_directories:
if not self.DirectoryEmpty(d):
full_dirs.append(d)
self.assertEqual(len(full_dirs), 0)
if __name__ == '__main__':
macdmgtest.main()
|
myia/operations/prim_partial.py | strint/myia | 222 | 11125500 | <reponame>strint/myia
"""Definitions for the primitive `partial`."""
from ..lib import (
AbstractFunction,
PartialApplication,
Possibilities,
standard_prim,
)
from . import primitives as P
def pyimpl_partial(f, *args):
"""Implement `partial`."""
def res(*others):
return f(*(args + others))
return res
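# Hedged illustration of the pure-Python fallback above (not part of the
# original file): pyimpl_partial(lambda a, b: a + b, 2)(3) evaluates to 5,
# i.e. the captured arguments are prepended to the later call's arguments.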
@standard_prim(P.partial)
async def infer_partial(self, engine, fn, *args):
"""Infer the return type of primitive `partial`."""
fns = await fn.get()
assert isinstance(fns, Possibilities)
return AbstractFunction(*[PartialApplication(fn, list(args)) for fn in fns])
__operation_defaults__ = {
"name": "partial",
"registered_name": "partial",
"mapping": P.partial,
"python_implementation": pyimpl_partial,
}
__primitive_defaults__ = {
"name": "partial",
"registered_name": "partial",
"type": "backend",
"python_implementation": pyimpl_partial,
"inferrer_constructor": infer_partial,
"grad_transform": None,
}
|
tests/param/string_to_boolean_test.py | Timothyyung/bravado-core | 122 | 11125590 | # -*- coding: utf-8 -*-
import pytest
from bravado_core.param import string_to_boolean
def test_boolean_true_is_true_or_1():
assert string_to_boolean('true')
assert string_to_boolean('tRUe')
assert string_to_boolean('1')
def test_boolean_false_is_false_or_0():
assert not string_to_boolean('false')
assert not string_to_boolean('faLSe')
assert not string_to_boolean('0')
def test_boolean_cast_failure_raises_value_error():
with pytest.raises(ValueError):
string_to_boolean('PIZZA')
|
DiffAugment-biggan-imagenet/compare_gan/metrics/fid_score_test.py | Rian-T/data-efficient-gans | 1,902 | 11125610 | <reponame>Rian-T/data-efficient-gans
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the FID score."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.metrics import fid_score as fid_score_lib
import numpy as np
import tensorflow as tf
class FIDScoreTest(tf.test.TestCase):
def test_fid_computation(self):
real_data = np.ones((100, 2))
real_data[:50, 0] = 2
gen_data = np.ones((100, 2)) * 9
gen_data[50:, 0] = 2
# mean(real_data) = [1.5, 1]
# Cov(real_data) = [[ 0.2525, 0], [0, 0]]
# mean(gen_data) = [5.5, 9]
# Cov(gen_data) = [[12.37, 0], [0, 0]]
result = fid_score_lib.compute_fid_from_activations(real_data, gen_data)
self.assertNear(result, 89.091, 1e-4)
if __name__ == "__main__":
tf.test.main()
|
test/manualAreplTests/envVarTest.py | manuth/LiveCode | 203 | 11125619 | <gh_stars>100-1000
from arepl_dump import dump
import os
path=os.environ['Path']
test = os.environ['test']
try:
shouldFail = os.environ['aofijaef']
except KeyError:
print('bad key failed as expected')
|
apps/point_cloud/image_common/camera_calibration_parsers/src/camera_calibration_parsers/__init__.py | ramonidea/prl_wireless_perception | 742 | 11125628 | from camera_calibration_parsers.camera_calibration_parsers_wrapper import __readCalibrationWrapper
from sensor_msgs.msg import CameraInfo
def readCalibration(file_name):
"""Read .ini or .yaml calibration file and return (camera name and cameraInfo message).
@param file_name: camera calibration file name
@type file_name: str
@return: (camera name, cameraInfo message) or None on Error
    @rtype: tuple(str, sensor_msgs.msg.CameraInfo) | None
"""
ret, cn, ci = __readCalibrationWrapper(file_name)
if not ret:
return None
c = CameraInfo()
c.deserialize(ci)
return cn, c
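# Hedged usage sketch; 'camera.yaml' is a placeholder path, not a file shipped
# with this package. readCalibration() returns None on parse failure, otherwise
# a (camera_name, CameraInfo) tuple:
#
#   result = readCalibration('camera.yaml')
#   if result is not None:
#       name, info = result
#       print(name, info.width, info.height)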
|
vscode/wsclient.py | TTitcombe/vscode-ext | 140 | 11125648 | import json
import uuid
import socket
import asyncio
import websockets
class WSClient:
"""
This class manages the websocket connection.
"""
BASE_URI = "ws://localhost:"
def __init__(self, extension, port: int = None) -> None:
self.extension = extension
self.port = port
self.ws = None
self.responses = {}
@property
def uri(self) -> str:
return self.BASE_URI + str(self.port)
def run_webserver(self):
if self.port is None:
self.port = self.get_free_port()
async def webserver():
async with websockets.serve(self.handler, "localhost", self.port):
print(f"Listening on {self.uri}", flush=True) # js will read this
await asyncio.Future() # run forever
asyncio.run(webserver())
def get_free_port(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))
_, port = sock.getsockname()
sock.close()
return port
async def handler(self, websocket, path):
self.ws = websocket
while True:
try:
message = await websocket.recv()
except websockets.ConnectionClosedOK:
break
data = json.loads(message)
await self.extension.parse_ws_data(data)
async def run_code(self, code, wait_for_response=True, thenable=True):
if wait_for_response:
uid = str(uuid.uuid4())
payload = {"type": 2 if thenable else 3, "code": code, "uuid": uid}
await self.ws.send(json.dumps(payload))
return await self.wait_for_response(uid)
else:
return await self.ws.send(json.dumps({"type": 1, "code": code}))
async def wait_for_response(self, uid):
        while uid not in self.responses:
await asyncio.sleep(0.1)
return self.responses.pop(uid)
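# Hedged usage sketch (the DummyExtension below is an illustrative assumption,
# not part of this module): any object exposing an async parse_ws_data(data)
# coroutine can act as the "extension" the handler dispatches to.
#
#   class DummyExtension:
#       async def parse_ws_data(self, data):
#           print("received:", data)
#
#   WSClient(DummyExtension()).run_webserver()  # blocks and serves forever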
|
scout/commands/update/genes.py | Clinical-Genomics/scout | 111 | 11125684 | <reponame>Clinical-Genomics/scout
#!/usr/bin/env python
# encoding: utf-8
"""
update/genes.py
Build a file with genes that are based on hgnc format.
Parses ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/hgnc_complete_set.txt,
ftp.broadinstitute.org/pub/ExAC_release/release0.3/functional_gene_constraint/
and a biomart dump from ensembl with
'Gene ID' 'Chromosome' 'Gene Start' 'Gene End' 'HGNC symbol'
The hgnc file will determine which genes that are added and most of the meta information.
The ensembl gene file will add coordinates and the exac file will add pLi scores.
Created by <NAME> on 2015-01-14.
Copyright (c) 2015 __MoonsoInc__. All rights reserved.
"""
import logging
import os
import tempfile
import click
from flask.cli import current_app, with_appcontext
from scout.commands.download.ensembl import ensembl as ensembl_cmd
from scout.commands.download.exac import exac as exac_cmd
from scout.commands.download.hgnc import hgnc as hgnc_cmd
from scout.commands.download.hpo import hpo as hpo_cmd
from scout.commands.download.omim import omim as omim_cmd
from scout.constants import UPDATE_GENES_RESOURCES
from scout.load import load_hgnc_genes, load_transcripts
from scout.server.extensions import store
from scout.utils.handle import get_file_handle
LOG = logging.getLogger(__name__)
def download_resources(download_dir, api_key, builds):
"""Download necessary files to update gene definitions in a temporary directory
Args:
download_dir(str): path to downloaded resources. Provided by user in the cli command
api_key(str): API key for downloading OMIM resources
builds(list): a list containing both genome builds or one genome build ['37', '38']
"""
ctx = click.get_current_context()
if not api_key:
LOG.warning("No OMIM API key provided. Please note that some information will be missing.")
else:
# Download OMIM files
ctx.invoke(omim_cmd, out_dir=download_dir, api_key=api_key)
# Download HPO definitions
ctx.invoke(hpo_cmd, out_dir=download_dir)
# Download Exac genes
ctx.invoke(exac_cmd, out_dir=download_dir)
# Download HGNC genes
ctx.invoke(hgnc_cmd, out_dir=download_dir)
# Download Ensembl genes
for build in builds:
ctx.invoke(
ensembl_cmd,
out_dir=download_dir,
skip_tx=False,
exons=False,
build=build,
)
def fetch_downloaded_resources(resources, downloads_folder, builds):
"""Checks that a resource file exists on disk and has valid data. Return its content as a list of lines
Args:
resources(dict): Dictionary containing resource files' lines
downloads_folder(str): Path to downloaded resources. Provided by user in the cli command
builds(list): a list containing both genome builds or one genome build ['37', '38']
"""
for resname, filenames in UPDATE_GENES_RESOURCES.items():
for filename in filenames:
resource_path = os.path.join(downloads_folder, filename)
resource_exists = os.path.isfile(resource_path)
if resource_exists:
resources[resname] = get_file_handle(resource_path).readlines()
            # If the resource is mandatory make sure it exists and contains data (OMIM data is NOT mandatory)
if resname in ["hpo_genes", "hgnc_lines", "exac_lines"] and not resources.get(resname):
LOG.error(f"Missing resource {resname} in downloads path.")
raise click.Abort()
# Check that the available genes and transcripts file correspond to the required genome build
for build in builds:
if resname.endswith(build) and resources.get(resname) is False:
LOG.error(
f"Updating genes for genome build '{build}' requires a resource '{resname}' that is currenly missing in provided path."
)
raise click.Abort()
# Check that resource lines contain actual data
if resname not in resources:
continue
if "<!DOCTYPE html>" in resources[resname][0] or "<!DOCTYPE html>" in resources[resname][1]:
LOG.error(f"Resource file '{resname}' doesn't contain valid data.")
raise click.Abort()
@click.command("genes", short_help="Update all genes")
@click.option(
"--build",
type=click.Choice(["37", "38"]),
help="What genome build should be used. If no choice update 37 and 38.",
)
@click.option(
"-f",
"--downloads_folder",
type=click.Path(exists=True, dir_okay=True, readable=True),
help="specify path to folder where files necessary to update genes are pre-downloaded",
)
@click.option(
"--api-key",
help="Specify the OMIM downloads api key. Only if downloads_folder is not provided",
)
@with_appcontext
def genes(build, downloads_folder, api_key):
"""
Load the hgnc aliases to the mongo database.
"""
LOG.info("Running scout update genes")
adapter = store
builds = [build] if build else ["37", "38"]
api_key = api_key or current_app.config.get("OMIM_API_KEY")
resources = {}
# If required resources are missing, download them to a temporary directory
if downloads_folder is None:
with tempfile.TemporaryDirectory() as tempdir:
try:
download_resources(tempdir, api_key, builds)
except Exception as ex:
LOG.error(ex)
fetch_downloaded_resources(resources, tempdir, builds)
    else:  # If resources have been previously downloaded, read those files and return their lines
fetch_downloaded_resources(resources, downloads_folder, builds)
# Load genes and transcripts info
for genome_build in builds:
LOG.warning("Dropping all gene information")
adapter.drop_genes(genome_build)
LOG.warning("Dropping all transcript information")
adapter.drop_transcripts(genome_build)
ensembl_gene_res = (
resources.get("ensembl_genes_37")
if genome_build == "37"
else resources.get("ensembl_genes_38")
) # It will be none if everything needs to be downloaded
# Load the genes
hgnc_genes = load_hgnc_genes(
adapter=adapter,
ensembl_lines=ensembl_gene_res,
hgnc_lines=resources.get("hgnc_lines"),
exac_lines=resources.get("exac_lines"),
mim2gene_lines=resources.get("mim2genes"),
genemap_lines=resources.get("genemap2"),
hpo_lines=resources.get("hpo_genes"),
build=genome_build,
)
ensembl_genes_dict = {}
for gene_obj in hgnc_genes:
ensembl_id = gene_obj["ensembl_id"]
ensembl_genes_dict[ensembl_id] = gene_obj
# Load the transcripts
ensembl_tx_res = (
resources.get("ensembl_transcripts_37")
if genome_build == "37"
else resources.get("ensembl_transcripts_38")
) # It will be none if everything needs to be downloaded
load_transcripts(adapter, ensembl_tx_res, genome_build, ensembl_genes_dict)
LOG.info("Genes and transcripts loaded")
|
test/run/t310.py | timmartin/skulpt | 2,671 | 11125743 | <filename>test/run/t310.py
s = set([2,3,4])
t = set([3,4,5])
u = set([1,3,5])
a = s.intersection(t)
b = u.intersection(s)
c = u.intersection(t)
print a
print b
print c
print a == set([3, 4])
print b == set([3])
print c == set([3, 5])
d = s.intersection(t, u)
print d
print d == set([3])
|
tests/test_static.py | mblack20/flintrock | 615 | 11125751 | <filename>tests/test_static.py
import compileall
import os
import subprocess
# External modules
import yaml
FLINTROCK_ROOT_DIR = (
os.path.dirname(
os.path.dirname(
os.path.realpath(__file__))))
TEST_TARGETS = [
'setup.py',
'flintrock/',
'tests/']
TEST_PATHS = [
os.path.join(FLINTROCK_ROOT_DIR, path) for path in TEST_TARGETS]
def test_code_compiles():
for path in TEST_PATHS:
if os.path.isdir(path):
result = compileall.compile_dir(path)
else:
result = compileall.compile_file(path)
# NOTE: This is not publicly documented, but a return of 1 means
# the compilation succeeded.
# See: http://bugs.python.org/issue25768
assert result == 1
def test_flake8():
ret = subprocess.call(['flake8'], cwd=FLINTROCK_ROOT_DIR)
assert ret == 0
def test_config_template_is_valid():
config_template = os.path.join(FLINTROCK_ROOT_DIR, 'flintrock', 'config.yaml.template')
with open(config_template) as f:
yaml.safe_load(f)
|
examples/tensorboard/text.py | dwolfschlaeger/guildai | 694 | 11125786 | """Ref: https://www.tensorflow.org/api_docs/python/tf/summary/text
"""
import argparse
import tensorflow as tf
p = argparse.ArgumentParser()
p.add_argument("--logdir", default="/tmp/text_demo")
args = p.parse_args()
print("Writing logs to %s" % args.logdir)
writer = tf.summary.create_file_writer(args.logdir)
with writer.as_default():
tf.summary.text("color", tf.convert_to_tensor("blue"), 10)
tf.summary.text(
"text",
tf.convert_to_tensor(
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed venenatis, "
"nisl in lacinia placerat, tortor neque blandit erat, ut auctor dui dolor "
"quis elit."
),
10,
)
tf.summary.text("color", tf.convert_to_tensor("green"), 20)
tf.summary.text(
"text",
tf.convert_to_tensor(
"Vestibulum a tellus accumsan, posuere tellus sed, volutpat elit. Ut "
"venenatis in massa ac scelerisque. Ut in risus ut turpis facilisis "
"maximus."
),
20,
)
tf.summary.text("color", tf.convert_to_tensor("red"), 30)
tf.summary.text(
"text",
tf.convert_to_tensor(
"Aenean malesuada risus non laoreet efficitur. Maecenas sit amet laoreet "
"sem, ac eleifend augue. Suspendisse potenti. Morbi euismod tempor gravida."
),
30,
)
writer.flush()
|
examples/detection_file_conversions/scripts/correct_frame_ids_in_viame_csv.py | Kitware/VAIME | 127 | 11125844 | <filename>examples/detection_file_conversions/scripts/correct_frame_ids_in_viame_csv.py
#!/usr/bin/env python
import argparse
import csv
import os
import sys
F_IMAGE_NAME = 1
F_FRAME_NUMBER = 2
field_types = [
int,
str,
int,
float,
float,
float,
float,
float,
float,
]
#------------------------------------------------------------------------------
def warn(msg, *args, **kwargs):
sys.stderr.write('Warning: ' + msg.format(*args, **kwargs) + '\n')
#------------------------------------------------------------------------------
def read_image_list(path):
images = {}
i = 0
with open(path, 'rt') as f:
for l in f:
images[os.path.basename(l.strip())] = i
i += 1
return images
#------------------------------------------------------------------------------
def read_records(path):
records = []
with open(path, 'rt') as f:
reader = csv.reader(f)
for row in reader:
try:
record = [t(v) for t, v in zip(field_types, row)]
classifiers = row[len(field_types):]
while len(classifiers) >= 2:
record += [classifiers[0], float(classifiers[1])]
if len(classifiers):
warn('ignoring unpaired classification {!r}',
classifiers[0])
records.append(record)
except ValueError:
warn('ignoring row {!r} with malformatted field(s)', row)
return records
#------------------------------------------------------------------------------
def write_records(records, out_file):
writer = csv.writer(out_file, quoting=csv.QUOTE_NONNUMERIC)
    # Write each row explicitly; under Python 3, map() is lazy and would
    # otherwise emit no rows at all.
    for record in records:
        writer.writerow(record)
#------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(
description='Fix bad frame numbers in a NOAA CSV file '
'using an image list')
output_group = parser.add_mutually_exclusive_group()
output_group.add_argument('-o', '--output', type=str, metavar='OUTPUT',
help='Output CSV file')
output_group.add_argument('-i', '--in-place', action='store_true',
help='Rewrite CSV file in place')
parser.add_argument('images', metavar='IMAGES', type=str,
help='Input image list')
parser.add_argument('csv', metavar='INPUT', type=str,
help='Input CSV file')
args = parser.parse_args()
# Read input files
images = read_image_list(args.images)
records = read_records(args.csv)
# Fix record frame numbers using image names and image list
for i in range(len(records)):
f = records[i][F_IMAGE_NAME]
if f in images:
records[i][F_FRAME_NUMBER] = images[f]
else:
warn('no match for image name {!r}: frame number not updated', f)
# Write output
if args.in_place:
out = open(args.csv, 'wt')
elif args.output is not None:
out = open(args.output, 'wt')
else:
out = sys.stdout
write_records(records, out)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if __name__ == '__main__':
main()
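# Hedged command-line sketches (file names below are placeholders):
#   python correct_frame_ids_in_viame_csv.py -o fixed.csv image_list.txt detections.csv
#   python correct_frame_ids_in_viame_csv.py -i image_list.txt detections.csv   # rewrite in place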
|
vilya/models/actions/__init__.py | mubashshirjamal/code | 1,582 | 11125862 | <reponame>mubashshirjamal/code
# -*- coding: utf-8 -*-
migrate_type_name = {
'code_review': 'pull_comment',
# 'commit': 'pull_commit',
}
# Migrate legacy notification data
def migrate_notif_data(data, receiver):
from vilya.models.notification import Notification
from vilya.models.actions.commit_comment import migrate_commit_comment
from vilya.models.actions.issue import migrate_issue
from vilya.models.actions.issue_comment import migrate_issue_comment
from vilya.models.actions.pull import migrate_pull_request
from vilya.models.actions.pull_comment import migrate_pull_comment
from vilya.models.actions.recommend import migrate_recommend
from vilya.models.actions.team_add_member import migrate_team_add_member
type_migrate_dict = {
'commit_comment': migrate_commit_comment,
'issue': migrate_issue,
'issue_comment': migrate_issue_comment,
'pull_request': migrate_pull_request,
'pull_comment': migrate_pull_comment,
# 'pull_commit': migrate_pull_commit,
'recommend': migrate_recommend,
'team_add_member': migrate_team_add_member,
}
assert data.get('uid') is not None
assert data.get('type') is not None
uid = data['uid']
action_type = data['type']
if action_type in migrate_type_name:
Notification.set_data(receiver, 'type', migrate_type_name[action_type],
uid)
action_type = migrate_type_name[action_type]
migrate_data = type_migrate_dict[action_type]
for new, old in migrate_data.iteritems():
if data.get(new) is None:
assert type(old) in (str, tuple, list)
value = data[old] if isinstance(old, str) else old[1](data.get(old[0])) # noqa
Notification.set_data(receiver, new, value, uid)
data[new] = value
elif type(old) is list: # force update
value = data[old] if isinstance(old, str) else old[1](data.get(old[0])) # noqa
Notification.set_data(receiver, new, value, uid)
data[new] = value
data['notif_template'] = action_type + '_notif'
data['feed_template'] = action_type + '_feed'
return data
|
examples/torch/common/models/__init__.py | MaximProshin/nncf | 310 | 11125867 | <gh_stars>100-1000
from examples.torch.common.models.segmentation import *
from examples.torch.common.models.classification import *
|
python/examples/z_dual_numbers.py | sgillen/tiny-differentiable-simulator | 862 | 11125912 | <filename>python/examples/z_dual_numbers.py
import pytinydiffsim_dual as pd
def auto_diff(f, x):
return f(pd.TinyDualDouble(x, 1.)).dual()
x = pd.TinyDualDouble(2,1)
y = pd.TinyDualDouble(3,0)
print(x)
print(y)
f = (x*x)*x
print(f)
print("pytinydiffsim.TinyDualDouble")
print(auto_diff(lambda x:pd.TinyDualDouble(1.,0)/(x*x*x*x*x), 0.01))
|
tests/components/guardian/__init__.py | domwillcode/home-assistant | 30,023 | 11125986 | """Tests for the Elexa Guardian integration."""
|
api/environments/identities/traits/constants.py | mevinbabuc/flagsmith | 1,259 | 11126055 | <filename>api/environments/identities/traits/constants.py<gh_stars>1000+
from features.value_types import BOOLEAN, FLOAT, INTEGER, STRING
ACCEPTED_TRAIT_VALUE_TYPES = [INTEGER, STRING, BOOLEAN, FLOAT]
TRAIT_STRING_VALUE_MAX_LENGTH = 2000
|
msticpy/vis/matrix_plot.py | kubajir/msticpy | 820 | 11126066 | <reponame>kubajir/msticpy<gh_stars>100-1000
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Bokeh matrix plot."""
import math
from typing import List, Optional, Union
import attr
from bokeh.io import output_notebook, show, reset_output
from bokeh.plotting import figure
from bokeh.models import (
HoverTool,
ColumnDataSource,
LayoutDOM,
)
import numpy as np
import pandas as pd
from ..common.utility import check_kwargs
from .._version import VERSION
__version__ = VERSION
__author__ = "<NAME>"
@attr.s(auto_attribs=True)
class PlotParams:
"""Plot params for time_duration."""
title: Optional[str] = "Interaction Plot"
x: Optional[str] = None
x_col: Optional[str] = None
y: Optional[str] = None
y_col: Optional[str] = None
intersect: bool = False
height: int = 700
width: int = 900
color: str = "red"
value_col: Optional[str] = None
dist_count: bool = False
log_size: bool = False
invert: bool = False
sort: Optional[Union[str, bool]] = None
sort_x: Optional[Union[str, bool]] = None
sort_y: Optional[Union[str, bool]] = None
hide: bool = False
font_size: Optional[int] = None
max_label_font_size: int = 11
@property
def x_column(self) -> Optional[str]:
"""Return the current x column value."""
return self.x or self.x_col
@property
def y_column(self) -> Optional[str]:
"""Return the current y column value."""
return self.y or self.y_col
@classmethod
def field_list(cls) -> List[str]:
"""Return field names as a list."""
return list(attr.fields_dict(cls).keys())
def plot_matrix(data: pd.DataFrame, **kwargs) -> LayoutDOM:
"""
Plot data as an intersection matrix.
Parameters
----------
data : pd.DataFrame
The data to plot.
x : str
Column to plot on the x (horizontal) axis
x_col : str
Alias for 'x'
y : str
Column to plot on the y (vertical) axis
y_col : str
Alias for 'y'
title : str, optional
        Custom title, default is 'Interaction Plot'
value_col : str, optional
Column from the DataFrame used to size the intersection points.
dist_count : bool, optional
Calculates a count of distinct values (from `value_col`) and uses
this to size the intersection points.
Requires `value_col` to be specified.
log_size : bool, optional
Takes the log of the size value before calculating the intersection
display point size.
Can be combined with `invert`.
invert : bool, optional
Takes the inverse of the size value as the basis for calculating
the intersection display point size. This is useful for highlighting
rare interactions.
Can be combined with `log_size`.
intersect : bool, optional
Plots points of a fixed size, rather than using a sizing value. This
is useful for just showing the presence/absence of an interaction.
height : int, optional
The plot height. Default is 700
width : int
The plot width. Default is 900
color : str
The color of the plotted points, default is "red"
sort : Union[str, bool], optional
Sorts the labels of both axes, default is None.
Acceptable values are:
'asc' (or string starting with 'asc') - Sort ascending
        'desc' (or string starting with 'desc') - Sort descending
False or None (no sort)
True - Sort ascending
sort_x : str, optional
Sorts the labels of the x axis (takes precedence over `sort`),
default is None.
Acceptable values are:
'asc' (or string starting with 'asc') - Sort ascending
        'desc' (or string starting with 'desc') - Sort descending
False or None (no sort)
True - Sort ascending
sort_y : str, optional
Sorts the labels of the y axis (takes precedence over `sort`),
default is None.
Acceptable values are:
'asc' (or string starting with 'asc') - Sort ascending
'desc' (or string starting with 'asc') - Sort descending
False or None (no sort)
True - Sort ascending
hide : bool, optional
Creates and returns but does not display the plot, default
is False.
font_size : int, optional
Manually specify the font size for axis labels, in points,
the default is to automatically calculate a size based on the
number of items in each axis.
max_label_font_size : int, optional
The maximum size, in points, of the X and Y labels, default is 11.
Returns
-------
LayoutDOM
The Bokeh plot
"""
# Process/extract parameters
check_kwargs(kwargs, PlotParams.field_list())
param = PlotParams(**kwargs)
if not param.x_column or not param.y_column:
raise ValueError("Must supply `x` and `y` column parameters.")
reset_output()
output_notebook()
plot_data = _prep_data(data, param)
x_range = _sort_labels(plot_data, param.x_column, param.sort_x or param.sort)
y_range = _sort_labels(
plot_data, param.y_column, param.sort_y or param.sort, invert=True
)
# Rescale the size so that it matches the graph
max_size = plot_data["size"].max()
plot_data["plt_size"] = plot_data["size"] * 10 / max_size
source = ColumnDataSource(data=plot_data)
plot = figure(
title=param.title,
plot_width=param.width,
plot_height=param.height,
x_range=x_range,
y_range=y_range,
tools=["wheel_zoom", "box_zoom", "pan", "reset", "save"],
toolbar_location="above",
)
tool_tips = [
(param.x_column, f"@{param.x_column}"),
(param.y_column, f"@{param.y_column}"),
("value", "@size"),
]
plot.add_tools(HoverTool(tooltips=tool_tips))
if param.intersect:
plot.circle_cross(
x=param.x_column,
y=param.y_column,
source=source,
fill_alpha=0.6,
line_color=param.color,
size=5,
)
else:
plot.circle(
x=param.x_column,
y=param.y_column,
source=source,
fill_alpha=0.6,
fill_color=param.color,
size="plt_size",
)
_set_plot_params(plot)
# Calculate appropriate font size for labels
x_label_pt_size = param.font_size or max(
5,
min(
param.max_label_font_size,
int(param.width * 0.6 / plot_data[param.x_column].nunique()),
),
)
y_label_pt_size = param.font_size or max(
5,
min(
param.max_label_font_size,
int(param.height * 0.6 / plot_data[param.y_column].nunique()),
),
)
plot.xaxis.major_label_text_font_size = f"{x_label_pt_size}pt"
plot.yaxis.major_label_text_font_size = f"{y_label_pt_size}pt"
plot.xaxis.axis_label = param.x_column
plot.yaxis.axis_label = param.y_column
if not param.hide:
show(plot)
return plot
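# Hedged usage sketch (the DataFrame and column names below are illustrative
# assumptions): size interaction points by a summed value column, on a log scale.
#
#   plot_matrix(net_df, x="SourceIP", y="DestinationIP",
#               value_col="TotalBytes", log_size=True, sort="asc")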
def _set_plot_params(plot):
plot.title.text_font_size = "15pt"
plot.outline_line_color = None
plot.grid.grid_line_color = "navy"
plot.grid.grid_line_alpha = 0.1
plot.axis.axis_line_color = None
plot.axis.major_tick_line_color = None
plot.xaxis.major_label_orientation = math.pi / 2
plot.xgrid.visible = True
plot.ygrid.visible = True
plot.axis.major_label_standoff = 0
def _sort_labels(data, column, sort_arg, invert=False):
"""Sort the labels if required."""
if sort_arg:
if isinstance(sort_arg, str):
sort_order = sort_arg.casefold().startswith("asc")
else:
sort_order = True
sort_order = not sort_order if invert else sort_order
return data[column].sort_values(ascending=sort_order).unique().tolist()
return data[column].unique().tolist()
def _prep_data(data: pd.DataFrame, param: PlotParams):
"""Process the data to create size column."""
def _size_scale(value_series, log_size, invert):
# local function to scale values
if invert:
# If invert, calculate inverse values on same
# scale as input
max_size = value_series.max()
# min_size = value_series.min()
value_series = (max_size) / value_series
if log_size:
# calc log of values, if requested
return np.log(value_series)
return value_series
if param.value_col is None:
# calculate a count of rows in each group
other_cols = list(set(data.columns) - set([param.x_column, param.y_column]))
if other_cols:
count_col = other_cols[0]
else:
count_col = data.index.name or "index"
data = data.reset_index()
count_rows_df = (
data[[param.x_column, param.y_column, count_col]]
.groupby([param.x_column, param.y_column])
.count()
.rename(columns={count_col: "row_count"})
.reset_index()
)
return count_rows_df.assign(
size=_size_scale(count_rows_df.row_count, param.log_size, param.invert)
)
# if value column was specified, use that
if param.dist_count:
# If distinct count of values required, get nunique
tmp_df = (
data[[param.x_column, param.y_column, param.value_col]]
.groupby([param.x_column, param.y_column])
.nunique()
.reset_index()
)
else:
tmp_df = (
data[[param.x_column, param.y_column, param.value_col]]
.groupby([param.x_column, param.y_column])
.sum()
.reset_index()
)
return tmp_df.assign(
size=lambda x: _size_scale(
tmp_df[param.value_col], param.log_size, param.invert
)
)
|
modules/rfblock.py | ZAKAUDD/LightNet | 737 | 11126110 | <reponame>ZAKAUDD/LightNet<gh_stars>100-1000
import time
import torch
import torch.nn as nn
from modules import InPlaceABN
from collections import OrderedDict
from torch.autograd import Variable
class RFBlock(nn.Module):
def __init__(self, in_chs, out_chs, scale=0.1, feat_res=(56, 112), aspp_sec=(12, 24, 36),
up_ratio=2, norm_act=InPlaceABN):
super(RFBlock, self).__init__()
self.scale = scale
self.down_chs = nn.Sequential(OrderedDict([("norm_act", norm_act(in_chs)),
("down_conv1x1", nn.Conv2d(in_chs, out_chs,
kernel_size=1, stride=1,
padding=0, bias=False))]))
self.gave_pool = nn.Sequential(OrderedDict([("norm_act", norm_act(out_chs)),
("gavg", nn.AdaptiveAvgPool2d((1, 1))),
("conv1_0", nn.Conv2d(out_chs, out_chs,
kernel_size=1, stride=1, padding=0,
groups=1, bias=False, dilation=1)),
("up0", nn.Upsample(size=feat_res, mode='bilinear'))]))
self.branch0 = nn.Sequential(OrderedDict([("norm_act", norm_act(out_chs)),
("conv1x1", nn.Conv2d(out_chs, out_chs,
kernel_size=1, stride=1,
padding=0, bias=False)),
("norm_act", norm_act(out_chs)),
("aconv1", nn.Conv2d(out_chs, out_chs,
kernel_size=3, stride=1,
padding=1, dilation=1,
bias=False))]))
self.branch1 = nn.Sequential(OrderedDict([("norm_act", norm_act(out_chs)),
("conv1x3", nn.Conv2d(out_chs, (out_chs // 2) * 3,
kernel_size=(1, 3), stride=1,
padding=(0, 1), bias=False)),
("norm_act", norm_act((out_chs // 2) * 3)),
("conv3x1", nn.Conv2d((out_chs // 2) * 3, out_chs,
kernel_size=(3, 1), stride=1,
padding=(1, 0), bias=False)),
("norm_act", norm_act(out_chs)),
("aconv3", nn.Conv2d(out_chs, out_chs,
kernel_size=3, stride=1,
padding=aspp_sec[0],
dilation=aspp_sec[0],
bias=False))]))
self.branch2 = nn.Sequential(OrderedDict([("norm_act", norm_act(out_chs)),
("conv1x5", nn.Conv2d(out_chs, (out_chs // 2) * 3,
kernel_size=(1, 5), stride=1,
padding=(0, 2), bias=False)),
("norm_act", norm_act((out_chs // 2) * 3)),
("conv5x1", nn.Conv2d((out_chs // 2) * 3, out_chs,
kernel_size=(5, 1), stride=1,
padding=(2, 0), bias=False)),
("norm_act", norm_act(out_chs)),
("aconv5", nn.Conv2d(out_chs, out_chs,
kernel_size=3, stride=1,
padding=aspp_sec[1],
dilation=aspp_sec[1],
bias=False))]))
self.branch3 = nn.Sequential(OrderedDict([("norm_act", norm_act(out_chs)),
("conv1x7", nn.Conv2d(out_chs, (out_chs // 2) * 3,
kernel_size=(1, 7), stride=1,
padding=(0, 3), bias=False)),
("norm_act", norm_act((out_chs // 2) * 3)),
("conv7x1", nn.Conv2d((out_chs // 2) * 3, out_chs,
kernel_size=(7, 1), stride=1,
padding=(3, 0), bias=False)),
("norm_act", norm_act(out_chs)),
("aconv7", nn.Conv2d(out_chs, out_chs,
kernel_size=3, stride=1,
padding=aspp_sec[2],
dilation=aspp_sec[2],
bias=False))]))
self.conv_linear = nn.Sequential(OrderedDict([("conv1x1_linear", nn.Conv2d(out_chs * 5, out_chs,
kernel_size=1, stride=1,
padding=0, bias=False))]))
self.upsampling = nn.Upsample(size=(int(feat_res[0] * up_ratio),
int(feat_res[1] * up_ratio)),
mode='bilinear')
def forward(self, x):
down = self.down_chs(x)
out = torch.cat([self.gave_pool(down.clone()),
self.branch0(down.clone()),
self.branch1(down.clone()),
self.branch2(down.clone()),
self.branch3(down.clone())], dim=1)
return self.upsampling(torch.add(self.conv_linear(out), self.scale, down)) # out=input+value×other
if __name__ == "__main__":
from functools import partial
from modules import InPlaceABNWrapper
input_chs = 712
output_chs = 256
feat_maps = Variable(torch.randn(1, input_chs, 32, 32).cuda())
rfblocka = RFBlock(in_chs=input_chs, out_chs=output_chs,
scale=0.1, feat_res=(32, 32),
norm_act=partial(InPlaceABNWrapper, activation="leaky_relu", slope=0.1)).cuda()
start_time = time.time()
_ = rfblocka(feat_maps)
end_time = time.time()
print("RFBlock: {}s".format(end_time - start_time))
|
pingo/arduino/test_util_firmata.py | pingo-io/pingo-py | 116 | 11126142 | import unittest
from util_firmata import pin_list_to_board_dict
class FirmataCapabilityDetect(unittest.TestCase):
def test_capability_response(self):
test_layout = {
'digital': (0, 1),
'analog': (0,), # Analog are numbered from zero
'pwm': (1,),
'i2c': (2,),
'disabled': (0,),
}
# Eg: (127)
        unavailable_pin = [
0x7F, # END_SYSEX (Pin delimiter)
]
        # Eg: (0, 1, 1, 1, 3, 8, 127)
digital_pin = [
0x00, # INPUT
0x01,
0x01, # OUTPUT
0x01,
0x03, # PWM
0x08,
0x7F, # END_SYSEX (Pin delimiter)
]
        # Eg. (0, 1, 1, 1, 2, 10, 6, 1, 127)
analog_pin = [
0x00, # INPUT
0x01,
0x01, # OUTPUT
0x01,
0x02, # ANALOG
0x0A,
0x06, # I2C
0x01,
0x7F, # END_SYSEX (Pin delimiter)
]
data_arduino = list(
# [0x6C] # CAPABILITY_RESPONSE
            unavailable_pin
+ digital_pin
+ analog_pin
)
pinmap = pin_list_to_board_dict(data_arduino)
for key in test_layout.keys():
self.assertEqual(pinmap[key], test_layout[key])
if __name__ == '__main__':
unittest.main()
|
netpyne/tutorials/netpyne_tut0.py | adamjhn/netpyne | 120 | 11126144 | <reponame>adamjhn/netpyne
"""
Install NetPyNE tutorials
"""
import os
os.system("mkdir netpyne_tuts && cd netpyne_tuts && export PATH=/bin:/usr/bin && python3 -m venv env && source env/bin/activate && python3 -m pip install --upgrade pip && python3 -m pip install --upgrade ipython && python3 -m pip install --upgrade ipykernel && python3 -m pip install --upgrade jupyter && ipython kernel install --user --name=env && python3 -m pip install --upgrade neuron && git clone https://github.com/Neurosim-lab/netpyne.git && python3 -m pip install -e netpyne && cp -r netpyne/netpyne/tutorials . && cd tutorials && jupyter notebook")
|
src/algorithms/__init__.py | LaudateCorpus1/hermes-5 | 135 | 11126162 | import cf
import content_based
import content_based_kmeans
import performance_metrics
import recommender_helpers
import simple_hybrid |
tests/test_pandas.py | python-pipe/hellp | 123 | 11126194 | import pandas as pd
from sspipe import p, px
def test_px_slice():
df = pd.DataFrame(dict(x=[1, 2, 0], y=[3, 4, 5]))
assert (df | px[(px.x > 1) & (px.x < px.y)].y.sum()) == 4
def test_dataframe():
df = {'x': [0, 1, 2], 'y': [3, 4, 5]} | p(pd.DataFrame)
assert df.shape == (3, 2)
df = [{'x': 0, 'y': 3}, {'x': 1, 'y': 4}, {'x': 2, 'y': 5}] | p(pd.DataFrame)
assert df.shape == (3, 2)
def test_loc_tuple():
df = (
{'x': [0, 1, 2], 'y': [3, 4, 5]}
| p(pd.DataFrame)
| px.loc[px.x > 1, ['y']]
)
assert pd.DataFrame({'y': [5]}, index=[2]).equals(df)
|
applications/CoSimulationApplication/python_scripts/factories/convergence_criterion_factory.py | lkusch/Kratos | 778 | 11126200 | <gh_stars>100-1000
from KratosMultiphysics.CoSimulationApplication.factories import base_factory
def CreateConvergenceCriterion(convergence_criterion_settings, *args):
"""This function creates and returns the Convergence Criterion used for CoSimulation"""
return base_factory.Create(convergence_criterion_settings, [*args], "KratosMultiphysics.CoSimulationApplication.convergence_criteria")
|
Python/control/system_id.py | hpbader42/Klampt | 238 | 11126208 | <reponame>hpbader42/Klampt
from online_leastsq import OnlineLeastSquares
import numpy as np
import math
class LinearSystemID:
"""System identification for a system y = Ax + Bu + C with m state
variables and n inputs."""
def __init__(self,m,n):
self.m,self.n = m,n
self.coeffPattern = [None,None,None]
self.estimators = [OnlineLeastSquares(self.m+self.n+1) for i in xrange(m)]
def setPattern(self,Apattern,Bpattern,Cpattern):
"""The patterns are list-of-lists of size mxm, mxn, and a list of
size m, which indicate whether the (i,j)'th entry of the A,B, and C
matrices are fixed (respectively). A None entry indicates a free
coefficient, while a numeric entry indicates a fixed coefficient.
"""
self.coeffPattern = [Apattern,Bpattern,Cpattern]
for i in xrange(self.m):
self._updateEstimatorSize(i)
def fixA(self,i,j,value):
"""Sets the i,j'th entry of the A matrix to a fixed value"""
if self.coeffPattern[0] == None:
m,n=self.m,self.n
self.coeffPattern[0] = [[None]*m for i in xrange(m)]
self.coeffPattern[0][i][j]=value
self._updateEstimatorSize(i)
def fixB(self,i,j,value):
"""Sets the i,j'th entry of the B matrix to a fixed value"""
if self.coeffPattern[1] == None:
m,n=self.m,self.n
            self.coeffPattern[1] = [[None]*n for i in xrange(m)]
self.coeffPattern[1][i][j]=value
self._updateEstimatorSize(i)
def fixC(self,i,value):
"""Sets the i'th entry of the C vector to a fixed value"""
if self.coeffPattern[2] == None:
m,n=self.m,self.n
self.coeffPattern[2] = [None]*m
self.coeffPattern[2][i]=value
self._updateEstimatorSize(i)
def add(self,x,u,y,weight=1.0):
"""Adds a new datapoint to the estimator"""
assert(len(y)==self.m)
assert(len(x)==self.m)
assert(len(u)==self.n)
if isinstance(x,np.ndarray): x = x.tolist()
if isinstance(u,np.ndarray): u = u.tolist()
xu1 = x + u + [1.0]
if self.coeffPattern == [None,None,None]:
for yi,e in zip(y,self.estimators):
e.add(xu1,yi,weight)
else:
#each row might have some fixed values
for i,(yi,e) in enumerate(zip(y,self.estimators)):
if e == None: continue
(xuc,constOffset) = self._toEstimator(i,x,u)
rhs = yi - constOffset
e.add(xuc,rhs,weight)
return
def discount(self,discountFactor,type='geometric'):
"""Reduces the effects of prior readings."""
for e in self.estimators:
e.discount(discountFactor,type)
return
def setModelPrior(self,A,B,C,priorWeight):
"""Adds in a prior belief for the model.
Must be called AFTER fixing coefficients and BEFORE adding any
datapoints."""
Cpattern = self.coeffPattern[2]
for i in xrange(self.m):
ai = A[i,:].tolist()
bi = B[i,:].tolist()
(xuc,constant) = self._toEstimator(i,ai,bi)
if Cpattern == None or Cpattern[i] == None:
xuc[-1] = C[i]
self.estimators[i].setPrior(np.array(xuc),priorWeight)
return
def getModel(self):
"""Returns the estimated triple (A,B,C) as numpy arrays"""
m,n = self.m,self.n
A = np.zeros((m,m))
B = np.zeros((m,n))
C = np.zeros(m)
Apattern,Bpattern,Cpattern = self.coeffPattern
for i,e in enumerate(self.estimators):
aofs = 0
bofs = m
cofs = m+n
if Apattern==None:
ai = e.x[aofs:m+aofs]
else:
bofs=aofs
ai = []
for j,pj in enumerate(Apattern[i]):
if pj == None:
ai.append(e.x[bofs])
bofs += 1
else:
ai.append(pj)
if Bpattern==None:
bi = e.x[bofs:n+bofs]
else:
cofs=bofs
bi = []
for j,pj in enumerate(Bpattern[i]):
if pj == None:
bi.append(e.x[cofs])
cofs += 1
else:
bi.append(pj)
if Cpattern==None:
ci = e.x[cofs]
cofs+=1
else:
if Cpattern[i] == None:
ci = e.x[cofs]
cofs+=1
else:
ci = Cpattern[i]
assert(cofs == e.n)
assert len(ai)==m
assert len(bi)==n
A[i,:] = ai
B[i,:] = bi
C[i] = ci
return (A,B,C)
def getOutput(self,x,u):
"""Returns the estimate A*x+B*u+c"""
assert(len(x)==self.m)
assert(len(u)==self.n)
if isinstance(x,np.ndarray): x = x.tolist()
if isinstance(u,np.ndarray): u = u.tolist()
dx = []
if self.coeffPattern == [None,None,None]:
xuc = np.array(x + u + [1.0])
for e in self.estimators:
dx.append(np.dot(e.x,xuc))
else:
for i,e in enumerate(self.estimators):
(xuc,constOffset) = self._toEstimator(i,x,u)
dx.append(np.dot(e.x,xuc)+constOffset)
return dx
def _updateEstimatorSize(self,index):
"""Helper."""
Apattern,Bpattern,Cpattern = self.coeffPattern
m,n = self.m,self.n
numFixed = 0
if Apattern!=None:
            numFixed += len([v for v in Apattern[index] if v != None])
if Bpattern!=None:
            numFixed += len([v for v in Bpattern[index] if v != None])
if Cpattern!=None:
if Cpattern[index]!=None:
numFixed += 1
if numFixed==m+n+1:
self.estimators[index]=None
else:
self.estimators[index]=OnlineLeastSquares(m+n+1-numFixed)
return
def _toEstimator(self,index,x,u):
"""Helper: Projects x,u to the pattern xe taken by the index'th
estimator. Returns the pair (xe,constant offset) where the index'th
row of Ax+Bu+C is equal to dot(xe,estimator.coeffs) + constOffset."""
Apattern,Bpattern,Cpattern = self.coeffPattern
xuc = []
constOffset = 0
if Apattern == None:
xuc += x
else:
xuc += [xj for (xj,pj) in zip(x,Apattern[index]) if pj == None]
constOffset += sum([xj*pj for (xj,pj) in zip(x,Apattern[index]) if pj != None])
if Bpattern == None:
xuc += u
else:
xuc += [uj for (uj,pj) in zip(u,Bpattern[index]) if pj == None]
constOffset += sum([uj*pj for (uj,pj) in zip(u,Bpattern[index]) if pj != None])
if Cpattern == None:
xuc += [1.0]
else:
constOffset = Cpattern[index]
return (xuc,constOffset)
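# Example usage (illustrative sketch; `observations` is a hypothetical iterable of
# (state, input, next state) triples):
#
#   sysid = LinearSystemID(m=2, n=1)
#   sysid.fixA(0, 1, 1.0)            # pin a known entry of A
#   sysid.fixC(0, 0.0)               # pin a known entry of C
#   for (x, u, y) in observations:
#       sysid.add(x, u, y)
#   A, B, C = sysid.getModel()
#   y_pred = sysid.getOutput(x, u)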
def testLinearSystemID(A,B,C,N,xScale=1,uScale=1):
m,n = B.shape
sysid = LinearSystemID(m,n)
print "Actual model:",
print "A:"
print A
print "B:"
print B
print "C:"
print C
X = [np.random.rand(m)*xScale*2-np.ones(m)*xScale for i in range(N)]
U = [np.random.rand(n)*uScale-np.ones(n)*uScale for i in range(N)]
for x,u in zip(X,U):
y = np.dot(A,x)+np.dot(B,u)+C
sysid.add(x,u,y)
print "Estimated model:",
eA,eB,eC=sysid.getModel()
print "A:"
print eA
print "B:"
print eB
print "C:"
print eC
resid = 0.0
for x,u in zip(X,U):
y = np.dot(A,x)+np.dot(B,u)+C
        ey = np.dot(eA,x)+np.dot(eB,u)+eC
#print "Error",np.dot(ey,ey)
resid += np.dot(ey-y,ey-y)
print "RMSE",math.sqrt(resid/len(X))
def testNonlinearSystemID(func,m,n,N,discountParams,x0,uScale=1):
sysid = LinearSystemID(m,n)
sysid.add(x0,np.zeros(n),x0)
if not hasattr(discountParams,'__iter__'):
discountParams = (discountParams,'geometric')
#simulate a random trace
X = [x0]
U = []
for i in xrange(N):
u = np.random.rand(n)*uScale*2.0 - np.ones(n)*uScale
U.append(u)
X.append(func(X[-1],u))
resid = 0.0
discresid = 0.0
errors = []
for x,u,xn in zip(X[:-1],U,X[1:]):
xpred = sysid.getOutput(x,u)
e2 = np.dot(xn-xpred,xn-xpred)
errors.append(e2)
resid += e2
discount = discountParams[0] if discountParams[1]=='geometric' else (1.0/discountParams[0]-1)/sysid.estimators[0].sumWeight
discresid = discresid*discount + e2
sysid.discount(*discountParams)
sysid.add(x,u,xn)
print "Nonlinear estimation with discount",discountParams
print "Final estimated model:",
eA,eB,eC=sysid.getModel()
print "A:"
print eA
print "B:"
print eB
print "C:"
print eC
print "RMSE",math.sqrt(resid/(len(X)-1))
print "Discounted squared errors",discresid
#print "Errors",errors
if __name__=='__main__':
print "Testing standard double integrator with offset"""
mass = 5.0
f = 1.0
A = np.array([[0,1],[0,0]])
B = np.array([[0],[1/mass]])
C = np.array([0,f/mass])
testLinearSystemID(A,B,C,100)
def ABCFunc(x,u):
dt = 0.1
return x + dt*(np.dot(A,x)+np.dot(B,u)+C)
x0 = np.array([0,0])
"""
testNonlinearSystemID(ABCFunc,2,1,100,1.0,x0)
testNonlinearSystemID(ABCFunc,2,1,100,0.95,x0)
testNonlinearSystemID(ABCFunc,2,1,100,0.9,x0)
testNonlinearSystemID(ABCFunc,2,1,100,0.8,x0)
exit(1)
"""
print "Testing damped pendulum with gravity"""
def pendulumFunc(x,u):
theta,dtheta = x
torque = u[0]
g = -9.8
dt = 0.1
mass = 1
kD = 0.5
ddtheta = torque/mass + math.sin(theta)*g - kD*dtheta
return np.array([theta+dt*dtheta,dtheta+dt*ddtheta])
x0 = np.array([math.pi*0.5,0])
testNonlinearSystemID(pendulumFunc,2,1,1000,1.0,x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,0.99,x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,0.95,x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,(0.01,'hyperbolic'),x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,(0.05,'hyperbolic'),x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,(0.1,'hyperbolic'),x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,(0.2,'hyperbolic'),x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,(0.5,'hyperbolic'),x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,(0.75,'hyperbolic'),x0)
"""
testNonlinearSystemID(pendulumFunc,2,1,1000,0.9,x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,0.8,x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,0.7,x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,0.5,x0)
testNonlinearSystemID(pendulumFunc,2,1,1000,0.25,x0)
"""
print "Actual:"
print "[q' ] = [ 1, 0.1 ]*[q ] + [0 ]*u + [0 ]"
print "[dq'] [ 0, 1 ] [dq] [0.1] [-.98 sin(q) - 0.05 dq]"
|
conans/server/rest/controller/v2/conan.py | matthiasng/conan | 6,205 | 11126211 | <reponame>matthiasng/conan<filename>conans/server/rest/controller/v2/conan.py
from bottle import request
from conans.errors import NotFoundException
from conans.model.ref import ConanFileReference
from conans.server.rest.bottle_routes import BottleRoutes
from conans.server.rest.controller.v2 import get_package_ref
from conans.server.service.v2.service_v2 import ConanServiceV2
class ConanControllerV2(object):
@staticmethod
def attach_to(app):
conan_service = ConanServiceV2(app.authorizer, app.server_store)
r = BottleRoutes()
@app.route(r.package_revision_files, method=["GET"])
def get_package_file_list(name, version, username, channel, package_id, auth_user,
revision, p_revision):
pref = get_package_ref(name, version, username, channel, package_id,
revision, p_revision)
ret = conan_service.get_package_file_list(pref, auth_user)
return ret
@app.route(r.package_revision_file, method=["GET"])
def get_package_file(name, version, username, channel, package_id, the_path, auth_user,
revision, p_revision):
pref = get_package_ref(name, version, username, channel, package_id,
revision, p_revision)
file_generator = conan_service.get_package_file(pref, the_path, auth_user)
return file_generator
@app.route(r.package_revision_file, method=["PUT"])
def upload_package_file(name, version, username, channel, package_id,
the_path, auth_user, revision, p_revision):
if "X-Checksum-Deploy" in request.headers:
raise NotFoundException("Non checksum storage")
pref = get_package_ref(name, version, username, channel, package_id,
revision, p_revision)
conan_service.upload_package_file(request.body, request.headers, pref,
the_path, auth_user)
@app.route(r.recipe_revision_files, method=["GET"])
def get_recipe_file_list(name, version, username, channel, auth_user, revision):
ref = ConanFileReference(name, version, username, channel, revision)
ret = conan_service.get_recipe_file_list(ref, auth_user)
return ret
@app.route(r.recipe_revision_file, method=["GET"])
def get_recipe_file(name, version, username, channel, the_path, auth_user, revision):
ref = ConanFileReference(name, version, username, channel, revision)
file_generator = conan_service.get_conanfile_file(ref, the_path, auth_user)
return file_generator
@app.route(r.recipe_revision_file, method=["PUT"])
def upload_recipe_file(name, version, username, channel, the_path, auth_user, revision):
if "X-Checksum-Deploy" in request.headers:
raise NotFoundException("Not a checksum storage")
ref = ConanFileReference(name, version, username, channel, revision)
conan_service.upload_recipe_file(request.body, request.headers, ref, the_path, auth_user)
|
toyplot/style.py | eaton-lab/toyplot | 438 | 11126239 | <gh_stars>100-1000
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
"""Functionality for working with CSS style information."""
import numbers
import numpy
import toyplot.color
def require(css, allowed):
"""Validate that an object is usable as CSS style information.
Parameters
----------
css: dict or None
The style dictionary to be validated. An exception will be raised if it is
not a valid style dictionary or None.
allowed: sequence of strings
The set of allowed style properties. An exception will be raised if `css`
contains any keys that aren't in this sequence.
Returns
-------
style: dict
The validated style dictionary.
"""
if css is None:
return css
if not isinstance(css, dict):
raise ValueError("Expected a dictionary of CSS styles or None, received %s." % css) # pragma: no cover
for key, value in css.items():
if key not in allowed:
raise ValueError("Not an allowed CSS style: %s. Use one of: %s" % (key, ", ".join(allowed))) # pragma: no cover
if isinstance(value, numpy.ndarray) and value.dtype == toyplot.color.dtype:
css[key] = toyplot.color.to_css(value)
return css
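# Example (illustrative sketch): validating a style dictionary against the set of
# properties allowed for filled areas.
#
#   style = require({"fill": "steelblue", "opacity": 0.5}, allowed.fill)
#   # require({"font-size": "12px"}, allowed.fill) raises ValueError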
class allowed(object):
"""Defines groups of allowable CSS property names."""
#: Allowable CSS property names for filling areas.
fill = set([
"fill",
"fill-opacity",
"opacity",
"stroke",
"stroke-dasharray",
"stroke-opacity",
"stroke-width",
])
#: Allowable CSS property names for stroking lines.
line = set([
"opacity",
"stroke",
"stroke-dasharray",
"stroke-linecap",
"stroke-opacity",
"stroke-width",
])
#: Allowable CSS property names for :ref:`markers`.
marker = set([
"fill",
"fill-opacity",
"opacity",
"stroke",
"stroke-opacity",
"stroke-width",
])
#: Allowable CSS property names for text.
text = set([
"alignment-baseline",
"baseline-shift",
"fill",
"fill-opacity",
"font-family",
"font-size",
"font-weight",
"line-height",
"opacity",
"stroke",
"stroke-opacity",
"stroke-width",
"text-anchor",
"text-decoration-line",
"text-shadow",
"-toyplot-anchor-shift",
"-toyplot-text-layout-box-visibility",
"-toyplot-text-layout-line-visibility",
"-toyplot-text-layout-visibility",
"-toyplot-vertical-align",
])
def combine(*styles):
"""Combine multiple style specifications into one.
Parameters
----------
styles: sequence of :class:`dict` instances
A collection of dicts containing CSS-compatible name-value pairs.
Returns
-------
styles: :class:`dict` containing CSS-compatible name-value pairs.
"""
computed_style = {}
for style in styles:
if style is not None:
computed_style.update(style)
return computed_style
def parse(css):
"""Parse a CSS style into a dict."""
result = {}
for declaration in css.split(";"):
if declaration:
key, value = declaration.split(":")
result[key] = value.strip()
return result
def _color_fixup(styles):
"""It turns-out that many applications and libraries (Inkscape, Adobe Illustrator, Qt)
don't handle CSS rgba() colors correctly. So convert them to CSS rgb colors and use
fill-opacity / stroke-opacity instead."""
if "fill" in styles:
color = toyplot.color.css(styles["fill"])
if color is not None:
opacity = float(styles.get("fill-opacity", 1.0))
styles["fill"] = "rgb(%.3g%%,%.3g%%,%.3g%%)" % (
color["r"] * 100, color["g"] * 100, color["b"] * 100)
styles["fill-opacity"] = str(color["a"] * opacity)
if "stroke" in styles:
color = toyplot.color.css(styles["stroke"])
if color is not None:
opacity = float(styles.get("stroke-opacity", 1.0))
styles["stroke"] = "rgb(%.3g%%,%.3g%%,%.3g%%)" % (
color["r"] * 100, color["g"] * 100, color["b"] * 100)
styles["stroke-opacity"] = str(color["a"] * opacity)
return styles
def to_css(*styles):
"""Convert one-or-more dicts containing CSS properties into a single CSS string."""
declarations = []
for key, value in sorted( _color_fixup(combine(*styles)).items()):
if isinstance(value, numbers.Number):
value = "{:.6g}".format(value)
declarations.append("%s:%s" % (key, value))
return ";".join(declarations)
|
cacreader/swig-4.0.2/Examples/test-suite/python/python_extranative_runme.py | kyletanyag/LL-Smartcard | 1,031 | 11126316 | import python_extranative
vs = python_extranative.make_vector_string()
if not isinstance(vs, python_extranative.VectorString):
# will be of type tuple if extranative not working
raise RuntimeError("Not of type VectorString")
for s1, s2 in zip(vs, ["one", "two"]):
if s1 != s2:
raise RuntimeError("Mismatch: " + s1 + " " + s2)
|
depricated/smoothline.py | ksachdeva/distfit | 126 | 11126352 | # ----------------------------------------------------
# Name : smoothline.py
# Author : E.Taskesen
# Contact : <EMAIL>
# Licence : MIT
# ----------------------------------------------------
import numpy as np
from scipy.interpolate import make_interp_spline
def smoothline(xs, ys=None, interpol=3, window=1, verbose=3):
"""Smoothing 1D vector.
Description
-----------
    Smoothing a 1d vector can be challenging if the data is sparsely sampled.
    This smoothing function therefore consists of two steps: interpolation of the
    input line, followed by a convolution.
Parameters
----------
xs : array-like
Data points for the x-axis.
ys : array-like
Data points for the y-axis.
interpol : int, (default : 3)
        The interpolation factor. The data is interpolated by a factor n before the smoothing step.
window : int, (default : 1)
        Smoothing window that is used to create the convolution and gradually smooth the line.
verbose : int [1-5], default: 3
Print information to screen. A higher number will print more.
Returns
-------
xnew : array-like
Data points for the x-axis.
ynew : array-like
Data points for the y-axis.
"""
if window is not None:
if verbose>=3: print('[smoothline] >Smoothing by interpolation..')
# Specify number of points to interpolate the data
# Interpolate
extpoints = np.linspace(0, len(xs), len(xs) * interpol)
spl = make_interp_spline(range(0, len(xs)), xs, k=3)
# Compute x-labels
xnew = spl(extpoints)
xnew[window:-window]
# First smoothing on the raw input data
ynew=None
if ys is not None:
ys = _smooth(ys,window)
# Interpolate ys line
spl = make_interp_spline(range(0, len(ys)), ys, k=3)
ynew = spl(extpoints)
ynew[window:-window]
else:
xnew, ynew = xs, ys
return xnew, ynew
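# Example usage (illustrative sketch):
#
#   import numpy as np
#   xs = np.arange(10)
#   ys = np.sin(xs) + np.random.randn(10) * 0.1
#   xnew, ynew = smoothline(xs, ys, interpol=3, window=3)
#   # ys is box-filtered with a window of 3 and both series are spline-interpolated
#   # to roughly 3x as many points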
def _smooth(X, window):
box = np.ones(window) / window
X_smooth = np.convolve(X, box, mode='same')
return X_smooth
|
lisa/core/__init__.py | jrespeto/LiSa | 244 | 11126409 | """
Core package.
"""
|
GPy/examples/__init__.py | ekalosak/GPy | 1,685 | 11126410 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Introduction
^^^^^^^^^^^^
The examples in this package usually depend on `pods <https://github.com/sods/ods>`_ so make sure
you have that installed before running examples. The easiest way to do this is to run `pip install pods`. `pods` enables access to 3rd party data required for most of the examples.
The examples are executable and self-contained workflows in that they have their own source data, create their own models, kernels and other objects as needed, execute optimisation as required, and display output.
Viewing the source code of each model will clarify the steps taken in its execution, and may provide inspiration for developing of user-specific applications of `GPy`.
"""
from . import classification
from . import regression
from . import dimensionality_reduction
from . import non_gaussian
|
community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/step_by_step_sdk_tf_agents_bandits_movie_recommendation/src/tests/test_policy_util.py | gogasca/vertex-ai-samples | 213 | 11126417 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The unit testing module for policy_util."""
import functools
import unittest
from src.training import policy_util
import tensorflow as tf
from tf_agents.bandits.agents import lin_ucb_agent
from tf_agents.bandits.environments import environment_utilities
from tf_agents.bandits.environments import movielens_py_environment
from tf_agents.bandits.metrics import tf_metrics as tf_bandit_metrics
from tf_agents.environments import tf_py_environment
# Paths and configurations
DATA_PATH = "gs://[your-bucket-name]/[your-dataset-dir]/u.data" # FILL IN
ROOT_DIR = "gs://[your-bucket-name]/artifacts" # FILL IN
ARTIFACTS_DIR = "gs://[your-bucket-name]/artifacts" # FILL IN
# Hyperparameters
BATCH_SIZE = 8
# MovieLens simulation environment parameters
RANK_K = 20
NUM_ACTIONS = 20
PER_ARM = False
# Agent parameters
TIKHONOV_WEIGHT = 0.001
AGENT_ALPHA = 10.0
# Metric names
DEFAULT_METRIC_NAMES = frozenset({
"NumberOfEpisodes", "AverageReturnMetric", "AverageEpisodeLengthMetric"})
class TestPolicyUtil(unittest.TestCase):
"""Test class for the policy_util module."""
def setUp(self): # pylint: disable=g-missing-super-call
# Define RL environment.
env = movielens_py_environment.MovieLensPyEnvironment(
DATA_PATH, RANK_K, BATCH_SIZE,
num_movies=NUM_ACTIONS, csv_delimiter="\t")
self.environment = tf_py_environment.TFPyEnvironment(env)
# Define RL agent/algorithm.
self.agent = lin_ucb_agent.LinearUCBAgent(
time_step_spec=self.environment.time_step_spec(),
action_spec=self.environment.action_spec(),
tikhonov_weight=TIKHONOV_WEIGHT,
alpha=AGENT_ALPHA,
dtype=tf.float32,
accepts_per_arm_features=PER_ARM)
# Define RL metric.
optimal_reward_fn = functools.partial(
environment_utilities.compute_optimal_reward_with_movielens_environment,
environment=self.environment)
self.regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
def test_run_training_set_invalid_root_dir(self):
"""Invalid root directory for saving training artifacts."""
with self.assertRaises(tf.errors.FailedPreconditionError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=False,
root_dir="\0",
artifacts_dir=ARTIFACTS_DIR)
def test_run_training_set_invalid_artifacts_dir(self):
"""Invalid artifacts directory for saving training artifacts."""
with self.assertRaises(tf.errors.FailedPreconditionError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=False,
root_dir=ROOT_DIR,
artifacts_dir="\0")
def test_run_training_set_zero_training_loops(self):
"""Training with zero training loops."""
metric_results = policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=0,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=False,
root_dir=ROOT_DIR,
artifacts_dir=ARTIFACTS_DIR)
self.assertIsInstance(metric_results, dict)
self.assertFalse(metric_results)
def test_run_training_set_negative_training_loops(self):
"""Training with a negative number of training loops."""
metric_results = policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=-1,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=False,
root_dir=ROOT_DIR,
artifacts_dir=ARTIFACTS_DIR)
self.assertIsInstance(metric_results, dict)
self.assertFalse(metric_results)
def test_run_training_set_float_training_loops(self):
"""Training with a floating-point number of training loops."""
with self.assertRaises(TypeError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=0.5,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=False,
root_dir=ROOT_DIR,
artifacts_dir=ARTIFACTS_DIR)
def test_run_training_set_zero_steps_per_loop(self):
"""Training with a zero number of steps per training loop."""
with self.assertRaises(tf.errors.InvalidArgumentError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=0,
additional_metrics=[],
run_hyperparameter_tuning=False,
root_dir=ROOT_DIR,
artifacts_dir=ARTIFACTS_DIR)
def test_run_training_set_negative_steps_per_loop(self):
"""Training with a negative number of steps per training loop."""
with self.assertRaises(tf.errors.InvalidArgumentError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=-1,
additional_metrics=[],
run_hyperparameter_tuning=False,
root_dir=ROOT_DIR,
artifacts_dir=ARTIFACTS_DIR)
def test_run_training_set_float_steps_per_loop(self):
"""Training with a floating-point number of steps per training loop."""
with self.assertRaises(TypeError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=0.5,
additional_metrics=[],
run_hyperparameter_tuning=False,
root_dir=ROOT_DIR,
artifacts_dir=ARTIFACTS_DIR)
def test_run_training_set_no_additional_metrics(self):
"""Training with default metrics."""
training_loops = 1
metric_results = policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=training_loops,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=False,
root_dir=ROOT_DIR,
artifacts_dir=ARTIFACTS_DIR)
self.assertIsInstance(metric_results, dict)
self.assertEqual(metric_results.keys(), DEFAULT_METRIC_NAMES)
for metric_name in DEFAULT_METRIC_NAMES:
# There are `training_loops` number of intermediate metric values.
self.assertEqual(len(metric_results[metric_name]), training_loops)
def test_run_training_set_additional_metrics(self):
"""Training with an additional metric."""
training_loops = 1
metric_results = policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=training_loops,
steps_per_loop=2,
additional_metrics=[self.regret_metric],
run_hyperparameter_tuning=False,
root_dir=ROOT_DIR,
artifacts_dir=ARTIFACTS_DIR)
self.assertIsInstance(metric_results, dict)
total_metric_names = DEFAULT_METRIC_NAMES.union(
{type(self.regret_metric).__name__})
self.assertEqual(metric_results.keys(), total_metric_names)
for metric_name in total_metric_names:
# There are `training_loops` number of intermediate metric values.
self.assertEqual(len(metric_results[metric_name]), training_loops)
def test_run_hyperparameter_tuning_set_root_dir(self):
"""Setting root directory for hyperparameter tuning.
Hyperparameter tuning doesn't save artifacts to `root_dir`.
"""
with self.assertRaises(ValueError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=True,
root_dir="./")
def test_run_hyperparameter_tuning_set_artifacts_dir(self):
"""Setting artifacts directory for hyperparameter tuning.
Hyperparameter tuning doesn't save artifacts to `artifacts_dir`.
"""
with self.assertRaises(ValueError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=True,
artifacts_dir="./")
def test_run_hyperparameter_tuning_set_zero_training_loops(self):
"""Hyperparameter tuning with zero training loops."""
metric_results = policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=0,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=True)
self.assertIsInstance(metric_results, dict)
self.assertFalse(metric_results)
def test_run_hyperparameter_tuning_set_negative_training_loops(self):
"""Hyperparameter tuning with a negative number of training loops."""
metric_results = policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=-1,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=True)
self.assertIsInstance(metric_results, dict)
self.assertFalse(metric_results)
def test_run_hyperparameter_tuning_set_float_training_loops(self):
"""Hyperparameter tuning with a floating-point number of training loops."""
with self.assertRaises(TypeError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=0.5,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=True)
def test_run_hyperparameter_tuning_set_zero_steps_per_loop(self):
"""Hyperparameter tuning with a zero number of steps per training loop."""
with self.assertRaises(tf.errors.InvalidArgumentError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=0,
additional_metrics=[],
run_hyperparameter_tuning=True)
def test_run_hyperparameter_tuning_set_negative_steps_per_loop(self):
"""Hyperparameter tuning with a negative number of steps per training loop.
"""
with self.assertRaises(tf.errors.InvalidArgumentError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=-1,
additional_metrics=[],
run_hyperparameter_tuning=True)
def test_run_hyperparameter_tuning_set_float_steps_per_loop(self):
"""Hyperparameter tuning with a float number of steps per training loop."""
with self.assertRaises(TypeError):
policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=2,
steps_per_loop=0.5,
additional_metrics=[],
run_hyperparameter_tuning=True)
def test_run_hyperparameter_tuning_set_no_additional_metrics(self):
"""Hyperparameter tuning with default metrics."""
training_loops = 1
metric_results = policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=training_loops,
steps_per_loop=2,
additional_metrics=[],
run_hyperparameter_tuning=True)
self.assertIsInstance(metric_results, dict)
self.assertEqual(metric_results.keys(), DEFAULT_METRIC_NAMES)
for metric_name in DEFAULT_METRIC_NAMES:
# There are `training_loops` number of intermediate metric values.
self.assertEqual(len(metric_results[metric_name]), training_loops)
def test_run_hyperparameter_tuning_set_additional_metrics(self):
"""Hyperparameter tuning with an additional metric."""
training_loops = 1
metric_results = policy_util.train(
agent=self.agent,
environment=self.environment,
training_loops=training_loops,
steps_per_loop=2,
additional_metrics=[self.regret_metric],
run_hyperparameter_tuning=True)
self.assertIsInstance(metric_results, dict)
total_metric_names = DEFAULT_METRIC_NAMES.union(
{type(self.regret_metric).__name__})
self.assertEqual(metric_results.keys(), total_metric_names)
for metric_name in total_metric_names:
# There are `training_loops` number of intermediate metric values.
self.assertEqual(len(metric_results[metric_name]), training_loops)
if __name__ == "__main__":
unittest.main()
|
libheap/frontend/commands/gdb/mstats.py | saidelike/libheap | 498 | 11126420 | <filename>libheap/frontend/commands/gdb/mstats.py<gh_stars>100-1000
from __future__ import print_function
import sys
try:
import gdb
except ImportError:
print("Not running inside of GDB, exiting...")
sys.exit()
from libheap.frontend.printutils import print_error
from libheap.frontend.printutils import print_value
from libheap.frontend.printutils import print_header
from libheap.ptmalloc.ptmalloc import ptmalloc
from libheap.ptmalloc.malloc_chunk import malloc_chunk
from libheap.ptmalloc.malloc_state import malloc_state
class mstats(gdb.Command):
"print general malloc stats, adapted from malloc.c mSTATs()"
def __init__(self, debugger=None, version=None):
super(mstats, self).__init__("mstats", gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE)
if debugger is not None:
self.dbg = debugger
else:
print_error("Please specify a debugger")
sys.exit()
self.version = version
def invoke(self, arg, from_tty):
"Specify an optional arena addr: print_mstats main_arena=0x12345"
ptm = ptmalloc(debugger=self.dbg)
if ptm.SIZE_SZ == 0:
ptm.set_globals()
try:
# XXX: add mp_ address guessing via offset without symbols
mp = self.dbg.read_variable("mp_")
if arg.find("main_arena") == -1:
main_arena = self.dbg.read_variable("main_arena")
main_arena_address = self.dbg.format_address(
main_arena.address)
else:
arg = arg.split()
for item in arg:
if item.find("main_arena") != -1:
if len(item) < 12:
print_error("Malformed main_arena parameter")
return
else:
main_arena_address = int(item[11:], 16)
except RuntimeError:
print_error("No frame is currently selected.")
return
except ValueError:
print_error("Debug glibc was not found.")
return
if main_arena_address == 0:
print_error("Invalid main_arena address (0)")
return
in_use_b = mp['mmapped_mem']
system_b = in_use_b
print("Malloc Stats", end="\n\n")
arena = 0
ar_ptr = malloc_state(main_arena_address, debugger=self.dbg,
version=self.version)
while(1):
ptm.mutex_lock(ar_ptr)
# account for top
avail = ptm.chunksize(malloc_chunk(ptm.top(ar_ptr), inuse=True,
read_data=False, debugger=self.dbg))
nblocks = 1
nfastblocks = 0
fastavail = 0
# traverse fastbins
for i in range(ptm.NFASTBINS):
p = ptm.fastbin(ar_ptr, i)
while p != 0:
p = malloc_chunk(p, inuse=False, debugger=self.dbg)
nfastblocks += 1
fastavail += ptm.chunksize(p)
p = p.fd
avail += fastavail
# traverse regular bins
for i in range(1, ptm.NBINS):
b = ptm.bin_at(ar_ptr, i)
first = malloc_chunk(b, inuse=False, debugger=self.dbg)
first = ptm.first(first)
p = malloc_chunk(first, inuse=False, debugger=self.dbg)
while p.address != int(b):
nblocks += 1
avail += ptm.chunksize(p)
p = malloc_chunk(ptm.first(p), inuse=False,
debugger=self.dbg)
print_header("Arena {}:".format(arena), end="\n")
print("{:16} = ".format("system bytes"), end='')
print_value("{}".format(ar_ptr.max_system_mem), end='\n')
print("{:16} = ".format("in use bytes"), end='')
print_value("{}".format(ar_ptr.max_system_mem - avail), end='\n')
system_b += ar_ptr.max_system_mem
in_use_b += (ar_ptr.max_system_mem - avail)
ptm.mutex_unlock(ar_ptr)
if ar_ptr.next == main_arena_address:
break
else:
next_addr = self.dbg.format_address(ar_ptr.next)
ar_ptr = malloc_state(next_addr, debugger=self.dbg,
version=self.version)
arena += 1
print_header("\nTotal (including mmap):", end="\n")
print("{:16} = ".format("system bytes"), end='')
print_value("{}".format(system_b), end='\n')
print("{:16} = ".format("in use bytes"), end='')
print_value("{}".format(in_use_b), end='\n')
# XXX: max_total_mem removed in 2.24
try:
# catch the error before we print anything
val = mp['max_total_mem']
print("{:16} = ".format("max system bytes"), end='')
print_value("{}".format(val), end='\n')
except gdb.error:
pass
print("{:16} = ".format("max mmap regions"), end='')
print_value("{}".format(mp['max_n_mmaps']), end='\n')
print("{:16} = ".format("max mmap bytes"), end='')
print_value("{}".format(mp['max_mmapped_mem']), end='\n')
|
stats/models/Game.py | twist3dimages/nba-sql | 113 | 11126421 | from peewee import (
ForeignKeyField,
IntegerField,
DateField,
Model
)
from . import Team
class Game(Model):
# Primary Key
game_id = IntegerField(primary_key=True)
# Foreign Keys
team_id_home = ForeignKeyField(Team, index=True, null=False, column_name='team_id_home')
team_id_away = ForeignKeyField(Team, index=True, null=False, column_name='team_id_away')
team_id_winner = ForeignKeyField(Team, index=True, null=False, column_name='team_id_winner')
team_id_loser = ForeignKeyField(Team, index=True, null=False, column_name='team_id_loser')
# Indexes
season_id = IntegerField(index=True, null=False)
date = DateField(null=False)
class Meta:
db_table = 'game'
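# Example query (illustrative sketch; `team` is a hypothetical Team instance):
#
#   home_games = (Game
#                 .select()
#                 .where((Game.team_id_home == team) & (Game.season_id == 22019)))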
|
strawberry/utils/mixins.py | otakuy/strawberry | 2,062 | 11126442 | <reponame>otakuy/strawberry
from typing import Optional
from strawberry.utils.str_converters import to_camel_case
class GraphQLNameMixin:
python_name: str
graphql_name: Optional[str]
def get_graphql_name(self, auto_camel_case: bool) -> str:
if self.graphql_name is not None:
return self.graphql_name
assert self.python_name
if auto_camel_case:
return to_camel_case(self.python_name)
return self.python_name
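# Example (illustrative sketch): for a field with python_name "user_name" and
# graphql_name None,
#
#   field.get_graphql_name(auto_camel_case=True)   # -> "userName"
#   field.get_graphql_name(auto_camel_case=False)  # -> "user_name"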
|
sqlbeautifier.py | V-Darr/SqlBeautifier | 179 | 11126452 | <reponame>V-Darr/SqlBeautifier
import sublime, sublime_plugin
import sys
import os
sys.path.append(os.path.dirname(__file__))
if sys.version_info >= (3, 0):
import sqlparse3 as sqlparse
else:
import sqlparse2 as sqlparse
# for ST2
settings = sublime.load_settings('SQL Beautifier.sublime-settings')
# for ST3
def plugin_loaded():
global settings
settings = sublime.load_settings('SQL Beautifier.sublime-settings')
class SqlBeautifierCommand(sublime_plugin.TextCommand):
def format_sql(self, raw_sql):
try:
formatted_sql = sqlparse.format(raw_sql,
keyword_case=settings.get("keyword_case"),
identifier_case=settings.get("identifier_case"),
strip_comments=settings.get("strip_comments"),
indent_tabs=settings.get("indent_tabs"),
indent_width=settings.get("indent_width"),
reindent=settings.get("reindent")
)
if self.view.settings().get('ensure_newline_at_eof_on_save'):
formatted_sql += "\n"
return formatted_sql
except Exception as e:
print(e)
return None
def replace_region_with_formatted_sql(self, edit, region):
selected_text = self.view.substr(region)
        formatted_text = self.format_sql(selected_text)
        self.view.replace(edit, region, formatted_text)
def run(self, edit):
window = self.view.window()
view = window.active_view()
for region in self.view.sel():
if region.empty():
selection = sublime.Region(0, self.view.size())
self.replace_region_with_formatted_sql(edit, selection)
self.view.set_syntax_file("Packages/SQL/SQL.tmLanguage")
else:
self.replace_region_with_formatted_sql(edit, region) |
exercises/all-your-base/all_your_base.py | kishankj/python | 1,177 | 11126454 | def rebase(input_base, digits, output_base):
pass
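# One possible implementation sketch (the stub above is the exercise starting point):
#
#   def rebase(input_base, digits, output_base):
#       value = 0
#       for digit in digits:
#           value = value * input_base + digit
#       out = []
#       while value:
#           out.append(value % output_base)
#           value //= output_base
#       return out[::-1] or [0]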
|
web/webViews/dashboard.py | anbo225/docklet | 273 | 11126474 | from flask import session,render_template
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest
class dashboardView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
result = dockletRequest.post_to_all('/cluster/list/')
desc = dockletRequest.getalldesc()
allclusters={}
for master in result:
clusters = result[master].get("clusters")
full_clusters = []
data={}
for cluster in clusters:
data["clustername"] = cluster
single_cluster = {}
single_cluster['name'] = cluster
message = dockletRequest.post("/cluster/info/", data , master.split("@")[0])
if(message):
message = message.get("message")
single_cluster['status'] = message['status']
single_cluster['id'] = message['clusterid']
single_cluster['proxy_public_ip'] = message['proxy_public_ip']
full_clusters.append(single_cluster)
else:
self.error()
allclusters[master] = full_clusters
return self.render(self.template_path, allclusters = allclusters, desc=desc)
#else:
# self.error()
@classmethod
def post(self):
return self.get()
|
src/models/model_config.py | zju-3dv/multi-person3dpose | 391 | 11126477 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
class ModelConfig ( object ):
model_dir = os.path.abspath ( os.path.join ( os.path.dirname ( __file__ ) ) )
root_dir = os.path.abspath ( os.path.join ( model_dir, '..', '..' ) )
datasets_dir = os.path.join ( root_dir, 'datasets' )
shelf_path = os.path.join ( datasets_dir, 'Shelf' )
campus_path = os.path.join ( datasets_dir, 'CampusSeq1' )
ultimatum1_path = os.path.join ( datasets_dir, '160422_ultimatum1', 'vgaImgs' )
shelf_range = range ( 300, 600 )
campus_range = [i for i in range ( 350, 471 )] + [i for i in range ( 650, 751 )]
vga_frame_rate = 25
ultimatum1_range = list ( range ( 17337, 17370 ) ) + list ( range ( 21560, 21660 ) )
joint_num = 17
rerank = False
use_mincut = False
metric = 'geometry mean'
testing_on = 'Shelf'
reprojection_refine = False
refine_threshold = 1
semantic_matching = False
match_SVT = True
dual_stochastic_SVT = False
lambda_SVT = 50
alpha_SVT = 0.5
eta = 1.5
beta = 0.5
use_bundle = False
spectral = True
hybrid = True
def __repr__(self):
if self.semantic_matching:
return f'testing_on: {self.testing_on} eta:{self.eta} metric: {self.metric}'
elif self.match_SVT:
return f'testing_on: {self.testing_on} alpha:{self.alpha_SVT} lambda:{self.lambda_SVT}'
else:
return f'testing_on: {self.testing_on} beta:{self.beta} metric: {self.metric}'
model_cfg = ModelConfig ()
if model_cfg.root_dir not in sys.path:
sys.path.append ( model_cfg.root_dir )
|
corehq/apps/sso/utils/session_helpers.py | akashkj/commcare-hq | 471 | 11126505 | <reponame>akashkj/commcare-hq
import logging
log = logging.getLogger(__name__)
def store_saml_data_in_session(request):
"""
This stores SAML-related authentication data in the request's session
:param request: HttpRequest
"""
request.session['samlUserdata'] = request.saml2_auth.get_attributes()
request.session['samlNameId'] = request.saml2_auth.get_nameid()
request.session['samlNameIdFormat'] = request.saml2_auth.get_nameid_format()
request.session['samlNameIdNameQualifier'] = request.saml2_auth.get_nameid_nq()
request.session['samlNameIdSPNameQualifier'] = request.saml2_auth.get_nameid_spnq()
request.session['samlSessionIndex'] = request.saml2_auth.get_session_index()
def _get_saml_user_data_property(request, prop_slug):
"""
Shortcut for getting samlUserData properties generated by python3-saml.
These can sometimes manifest as lists or strings.
:param request: HttpRequest
:param prop_slug: string (property slug)
:return: string or None
"""
value = request.session.get('samlUserdata', {}).get(prop_slug)
if isinstance(value, list):
# for Azure AD the data is usually returned as a list
value = value[0] if value else ''
return value
def _get_display_name_from_session(request):
"""
This gets the display name from SSO user data stored in the session SAML
data.
:param request: HttpRequest
:return: string or None
"""
return _get_saml_user_data_property(
request,
'http://schemas.microsoft.com/identity/claims/displayname'
)
def get_sso_user_first_name_from_session(request):
"""
This gets the first name from sso user data stored in the session SAML data.
:param request: HttpRequest
:return: string or None
"""
first_name = _get_saml_user_data_property(
request,
'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname'
)
if not first_name:
display_name = _get_display_name_from_session(request)
first_name = display_name.split(' ')[0] if display_name else None
return first_name
def get_sso_user_last_name_from_session(request):
"""
This gets the last name from sso user data stored in the session SAML data.
:param request: HttpRequest
:return: string or None
"""
last_name = _get_saml_user_data_property(
request,
'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname'
)
if not last_name:
display_name = _get_display_name_from_session(request)
display_name_parts = display_name.split(' ') if display_name else []
if len(display_name_parts) > 1:
last_name = ' '.join(display_name_parts[1:])
return last_name
|
tests/activeBlock.py | apple314159/urweb | 837 | 11126520 | import unittest
import base
import time
class Suite(base.Base):
def test_1(self):
"""Test case 1"""
self.start()
alert = self.driver.switch_to.alert
self.assertEqual("Error: May not 'sleep' in main thread of 'code' for <active>", alert.text)
alert.accept()
time.sleep(0.1)
alert = self.driver.switch_to.alert
self.assertEqual("Hi!", alert.text)
alert.accept()
button = self.xpath('span[1]/button')
button.click()
txt = self.body_text()
self.assertEqual("Hi! Click me! Success", txt)
|
inceptor/engine/Filter.py | whitefi/inceptor | 743 | 11126545 | class Filter:
def __init__(self, include: list = None, exclude: list = None, imode="or", emode="and"):
self.include = include if include else []
self.exclude = exclude if exclude else []
self.imode = imode.lower()
self.emode = emode.lower()
def match(self, target):
match_include = match_exclude = True
if self.include and len(self.include) > 0:
if self.imode == "or":
match_include = any([target.find(s) >= 0 for s in self.include])
elif self.imode == "and":
match_include = all([target.find(s) >= 0 for s in self.include])
if self.exclude and len(self.exclude) > 0:
if self.emode == "or":
match_exclude = any([target.find(s) == -1 for s in self.exclude])
elif self.emode == "and":
match_exclude = all([target.find(s) == -1 for s in self.exclude])
return match_include and match_exclude
def to_string(self):
print(f"Include: {','.join(self.include)}, Mode: {self.imode}")
print(f"Exclude: {','.join(self.exclude)}, Mode: {self.emode}")
|
tests/convert_gl2pt_dense.py | naviocean/imgclsmob | 2,649 | 11126608 | <filename>tests/convert_gl2pt_dense.py
import numpy as np
import mxnet as mx
import torch
from torch.autograd import Variable
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.dense = mx.gluon.nn.Dense(
units=1000,
use_bias=False,
in_units=1024)
def hybrid_forward(self, F, x):
x = self.dense(x)
return x
class PytorchModel(torch.nn.Module):
def __init__(self):
super(PytorchModel, self).__init__()
self.dense = torch.nn.Linear(
in_features=1024,
out_features=1000,
bias=False)
def forward(self, x):
x = self.dense(x)
return x
def main():
success = True
for i in range(10):
w = np.random.randn(1000, 1024).astype(np.float32)
# b = np.random.randn(1000, ).astype(np.float32)
x = np.random.randn(1, 1024).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
gl_params['dense.weight']._load_init(mx.nd.array(w, ctx), ctx)
# gl_params['dense.bias']._load_init(mx.nd.array(b, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
pt_model = PytorchModel()
pt_model.eval()
pt_params = pt_model.state_dict()
pt_params['dense.weight'] = torch.from_numpy(w)
# pt_params['dense.bias'] = torch.from_numpy(b)
pt_model.load_state_dict(pt_params)
pt_model = pt_model.cuda()
pt_x = Variable(torch.from_numpy(x)).cuda()
pt_y = pt_model(pt_x).detach().cpu().numpy()
dist = np.sum(np.abs(gl_y - pt_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(pt_y)
y = np.matmul(w.astype(np.float64), x[0].astype(np.float64))
# y = np.dot(w, x[0])
gl_dist = np.sum(np.abs(gl_y - y))
pt_dist = np.sum(np.abs(pt_y - y))
print("i={}, gl_dist={}".format(i, gl_dist))
print("i={}, pt_dist={}".format(i, pt_dist))
if success:
print("All ok.")
if __name__ == '__main__':
main()
|
322 Coin Change.py | ChiFire/legend_LeetCode | 872 | 11126698 | """
You are given coins of different denominations and a total amount of money amount. Write a function to compute the
fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any
combination of the coins, return -1.
Example 1:
coins = [1, 2, 5], amount = 11
return 3 (11 = 5 + 5 + 1)
Example 2:
coins = [2], amount = 3
return -1.
Note:
You may assume that you have an infinite number of each kind of coin.
"""
import sys
__author__ = 'Daniel'
class Solution(object):
def coinChange(self, coins, amount):
"""
DP with early prune
Let F[i] be the fewest number of coins make to i
F[i+k] = min(F[i]+1, \forall k if F[i])
O(NM)
:type coins: List[int]
:type amount: int
:rtype: int
"""
if amount == 0:
return 0
F = [sys.maxint for _ in xrange(amount+1)]
for k in coins:
if k < amount+1:
F[k] = 1
for i in xrange(1, amount+1):
if F[i] != sys.maxint:
for k in coins:
if i+k <= amount:
F[i+k] = min(F[i+k], F[i]+1)
return F[amount] if F[amount] != sys.maxint else -1
class SolutionTLE(object):
def coinChange(self, coins, amount):
"""
Let F[i] be the fewest number of coins make to i
F[i] = min(F[i-k]+1, \forall k)
O(NM)
:type coins: List[int]
:type amount: int
:rtype: int
"""
F = [sys.maxint for _ in xrange(amount+1)]
for k in coins:
if k < amount + 1:
F[k] = 1
for i in xrange(1, amount+1):
for k in coins:
if i-k > 0 and F[i-k] != sys.maxint:
F[i] = min(F[i], F[i-k]+1)
return F[amount] if F[amount] != sys.maxint else -1
if __name__ == "__main__":
assert Solution().coinChange([243, 291, 335, 209, 177, 345, 114, 91, 313, 331], 7367) == 23 |
src/genie/libs/parser/iosxe/tests/ShowL2fibBdPort/cli/equal/golden_output2_expected.py | balmasea/genieparser | 204 | 11126735 | expected_output = {
'Et0/1:11': {
'type': 'BD_PORT',
'is_path_list': False,
'port': 'Et0/1:11'
},
'[IR]20011:2.2.2.2': {
'type':'VXLAN_REP',
'is_path_list': True,
'path_list': {
'id': 1190,
'path_count': 1,
'type': 'VXLAN_REP',
'description': '[IR]20011:2.2.2.2'
}
},
'[IR]20011:3.3.3.2': {
'type':'VXLAN_REP',
'is_path_list': True,
'path_list': {
'id': 1183,
'path_count': 1,
'type': 'VXLAN_REP',
'description': '[IR]20011:3.3.3.2'
}
}
} |
tests/test_node_set/test_bitcoind/test_bitcoind_node.py | ryan-lingle/node-launcher | 249 | 11126755 | import pytest
from node_launcher.node_set.lib.node_status import NodeStatus
from node_launcher.node_set.bitcoind.bitcoind_node import BitcoindNode
from node_launcher.node_set.tor.tor_node import TorNode
@pytest.fixture
def tor_node():
return TorNode()
@pytest.fixture
def bitcoind_node():
return BitcoindNode()
class TestBitcoindNode(object):
@pytest.mark.slow
def test_start(self, bitcoind_node: BitcoindNode, tor_node: TorNode, qtbot):
def handle_tor_node_status_change(status):
if status in [NodeStatus.SOFTWARE_DOWNLOADED,
NodeStatus.SOFTWARE_READY]:
bitcoind_node.software.update()
elif status == NodeStatus.SYNCED:
bitcoind_node.tor_synced = True
bitcoind_node.start_process()
tor_node.status.connect(handle_tor_node_status_change)
tor_node.software.update()
def check_synced():
return bitcoind_node.current_status == NodeStatus.SYNCING
qtbot.waitUntil(check_synced, timeout=500000)
bitcoind_node.stop()
def check_stopped():
return bitcoind_node.current_status == NodeStatus.STOPPED
qtbot.waitUntil(check_stopped, timeout=500000)
|
dpc/run/eval_camera_pose.py | kyuhyoung/differentiable-point-clouds | 153 | 11126761 | <reponame>kyuhyoung/differentiable-point-clouds
import startup
import os
import numpy as np
import scipy.io
import tensorflow as tf
from util.simple_dataset import Dataset3D
from util.app_config import config as app_config
from util.quaternion import quaternion_multiply, quaternion_conjugate
from util.camera import quaternion_from_campos
def run_eval():
config = tf.ConfigProto(
device_count={'GPU': 1}
)
cfg = app_config
exp_dir = cfg.checkpoint_dir
num_views = cfg.num_views
g = tf.Graph()
with g.as_default():
quat_inp = tf.placeholder(dtype=tf.float64, shape=[1, 4])
quat_inp_2 = tf.placeholder(dtype=tf.float64, shape=[1, 4])
quat_conj = quaternion_conjugate(quat_inp)
quat_mul = quaternion_multiply(quat_inp, quat_inp_2)
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
save_pred_name = "{}_{}".format(cfg.save_predictions_dir, cfg.eval_split)
save_dir = os.path.join(exp_dir, cfg.save_predictions_dir)
reference_rotation = scipy.io.loadmat("{}/final_reference_rotation.mat".format(exp_dir))["rotation"]
ref_conj_np = sess.run(quat_conj, feed_dict={quat_inp: reference_rotation})
dataset = Dataset3D(cfg)
num_models = dataset.num_samples()
model_names = []
angle_error = np.zeros((num_models, num_views), dtype=np.float64)
for model_idx in range(num_models):
sample = dataset.data[model_idx]
print("{}/{}".format(model_idx, num_models))
print(sample.name)
model_names.append(sample.name)
mat_filename = "{}/{}_pc.mat".format(save_dir, sample.name)
data = scipy.io.loadmat(mat_filename)
all_cameras = data["camera_pose"]
for view_idx in range(num_views):
cam_pos = sample.cam_pos[view_idx, :]
gt_quat_np = quaternion_from_campos(cam_pos)
gt_quat_np = np.expand_dims(gt_quat_np, 0)
pred_quat_np = all_cameras[view_idx, :]
pred_quat_np /= np.linalg.norm(pred_quat_np)
pred_quat_np = np.expand_dims(pred_quat_np, 0)
pred_quat_aligned_np = sess.run(quat_mul, feed_dict={
quat_inp: pred_quat_np,
quat_inp_2: ref_conj_np
})
q1 = gt_quat_np
q2 = pred_quat_aligned_np
q1_conj = sess.run(quat_conj, feed_dict={quat_inp: q1})
q_diff = sess.run(quat_mul, feed_dict={quat_inp: q1_conj, quat_inp_2: q2})
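            # angular distance between the two unit quaternions: 2 * arccos of the
            # scalar component of q1_conj * q2, wrapped into (-pi, pi] below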
ang_diff = 2 * np.arccos(q_diff[0, 0])
if ang_diff > np.pi:
ang_diff -= 2*np.pi
angle_error[model_idx, view_idx] = np.fabs(ang_diff)
all_errors = np.reshape(angle_error, (-1))
angle_thresh_rad = cfg.pose_accuracy_threshold / 180.0 * np.pi
correct = all_errors < angle_thresh_rad
num_predictions = correct.shape[0]
accuracy = np.count_nonzero(correct) / num_predictions
median_error = np.sort(all_errors)[num_predictions // 2]
median_error = median_error / np.pi * 180
print("accuracy:", accuracy, "median angular error:", median_error)
scipy.io.savemat(os.path.join(exp_dir, "pose_error_{}.mat".format(save_pred_name)),
{"angle_error": angle_error,
"accuracy": accuracy,
"median_error": median_error})
f = open(os.path.join(exp_dir, "pose_error_{}.txt".format(save_pred_name)), "w")
f.write("{} {}\n".format(accuracy, median_error))
f.close()
def main(_):
run_eval()
if __name__ == '__main__':
tf.app.run()
|
pypika/tests/test_drop.py | YiuRULE/pypika | 1,616 | 11126796 | <filename>pypika/tests/test_drop.py<gh_stars>1000+
import unittest
from pypika import (
Columns,
Database,
Query,
Tables,
)
class DropTableTests(unittest.TestCase):
database_xyz = Database("mydb")
new_table, existing_table = Tables("abc", "efg")
foo, bar = Columns(("a", "INT"), ("b", "VARCHAR(100)"))
def test_drop_database(self):
q1 = Query.drop_database(self.database_xyz)
q2 = Query.drop_database(self.database_xyz).if_exists()
self.assertEqual('DROP DATABASE "mydb"', str(q1))
self.assertEqual('DROP DATABASE IF EXISTS "mydb"', str(q2))
def test_drop_table(self):
q1 = Query.drop_table(self.new_table)
q2 = Query.drop_table(self.new_table).if_exists()
self.assertEqual('DROP TABLE "abc"', str(q1))
self.assertEqual('DROP TABLE IF EXISTS "abc"', str(q2))
def test_drop_user(self):
q1 = Query.drop_user("myuser")
q2 = Query.drop_user("myuser").if_exists()
self.assertEqual('DROP USER "myuser"', str(q1))
self.assertEqual('DROP USER IF EXISTS "myuser"', str(q2))
def test_drop_view(self):
q1 = Query.drop_view("myview")
q2 = Query.drop_view("myview").if_exists()
self.assertEqual('DROP VIEW "myview"', str(q1))
self.assertEqual('DROP VIEW IF EXISTS "myview"', str(q2))
|
pydebloatx/gui_main.py | space9bug/PyDebloatX | 730 | 11126863 | <filename>pydebloatx/gui_main.py
# -*- coding: utf-8 -*-
from PySide6.QtWidgets import QFrame, QPushButton, QMainWindow, QWidget, QLabel, QVBoxLayout, QCheckBox, QHBoxLayout, \
QProgressBar, QToolTip
from PySide6.QtCore import Qt, QRect, QCoreApplication, QMetaObject, QSize
from PySide6.QtGui import QIcon, QKeySequence, QFont, QShortcut
from packaging import version
from bisect import insort
import platform
import sys
import os
def resource_path(relative_path):
"""Determine resource path if app is built or run natively."""
if hasattr(sys, 'frozen'):
return os.path.join(sys._MEIPASS, relative_path) # skipcq: PYL-W0212
return os.path.join(os.path.abspath('.'), relative_path)
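# Example (sketch): when the app is frozen (e.g. by PyInstaller), sys._MEIPASS
# points at the unpacked bundle directory, so resource_path('icon.ico') resolves
# inside it; when run from source it resolves relative to the current directory.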
class Ui_MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowIcon(QIcon(resource_path('icon.ico')))
self.setFixedSize(540, 470)
def setupUi(self):
self.centralwidget = QWidget(self)
with open(resource_path('style.css'), 'r') as file:
self.centralwidget.setStyleSheet(file.read())
self.appswidget = QWidget(self.centralwidget)
self.appswidget.setGeometry(50, 0, 490, 470)
self.appswidget.setProperty('class', 'appswidget')
self.sidebar = QFrame(self.centralwidget)
self.sidebar.setFrameShape(QFrame.StyledPanel)
self.sidebar.setGeometry(0, 0, 50, 470)
self.sidebar.setProperty('class', 'sidebar')
self.refresh_btn = QPushButton(self.sidebar)
self.refresh_btn.setGeometry(QRect(0, 0, 51, 51))
self.refresh_btn.setProperty('class', 'sidebar_btns')
self.refresh_btn.setIcon(QIcon(':/icon/refresh_icon.png'))
self.refresh_btn.setIconSize(QSize(24, 24))
self.refresh_bind = QShortcut(QKeySequence('Ctrl+R'), self)
self.store_btn = QPushButton(self.sidebar)
self.store_btn.setGeometry(QRect(0, 51, 51, 51))
self.store_btn.setProperty('class', 'sidebar_btns')
self.store_btn.setIcon(QIcon(':/icon/store_icon.png'))
self.store_btn.setIconSize(QSize(24, 24))
self.store_bind = QShortcut(QKeySequence('Ctrl+S'), self)
self.homepage_btn = QPushButton(self.sidebar)
self.homepage_btn.setGeometry(QRect(0, 102, 51, 51))
self.homepage_btn.setProperty('class', 'sidebar_btns')
self.homepage_btn.setIcon(QIcon(':/icon/github_icon.png'))
self.homepage_btn.setIconSize(QSize(24, 24))
self.homepage_bind = QShortcut(QKeySequence('Ctrl+G'), self)
self.about_btn = QPushButton(self.sidebar)
self.about_btn.setGeometry(QRect(0, 153, 51, 51))
self.about_btn.setProperty('class', 'sidebar_btns')
self.about_btn.setIcon(QIcon(':/icon/about_icon.png'))
self.about_btn.setIconSize(QSize(24, 24))
self.about_bind = QShortcut(QKeySequence('Ctrl+A'), self)
self.quit_btn = QPushButton(self.sidebar)
self.quit_btn.setGeometry(QRect(0, 420, 51, 51))
self.quit_btn.setProperty('class', 'sidebar_btns_quit')
self.quit_btn.setIcon(QIcon(':/icon/quit_icon.png'))
self.quit_btn.setIconSize(QSize(24, 24))
self.quit_bind = QShortcut(QKeySequence('Ctrl+Q'), self)
self.font = QFont()
self.font.setPointSize(8)
self.font.setStyleStrategy(QFont.PreferAntialias)
self.label_refresh = QLabel(self.appswidget)
self.label_refresh.setFont(self.font)
self.label_refresh.setGeometry(QRect(20, 10, 441, 15))
self.label_info = QLabel(self.appswidget)
self.label_info.setFont(self.font)
self.label_info.setGeometry(QRect(20, 10, 441, 30))
self.progressbar = QProgressBar(self.appswidget)
self.progressbar.setGeometry(QRect(20, 30, 441, 20))
self.layout_widget_checkboxes = QWidget(self.appswidget)
self.layout_widget_checkboxes.setGeometry(QRect(20, 55, 155, 311))
self.layout_checkboxes = QVBoxLayout(self.layout_widget_checkboxes)
self.layout_checkboxes.setContentsMargins(0, 0, 0, 0)
self.layout_widget_checkboxes_2 = QWidget(self.appswidget)
self.layout_widget_checkboxes_2.setGeometry(QRect(175, 55, 155, 311))
self.layout_checkboxes_2 = QVBoxLayout(self.layout_widget_checkboxes_2)
self.layout_checkboxes_2.setContentsMargins(0, 0, 0, 0)
self.layout_widget_checkboxes_3 = QWidget(self.appswidget)
self.layout_widget_checkboxes_3.setGeometry(QRect(330, 55, 155, 311))
self.layout_checkboxes_3 = QVBoxLayout(self.layout_widget_checkboxes_3)
self.layout_checkboxes_3.setContentsMargins(0, 0, 0, 0)
self.layout_widget_labels = QWidget(self.appswidget)
self.layout_widget_labels.setGeometry(QRect(20, 390, 350, 16))
self.layout_labels = QHBoxLayout(self.layout_widget_labels)
self.layout_labels.setContentsMargins(0, 0, 0, 0)
self.label_space = QLabel(self.appswidget)
self.label_space.setFont(self.font)
self.layout_labels.addWidget(self.label_space)
self.label_size = QLabel(self.appswidget)
self.label_size.setFont(self.font)
self.layout_labels.addWidget(self.label_size)
self.layout_widget_buttons = QWidget(self.appswidget)
self.layout_widget_buttons.setGeometry(QRect(20, 420, 454, 31))
self.layout_buttons = QHBoxLayout(self.layout_widget_buttons)
self.layout_buttons.setContentsMargins(0, 0, 0, 0)
self.button_select_all = QPushButton(self.layout_widget_buttons)
self.button_select_all.setIcon(QIcon(':/icon/check_icon.png'))
self.button_select_all.setIconSize(QSize(18, 18))
self.button_select_all.setLayoutDirection(Qt.RightToLeft)
self.layout_buttons.addWidget(self.button_select_all)
self.button_select_all.setMinimumSize(100, 30)
self.button_select_all.setProperty('class', 'Aqua')
self.button_deselect_all = QPushButton(self.layout_widget_buttons)
self.button_deselect_all.setIcon(QIcon(':/icon/cancel_icon.png'))
self.button_deselect_all.setIconSize(QSize(18, 18))
self.button_deselect_all.setLayoutDirection(Qt.RightToLeft)
self.layout_buttons.addWidget(self.button_deselect_all)
self.button_deselect_all.setMinimumSize(100, 30)
self.button_deselect_all.setProperty('class', 'Aqua')
self.layout_buttons.addStretch()
self.button_uninstall = QPushButton(self.layout_widget_buttons)
self.button_uninstall.setIcon(QIcon(':/icon/trash_icon.png'))
self.button_uninstall.setIconSize(QSize(18, 18))
self.button_uninstall.setLayoutDirection(Qt.RightToLeft)
self.layout_buttons.addWidget(self.button_uninstall)
self.button_uninstall.setMinimumSize(100, 30)
self.button_uninstall.setProperty('class', 'Grapefruit')
self.setCentralWidget(self.centralwidget)
self.retranslateUi()
QMetaObject.connectSlotsByName(self)
def retranslateUi(self):
QToolTip.setFont(self.font)
self.setWindowTitle(QCoreApplication.translate("Title", "PyDebloatX"))
self.label_info.setText(QCoreApplication.translate("Label", ""))
self.app_name_list = list(( # Convert tuple to list, because lupdate ignores initial lists
QCoreApplication.translate("AppName", "3D Builder"),
QCoreApplication.translate("AppName", "3D Viewer"),
QCoreApplication.translate("AppName", "Alarms and Clock"),
QCoreApplication.translate("AppName", "Calculator"),
QCoreApplication.translate("AppName", "Calendar and Mail"),
QCoreApplication.translate("AppName", "Camera"),
QCoreApplication.translate("AppName", "Feedback Hub"),
QCoreApplication.translate("AppName", "Get Help"),
QCoreApplication.translate("AppName", "Groove Music"),
QCoreApplication.translate("AppName", "Maps"),
QCoreApplication.translate("AppName", "Messaging"),
QCoreApplication.translate("AppName", "Mixed Reality Portal"),
QCoreApplication.translate("AppName", "Mobile Plans"),
QCoreApplication.translate("AppName", "Money"),
QCoreApplication.translate("AppName", "Movies && TV"),
QCoreApplication.translate("AppName", "News"),
QCoreApplication.translate("AppName", "Office"),
QCoreApplication.translate("AppName", "OneNote"),
QCoreApplication.translate("AppName", "Paint 3D"),
QCoreApplication.translate("AppName", "People"),
QCoreApplication.translate("AppName", "Photos"),
QCoreApplication.translate("AppName", "Print 3D"),
QCoreApplication.translate("AppName", "Skype"),
QCoreApplication.translate("AppName", "Snip && Sketch"),
QCoreApplication.translate("AppName", "Solitaire"),
QCoreApplication.translate("AppName", "Sports"),
QCoreApplication.translate("AppName", "Spotify"),
QCoreApplication.translate("AppName", "Sticky Notes"),
QCoreApplication.translate("AppName", "Tips"),
QCoreApplication.translate("AppName", "Translator"),
QCoreApplication.translate("AppName", "Voice Recorder"),
QCoreApplication.translate("AppName", "Weather"),
QCoreApplication.translate("AppName", "Xbox"),
QCoreApplication.translate("AppName", "Xbox Game Bar"),
QCoreApplication.translate("AppName", "Your Phone")
))
self.tooltip_list = list((
QCoreApplication.translate("ToolTip", "View, create, and personalize 3D objects."),
QCoreApplication.translate("ToolTip", "View 3D models and animations in real-time."),
QCoreApplication.translate("ToolTip", "A combination of alarm clock, world clock, timer, and stopwatch."),
QCoreApplication.translate("ToolTip", "A calculator that includes standard, scientific, and programmer modes, as well as a unit converter."),
QCoreApplication.translate("ToolTip", "Stay up to date with email and schedule managing."),
QCoreApplication.translate("ToolTip", "Point and shoot to take pictures on Windows 10."),
QCoreApplication.translate("ToolTip", "Provide feedback about Windows and apps by sharing suggestions or problems."),
QCoreApplication.translate("ToolTip", "Provide a way to ask a question and get recommended solutions or contact assisted support."),
QCoreApplication.translate("ToolTip", "Listen to music on Windows, iOS, and Android devices."),
QCoreApplication.translate("ToolTip", "Search for places to get directions, business info, and reviews."),
QCoreApplication.translate("ToolTip", "Quick, reliable SMS, MMS and RCS messaging from your phone."),
QCoreApplication.translate("ToolTip", "Discover Windows Mixed Reality and dive into more than 3,000 games and VR experiences from Steam VR and Microsoft Store."),
QCoreApplication.translate("ToolTip", "Sign up for a data plan and connect with mobile operators in your area. You will need a supported SIM card."),
QCoreApplication.translate("ToolTip", "Finance calculators, currency exchange rates and commodity prices from around the world."),
QCoreApplication.translate("ToolTip", "All your movies and TV shows, all in one place, on all your devices."),
QCoreApplication.translate("ToolTip", "Deliver breaking news and trusted, in-depth reporting from the world\'s best journalists."),
QCoreApplication.translate("ToolTip", "Find all your Office apps and files in one place."),
QCoreApplication.translate("ToolTip", "Digital notebook for capturing and organizing everything across your devices."),
QCoreApplication.translate("ToolTip", "Make 2D masterpieces or 3D models that you can play with from all angles."),
QCoreApplication.translate("ToolTip", "Connect with all your friends, family, colleagues, and acquaintances in one place."),
QCoreApplication.translate("ToolTip", "View and edit your photos and videos, make movies, and create albums."),
QCoreApplication.translate("ToolTip", "Quickly and easily prepare objects for 3D printing on your PC."),
QCoreApplication.translate("ToolTip", "Instant message, voice or video call application."),
QCoreApplication.translate("ToolTip", "Quickly annotate screenshots, photos and other images and save, paste or share with other apps."),
QCoreApplication.translate("ToolTip", "Solitaire is one of the most played computer card games of all time."),
QCoreApplication.translate("ToolTip", "Live scores and in-depth game experiences for more than 150 leagues."),
QCoreApplication.translate("ToolTip", "Play your favorite songs and albums free on Windows 10 with Spotify."),
QCoreApplication.translate("ToolTip", "Create notes, type, ink or add a picture, add text formatting, or stick them to the desktop."),
QCoreApplication.translate("ToolTip", "Provide users with information and tips about operating system features."),
QCoreApplication.translate("ToolTip", "Translate text and speech, have translated conversations, and even download AI-powered language packs to use offline."),
QCoreApplication.translate("ToolTip", "Record sounds, lectures, interviews, and other events."),
QCoreApplication.translate("ToolTip", "Latest weather conditions, accurate 10-day and hourly forecasts."),
QCoreApplication.translate("ToolTip", "Browse the catalogue, view recommendations, and discover PC games with Xbox Game Pass."),
QCoreApplication.translate("ToolTip", "Instant access to widgets for screen capture and sharing, and chatting with Xbox friends."),
QCoreApplication.translate("ToolTip", "Link your Android phone and PC to view and reply to text messages, access mobile apps, and receive notifications.")
))
self.app_data_list = [
{"name": "*Microsoft.3DBuilder*", "link": "/?PFN=Microsoft.3DBuilder_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.Microsoft3DViewer*", "link": "/?PFN=Microsoft.Microsoft3DViewer_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.WindowsAlarms*", "link": "/?PFN=Microsoft.WindowsAlarms_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.WindowsCalculator*", "link": "/?PFN=Microsoft.WindowsCalculator_8wekyb3d8bbwe", "size": 0},
{"name": "*microsoft.windowscommunicationsapps*", "link": "/?PFN=Microsoft.windowscommunicationsapps_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.WindowsCamera*", "link": "/?PFN=Microsoft.WindowsCamera_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.WindowsFeedbackHub*", "link": "/?PFN=Microsoft.WindowsFeedbackHub_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.GetHelp*", "link": "/?PFN=Microsoft.Gethelp_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.ZuneMusic*", "link": "/?PFN=Microsoft.ZuneMusic_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.WindowsMaps*", "link": "/?PFN=Microsoft.WindowsMaps_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.Messaging*", "link": "/?PFN=Microsoft.Messaging_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.MixedReality.Portal*", "link": "/?PFN=Microsoft.MixedReality.Portal_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.OneConnect*", "link": "/?PFN=Microsoft.OneConnect_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.BingFinance*", "link": "/?PFN=Microsoft.BingFinance_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.ZuneVideo*", "link": "/?PFN=Microsoft.ZuneVideo_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.BingNews*", "link": "/?PFN=Microsoft.BingNews_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.MicrosoftOfficeHub*", "link": "/?PFN=Microsoft.MicrosoftOfficeHub_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.Office.OneNote*", "link": "/?PFN=Microsoft.Office.OneNote_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.MSPaint*", "link": "/?PFN=Microsoft.MSPaint_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.People*", "link": "/?PFN=Microsoft.People_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.Windows.Photos*", "link": "/?PFN=Microsoft.Windows.Photos_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.Print3D*", "link": "/?PFN=Microsoft.Print3D_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.SkypeApp*", "link": "/?PFN=Microsoft.SkypeApp_kzf8qxf38zg5c", "size": 0},
{"name": "*Microsoft.ScreenSketch*", "link": "/?PFN=Microsoft.ScreenSketch_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.MicrosoftSolitaireCollection*", "link": "/?PFN=Microsoft.MicrosoftSolitaireCollection_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.BingSports*", "link": "/?PFN=Microsoft.BingSports_8wekyb3d8bbwe", "size": 0},
{"name": "*SpotifyAB.SpotifyMusic*", "link": "/?PFN=SpotifyAB.SpotifyMusic_zpdnekdrzrea0", "size": 0},
{"name": "*Microsoft.MicrosoftStickyNotes*", "link": "/?PFN=Microsoft.MicrosoftStickyNotes_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.Getstarted*", "link": "/?PFN=Microsoft.Getstarted_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.BingTranslator*", "link": "/?PFN=Microsoft.BingTranslator_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.WindowsSoundRecorder*", "link": "/?PFN=Microsoft.WindowsSoundRecorder_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.BingWeather*", "link": "/?PFN=Microsoft.BingWeather_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.GamingApp*", "link": "/?PFN=Microsoft.GamingApp_8wekyb3d8bbwe", "size": 0},
{"name": "*Xbox*", "link": "/?PFN=Microsoft.XboxGameOverlay_8wekyb3d8bbwe", "size": 0},
{"name": "*Microsoft.YourPhone*", "link": "/?PFN=Microsoft.YourPhone_8wekyb3d8bbwe", "size": 0}
]
if version.parse(platform.version()) >= version.parse("10.0.19041"):
insort(self.app_name_list, QCoreApplication.translate("AppName", "Cortana"))
cortana_index = self.app_name_list.index("Cortana")
self.tooltip_list.insert(cortana_index, QCoreApplication.translate("ToolTip", "Personal intelligence assistant."))
self.app_data_list.insert(cortana_index, {"name": "*Microsoft.549981C3F5F10*", "link": "/?PFN=Microsoft.549981C3F5F10_8wekyb3d8bbwe", "size": 0})
self.checkbox_list = []
for i, _ in enumerate(self.app_name_list):
self.checkbox_list.append(QCheckBox())
if i % 3 == 2:
self.layout_checkboxes_3.addWidget(self.checkbox_list[i])
elif i % 3 == 1:
self.layout_checkboxes_2.addWidget(self.checkbox_list[i])
else:
self.layout_checkboxes.addWidget(self.checkbox_list[i])
self.apps_dict = {}
for i, checkbox in enumerate(self.checkbox_list):
checkbox.setText(self.app_name_list[i])
checkbox.setToolTip(self.tooltip_list[i])
checkbox.setFont(self.font)
self.apps_dict[checkbox] = self.app_data_list[i]
self.label_space.setText(QCoreApplication.translate("Label", "Total amount of disk space:"))
self.label_size.setText(QCoreApplication.translate("Label", "0 MB"))
self.button_select_all.setText(QCoreApplication.translate("Button", "Select All"))
self.button_deselect_all.setText(QCoreApplication.translate("Button", "Deselect All"))
self.button_uninstall.setText(QCoreApplication.translate("Button", "Uninstall"))
|
neural-networks-intro/SimpleNeuralNetwork.py | mervatkheir/kite-python-blog-post-code | 238 | 11126868 | #!/usr/bin/env python
# coding: utf-8
from numpy import exp, dot, random, array
"""Python code for simple Artificial Neural Network with one hidden layer"""
def initialize_weights():
# Generate random numbers
random.seed(1)
# Assign random weights to a 3 x 1 matrix
synaptic_weights = random.uniform(low=-1, high=1, size=(3, 1))
return synaptic_weights
def sigmoid(x):
return 1 / (1 + exp(-x))
def sigmoid_derivative(x):
return x * (1 - x)
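# Note: sigmoid_derivative expects values that have already been passed through
# sigmoid, i.e. it computes s * (1 - s) for s = sigmoid(x); train() below applies
# it to predicted_output, which is the sigmoid of the weighted inputs.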
def train(inputs, expected_output, synaptic_weights, bias, learning_rate, training_iterations):
for epoch in range(training_iterations):
# Forward pass -- Pass the training set through the network.
predicted_output = learn(inputs, synaptic_weights, bias)
# Backward pass
# Calculate the error
error = sigmoid_derivative(predicted_output) * (expected_output - predicted_output)
# Adjust the weights and bias by a factor
weight_factor = dot(inputs.T, error) * learning_rate
bias_factor = error * learning_rate
# Update the synaptic weights
synaptic_weights += weight_factor
# Update the bias
bias += bias_factor
if ((epoch % 1000) == 0):
print("Epoch", epoch)
print("Predicted Output = ", predicted_output.T)
print("Expected Output = ", expected_output.T)
print()
return synaptic_weights
def learn(inputs, synaptic_weights, bias):
return sigmoid(dot(inputs, synaptic_weights) + bias)
if __name__ == "__main__":
# Initialize random weights for the network
synaptic_weights = initialize_weights()
# The training set
inputs = array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# Target set
expected_output = array([[1, 0, 1]]).T
# Test set
test = array([1, 0, 1])
# Train the neural network
trained_weights = train(inputs, expected_output, synaptic_weights, bias=0.001, learning_rate=0.98,
training_iterations=1000000)
# Test the neural network with a test example
accuracy = (learn(test, trained_weights, bias=0.01)) * 100
print("accuracy =", accuracy[0], "%")
|
lightreid/models/heads/__init__.py | nataliamiccini/light-reid | 296 | 11126884 | """
Author: <NAME>
E-mail: <EMAIL>
"""
from .build import HEADs_REGISTRY, build_head
# import heads, so they will be registered
from .bn_head import BNHead
from .code_pyramid import CodePyramid
from .pcb_head import PCBHead
|
vae/flax/utils.py | vipavlovic/pyprobml | 4,895 | 11126893 | <filename>vae/flax/utils.py
import jax
import math
import jax.numpy as jnp
import matplotlib.pyplot as plt
def save_image(ndarray, fp, nrow=8, padding=2, pad_value=0.0, format=None):
"""Make a grid of images and Save it into an image file.
Args:
ndarray (array_like): 4D mini-batch images of shape (B x H x W x C)
fp - A filename(string) or file object
nrow (int, optional): Number of images displayed in each row of the grid.
The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
padding (int, optional): amount of padding. Default: ``2``.
pad_value (float, optional): Value for the padded pixels. Default: ``0``.
format(Optional): If omitted, the format to use is determined from the filename extension.
If a file object was used instead of a filename, this parameter should always be used.
"""
if not (isinstance(ndarray, jnp.ndarray) or
(isinstance(ndarray, list) and all(isinstance(t, jnp.ndarray) for t in ndarray))):
raise TypeError('array_like of tensors expected, got {}'.format(type(ndarray)))
ndarray = jnp.asarray(ndarray)
if ndarray.ndim == 4 and ndarray.shape[-1] == 1: # single-channel images
ndarray = jnp.concatenate((ndarray, ndarray, ndarray), -1)
# make the mini-batch of images into a grid
nmaps = ndarray.shape[0]
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil(float(nmaps) / xmaps))
height, width = int(ndarray.shape[1] + padding), int(ndarray.shape[2] + padding)
num_channels = ndarray.shape[3]
grid = jnp.full((height * ymaps + padding, width * xmaps + padding, num_channels), pad_value).astype(jnp.float32)
k = 0
for y in range(ymaps):
for x in range(xmaps):
if k >= nmaps:
break
grid = jax.ops.index_update(
grid, jax.ops.index[y * height + padding:(y + 1) * height,
x * width + padding:(x + 1) * width],
ndarray[k])
k = k + 1
# Scale to [0, 255] and clip before casting to uint8 (values are truncated; no rounding offset is applied)
ndarr = jnp.clip(grid * 255.0, 0, 255).astype(jnp.uint8)
plt.imshow(ndarr)
plt.savefig(fp)
# im = Image.fromarray(ndarr.copy())
# im.save(fp, format=format) |
pyrival/graphs/centroid_decomposition.py | MattJDavidson/aoc2021 | 748 | 11126903 | <filename>pyrival/graphs/centroid_decomposition.py
def centroid_decomposition(graph):
"""
Given a tree, this function is a generator that
1. Roots the tree at its centroid (modifying graph)
2. Yields centroid
3. Removes centroid from the graph
4. Recurses on the forest left after the removal
This generator makes working with centroid decomposition easy. It yields the n
centroids involved in the decomposition. It also keeps the graph rooted at the yielded
centroid by modifying the input variable graph. In total this takes O(n log n) time.
Input:
graph: list of lists where graph[u] is a list containing all neighbours of node u
Example:
>>> graph = [[1], [0,2], [1]]
>>> for centroid in centroid_decomposition(graph):
>>> bfs = [centroid]
>>> for node in bfs:
>>> bfs += graph[node] # Valid since graph is rooted at the centroid
>>> print('BFS from centroid:', bfs)
BFS from centroid: [1, 0, 2]
BFS from centroid: [0]
BFS from centroid: [2]
"""
n = len(graph)
bfs = [n - 1]
for node in bfs:
bfs += graph[node]
for nei in graph[node]:
graph[nei].remove(node)
size = [0] * n
for node in reversed(bfs):
size[node] = 1 + sum(size[child] for child in graph[node])
def reroot_centroid(root):
N = size[root]
while True:
for child in graph[root]:
if size[child] > N // 2:
size[root] = N - size[child]
graph[root].remove(child)
graph[child].append(root)
root = child
break
else:
return root
bfs = [n - 1]
for node in bfs:
centroid = reroot_centroid(node)
bfs += graph[centroid]
yield centroid
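# Minimal runnable demo (sketch): decompose a 7-node path graph and, for each
# centroid, print a BFS over the piece it was removed from, mirroring the
# docstring example above. The node labels are illustrative only.
if __name__ == "__main__":
    demo_graph = [[1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5]]
    for c in centroid_decomposition(demo_graph):
        order = [c]
        for node in order:
            order += demo_graph[node]
        print("Centroid", c, "BFS:", order)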
|
test/test_web.py | complexdb/zincbase | 174 | 11126905 | <gh_stars>100-1000
def test_index(server_and_args):
app, server, args = server_and_args
response = app.test_client().get('/')
assert response.status_code == 200
data = response.data.decode('utf-8')
assert '<title>Zincbase Graph Server</title>' in data
assert 'src="bundle.js"' in data |
libweasyl/libweasyl/alembic/versions/c1f8375b5805_split_login_charsettings_to_columns.py | akash143143/weasyl | 111 | 11126923 | <reponame>akash143143/weasyl<gh_stars>100-1000
"""Split login charsettings to columns, removing birthday reset
Removes login.settings, replacing with a distinct column (login.force_password_reset) for forced password resets.
Drops forced resets of birthdays entirely, and opts to leverage existing permaban/suspension tables for determining
if a user is banned or suspended.
Revision ID: c1f8375b5805
Revises: <PASSWORD>
Create Date: 2020-02-14 02:29:53.525649
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
def upgrade():
op.add_column('login', sa.Column('force_password_reset', sa.Boolean(), server_default='f', nullable=False))
op.execute("UPDATE login SET force_password_reset = TRUE WHERE settings ~ 'p'")
# NB: Bans/suspensions based on the contents of the permaban/suspension tables, so no replacement column needed
# during upgrade()
op.drop_column('login', 'settings')
def downgrade():
op.add_column('login', sa.Column('settings', sa.VARCHAR(length=20), server_default=sa.text(u"''::character varying"), autoincrement=False, nullable=False))
op.execute("UPDATE login SET settings = settings || 'p' WHERE force_password_reset")
# Restore the ban flag ('b')
op.execute("""
UPDATE login
SET settings = settings || 'b'
FROM permaban
WHERE login.userid = permaban.userid
""")
# Restore the suspended flag ('s')
op.execute("""
UPDATE login
SET settings = login.settings || 's'
FROM suspension
WHERE login.userid = suspension.userid
""")
op.drop_column('login', 'force_password_reset')
|
bin/config.py | hhcho/ample | 176 | 11126936 | <gh_stars>100-1000
import fileinput
import sys
data_names = None
if data_names is None:
if len(sys.argv) == 1:
print('Enter data names followed by EOF/Ctrl-D:')
data_names = []
for line in fileinput.input():
fields = line.rstrip().split(',')
for f in fields:
if f.strip() == '':
continue
data_names.append(f)
print('Data names loaded')
|
src/arcrest/manageportal/administration.py | kevinsigwart/ArcREST | 208 | 11126961 | """
The ArcREST API allows you to perform administrative tasks not available in
the Portal for ArcGIS website. The API is organized into resources and
operations. Resources are entities within Portal for ArcGIS that hold some
information and have a well-defined state. Operations act on these
resources and update their information or state. Resources and operations
are hierarchical and have unique universal resource locators (URLs).
"""
from __future__ import absolute_import
from __future__ import print_function
import json
import tempfile
from datetime import datetime
from .._abstract.abstract import BaseAGOLClass
from ..security import PortalTokenSecurityHandler,ArcGISTokenSecurityHandler,OAuthSecurityHandler
########################################################################
class _Federation(BaseAGOLClass):
"""
"""
_resources = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
if url.lower().endswith("/federation") == False:
url = url + "/federation"
self._url = url
self._securityHandler = securityHandler
self._proxy_url = proxy_url
self._proxy_port = proxy_port
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the site properties """
params = {
"f" : "json",
}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print( k, " - attribute not implemented in manageportal.administration._Federation class.")
#----------------------------------------------------------------------
@property
def servers(self):
"""
This resource returns detailed information about the ArcGIS Servers
registered with Portal for ArcGIS, such as the ID of the server,
name of the server, ArcGIS Web Adaptor URL, administration URL, and
if the server is set as a hosting server.
"""
params = {"f" : "json"}
url = self._url + "/servers"
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def unfederate(self, serverId):
"""
This operation unfederates an ArcGIS Server from Portal for ArcGIS
"""
url = self._url + "/servers/{serverid}/unfederate".format(
serverid=serverId)
params = {"f" : "json"}
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateServer(self, serverId, serverRole):
"""
This operation updates the role of an ArcGIS Server federated with Portal for ArcGIS
Parameters:
serverRole - Whether the server is a hosting server for the
portal, a federated server, or a server with restricted access
to publishing. The allowed values are FEDERATED_SERVER,
FEDERATED_SERVER_WITH_RESTRICTED_PUBLISHING, or HOSTING_SERVER.
serverId - unique id of the server
"""
url = self._url + "/servers/{serverid}/update".format(
serverid=serverId)
params = {"f" : "json",
"serverRole" : serverRole}
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def validateServer(self, serverId):
"""
This operation provides status information about a specific ArcGIS
Server federated with Portal for ArcGIS.
Parameters:
serverId - unique id of the server
"""
url = self._url + "/servers/{serverid}/validate".format(
serverid=serverId)
params = {"f" : "json"}
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def validateAllServers(self):
"""
This operation provides status information about all ArcGIS
Servers federated with Portal for ArcGIS.
"""
url = self._url + "/servers/validate"
params = {"f" : "json"}
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
########################################################################
class _log(BaseAGOLClass):
"""handles the portal log information at 10.3.1+"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
_resources = None
_operations = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
if url.lower().endswith("/logs") == False:
url = url + "/logs"
self._url = url
self._securityHandler = securityHandler
self._proxy_url = proxy_url
self._proxy_port = proxy_port
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the site properties """
params = {
"f" : "json",
}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print( k, " - attribute not implemented in manageportal.administration.log class.")
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
@property
def resources(self):
"""returns the admin sites resources"""
if self._resources is None:
self.__init()
return self._resources
#----------------------------------------------------------------------
@property
def operations(self):
"""lists operations available to user"""
if self._operations is None:
self.__init()
return self._operations
#----------------------------------------------------------------------
@property
def settings(self):
"""returns the log settings for portal"""
url = self._url + "/settings"
params = {
"f" : "json",
}
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def editLogSettings(self, logLocation, logLevel="WARNING", maxLogFileAge=90):
"""
edits the log settings for the portal site
Inputs:
logLocation - file path to where you want the log files saved
on disk
logLevel - this is the level of detail saved in the log files
Levels are: OFF, SEVERE, WARNING, INFO, FINE, VERBOSE, and
DEBUG
maxLogFileAge - the numbers of days to keep a single log file
"""
url = self._url + "/settings/edit"
params = {
"f" : "json",
"logDir" : logLocation,
"logLevel" : logLevel,
"maxLogFileAge" : maxLogFileAge
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def query(self, logLevel="WARNING", source="ALL",
startTime=None, endTime=None,
logCodes=None, users=None, messageCount=1000):
"""
allows users to look at the log files from a the REST endpoint
Inputs:
logLevel - this is the level of detail saved in the log files
Levels are: OFF, SEVERE, WARNING, INFO, FINE, VERBOSE, and
DEBUG
source - the type of information to search. Allowed values
are: ALL, PORTAL_ADMIN, SHARING, PORTAL
startTime - datetime object to start search at
endTime - datetime object to end search
logCodes - comma seperate list of codes to search
users - comma seperated list of users to query
messageCount - integer number of the max number of log
entries to return to the user.
"""
url = self._url + "/query"
filter_value = {"codes":[], "users":[], "source": "*"}
if source.lower() == "all":
filter_value['source'] = "*"
else:
filter_value['source'] = [source]
params = {
"f" : "json",
"level" : logLevel
}
if not startTime is None and \
isinstance(startTime, datetime):
params['startTime'] = startTime.strftime("%Y-%m-%dT%H:%M:%S")#2015-01-31T15:00:00
if not endTime is None and \
isinstance(endTime, datetime):
params['endTime'] = endTime.strftime("%Y-%m-%dT%H:%M:%S")
if not logCodes is None:
filter_value['codes'] = logCodes.split(',')
if not users is None:
filter_value['users'] = users.split(',')
if messageCount is None:
params['pageSize'] = 1000
elif isinstance(messageCount, (int, long, float)):
params['pageSize'] = int(messageCount)
else:
params['pageSize'] = 1000
params['filter'] = filter_value
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
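    # Usage sketch (assuming `log` is an instantiated _log helper): fetch up to
    # 500 SEVERE entries from the last day.
    #   from datetime import timedelta
    #   report = log.query(logLevel="SEVERE", source="PORTAL_ADMIN",
    #                      startTime=datetime.now() - timedelta(days=1),
    #                      messageCount=500)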
#----------------------------------------------------------------------
def cleanLogs(self):
"""erases all the log data"""
url = self._url + "/clean"
params = {
"f":"json"
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
########################################################################
class _Security(BaseAGOLClass):
"""
The security resource is the root of all the security configurations
and operations in the portal. Through this resource, you can change
the identity providers and the authentication mode for your portal.
"""
_securityHandler = None
_url = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
_resources = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
if securityHandler is None:
pass
elif isinstance(securityHandler, PortalTokenSecurityHandler) or \
isinstance(securityHandler, ArcGISTokenSecurityHandler) or \
isinstance(securityHandler, OAuthSecurityHandler):
self._securityHandler = securityHandler
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
self._url = url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the site properties """
params = {
"f" : "json",
}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print( k, " - attribute not implemented in manageportal.administration.log class.")
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
@property
def resources(self):
"""returns the admin sites resources"""
if self._resources is None:
self.__init()
return self._resources
#----------------------------------------------------------------------
def createUser(self,
username,
password,
firstName,
lastName,
email,
role="org_user",
provider="arcgis",
description="",
idpUsername=None):
"""
This operation is used to create a new user account in the portal.
Inputs:
username - The name of the user account.
password - The password for the account. This is a required
parameter only if the provider is arcgis; otherwise,
the password parameter is ignored.
firstName - first name of the user account
lastName - last name of the user account
email - The email address for the user account.
description - An optional description string for the user
account.
role - The role for the user account. The default value is
org_user.
Values: org_user | org_publisher | org_admin
provider - The provider for the account. The default value is
arcgis.
Values: arcgis | webadaptor | enterprise
idpUsername - name of the user on the domain controller.
Ex: domain\\account
"""
url = self._url + "/users/createUser"
params = {
"f" : "json",
"username" : username,
"password" : password,
"firstname" : firstName,
"lastname" : lastName,
"email" : email,
"role" : role,
"provider" : provider,
"description" : description
}
if idpUsername is not None:
params['idpUsername'] = idpUsername
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def deleteCertificate(self, certName):
"""
This operation deletes an SSL certificate from the key store. Once
a certificate is deleted, it cannot be retrieved or used to enable
SSL.
Inputs:
certName - name of the cert to delete
"""
params = {"f" : "json"}
url = self._url + "/sslCertificates/{cert}/delete".format(
cert=certName)
return self._post(url=url, param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def exportCertificate(self, certName, outFolder=None):
"""
This operation downloads an SSL certificate. The file returned by
the server is an X.509 certificate. The downloaded certificate can
be imported into a client that is making HTTP requests.
Inputs:
certName - name of the cert to export
outFolder - folder on disk to save the certificate.
"""
params = {"f" : "json"}
url = self._url + "/sslCertificates/{cert}/export".format(
cert=certName)
if outFolder is None:
outFolder = tempfile.gettempdir()
return self._post(url=url, param_dict=params,
out_folder=outFolder,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def generateCertificate(self, alias,
commonName, organizationalUnit,
city, state, country,
keyalg="RSA", keysize=1024,
sigalg="SHA256withRSA",
validity=90
):
"""
Use this operation to create a self-signed certificate or as a
starting point for getting a production-ready CA-signed
certificate. The portal will generate a certificate for you and
store it in its keystore.
"""
params = {"f" : "json",
"alias" : alias,
"commonName" : commonName,
"organizationalUnit" : organizationalUnit,
"city" : city,
"state" : state,
"country" : country,
"keyalg" : keyalg,
"keysize" : keysize,
"sigalg" : sigalg,
"validity" : validity
}
url = self._url + "/SSLCertificate/ generateCertificate"
return self._post(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
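    # Usage sketch (argument values are illustrative): request a 2048-bit,
    # SHA256-signed self-signed certificate valid for one year.
    #   security.generateCertificate(alias="portal", commonName="portal.example.com",
    #                                organizationalUnit="GIS", city="Redlands",
    #                                state="CA", country="US", keysize=2048,
    #                                validity=365)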
#----------------------------------------------------------------------
def generateCSR(self, certName):
"""
"""
url = self._url + "/sslCertificates/{cert}/generateCsr".format(cert=certName)
params = {"f" : "json"}
return self._post(url=url, param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def getAppInfo(self, appId):
"""
Every application registered with Portal for ArcGIS has a unique
client ID and a list of redirect URIs that are used for OAuth. This
operation returns these OAuth-specific properties of an application.
You can use this information to update the redirect URIs by using
the Update App Info operation.
Input:
appId - unique id of the application to get the information
about.
"""
params = {
"f" : "json",
"appID" : appId
}
url = self._url + "/oauth/getAppInfo"
return self._get(url=url, param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def getUsersEnterpriseGroups(self, username, searchFilter, maxCount=100):
"""
This operation lists the groups assigned to a user account in the
configured enterprise group store. You can use the filter parameter
to narrow down the search results.
Inputs:
username - name of the user to find
searchFilter - helps narrow down results
maxCount - maximum number of results to return
"""
params = {
"f" : "json",
"username" : username,
"filter" : searchFilter,
"maxCount" : maxCount
}
url = self._url + "/Groups/getEnterpriseGroupsForUser"
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def getEnterpriseUser(self, username):
""""""
url = self._url + "/users/getEnterpriseUser"
params = {
"f" : "json",
"username" : username
}
return self._get(url=url, param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def getUsersWithinEnterpriseGroup(self,
groupName,
searchFilter=None,
maxCount=10):
"""
This operation returns the users that are currently assigned to the
enterprise group within the enterprise user/group store. You can
use the filter parameter to narrow down the user search.
Inputs:
groupName - name of the group
searchFilter - string used to narrow down the search
maxCount - maximum number of users to return
"""
params = {
"f" : "json",
"groupName" : groupName,
"maxCount" : maxCount
}
if searchFilter:
params['filters'] = searchFilter
url = self._url + "/groups/getUsersWithinEnterpriseGroup"
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def importExistingServerCertificate(self, alias, certPassword,
certFile):
"""
This operation imports an existing server certificate, stored in
the PKCS #12 format, into the keystore. If the certificate is a CA
signed certificate, you must first import the CA Root or
Intermediate certificate using the Import Root or Intermediate
Certificate operation.
Parameters
alias - certificate name
certPassword - password for the certificate
certFile - certificate file
"""
url = self._url + "/sslCertificates/importExistingServerCertificate"
files = {}
files['certFile'] = certFile
params = {
"f" : "json",
"alias" : alias,
"certPassword" : <PASSWORD>
}
return self._post(url=url,
param_dict=params,
files=files,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def importRootOrIntermediate(self, alias, rootCSCertificate):
""""""
params = {
"alias" : alias,
"f" : "json"
}
files = {
"rootCSCertificate" : rootCSCertificate
}
url = self._url + "/sslCertificates/importRootOrIntermediate"
return self._post(url=url,
param_dict=params,
files=files,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def importSignedCertificate(self, alias, certFile):
"""
This operation imports a certificate authority (CA) signed SSL
certificate into the key store.
"""
params = { "f" : "json" }
files = {"file" : certFile}
url = self._url + \
"/sslCertificates/{cert}/importSignedCertificate".format(cert=alias)
return self._post(url=url,
files=files,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def oauth(self):
"""
The OAuth resource contains a set of operations that update the
OAuth2-specific properties of registered applications in Portal for
ArcGIS.
"""
url = self._url + "/oauth"
params = {"f" : "json"}
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def refreshGroupMembership(self, groups):
"""
This operation iterates over every enterprise account configured in
the portal and determines if the user account is a part of the
input enterprise group. If there are any change in memberships, the
database and the indexes are updated for each group.
While portal automatically refreshes the memberships during a user
login and during a periodic refresh configured through the Update
Identity Store operation, this operation allows an administrator to
force a refresh.
Parameters:
groups - comma seperated list of group names
"""
params = {
"f" : "json",
"groups" : groups
}
url = self._url + "/groups/refreshMembership"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def refreshUserMembership(self, users):
"""
This operation iterates over every enterprise group configured in
the portal and determines if the input user accounts belong to any
of the configured enterprise groups. If there is any change in
membership, the database and the indexes are updated for each user
account. While portal automatically refreshes the memberships
during a user login and during a periodic refresh (configured
through the Update Identity Store operation), this operation allows
an administrator to force a refresh.
Parameters:
users - comma seperated list of user names
"""
params = {
"f" : "json",
"users" : users
}
url = self._url + "/users/refreshMembership"
return self._post(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def searchEnterpriseGroups(self, searchFilter="", maxCount=100):
"""
This operation searches groups in the configured enterprise group
store. You can narrow down the search using the search filter
parameter.
Parameters:
searchFilter - text value to narrow the search down
maxCount - maximum number of records to return
"""
params = {
"f" : "json",
"filter" : searchFilter,
"maxCount" : maxCount
}
url = self._url + "/groups/searchEnterpriseGroups"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def searchEnterpriseUsers(self, searchFilter="", maxCount=100):
"""
This operation searches users in the configured enterprise user
store. You can narrow down the search using the search filter
parameter.
Parameters:
searchFilter - text value to narrow the search down
maxCount - maximum number of records to return
"""
params = {
"f" : "json",
"filter" : searchFilter,
"maxCount" : maxCount
}
url = self._url + "/users/searchEnterpriseUsers"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def SSLCertificates(self):
"""
Lists certificates.
"""
url = self._url + "/SSLCertificate"
params = {"f" : "json"}
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def getSSLCertificate(self, alias):
""""""
url = self._url + "/sslCertificates/{cert}".format(cert=alias)
params = {"f": "json"}
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def testIdentityStore(self):
"""
This operation can be used to test the connection to a user or
group store.
"""
params = {"f" : "json"}
url = self._url + "/config/testIdentityStore"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def user_count(self):
"""
The users resource is an umbrella for operations to manage members
within Portal for ArcGIS. The resource returns the total number of
members in the system.
As an administrator, you can register enterprise accounts in your
portal instance by using the Create User operation. When automatic
sign-in for users is disabled in the security configuration,
registered enterprise accounts can sign in as members of the
portal. This gives you full control on all the accounts within a
portal instance.
"""
params = {"f" : "json"}
url = self._url + "/users"
return self._get(url=url, param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateAppInfo(self, appInfo):
"""
This operation allows you to update the OAuth-specific properties
associated with an application. Use the Get App Info operation to
obtain the existing OAuth properties that can be edited.
"""
params = {"f" : "json",
"appInfo" : appInfo}
url = self._url + "/oauth/updateAppInfo"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateEnterpriseUser(self, username, idpUsername):
"""
This operation allows an administrator to update the idpUsername
for an enterprise user in the portal. This is used when migrating
from accounts used with web-tier authentication to SAML
authentication.
Parameters:
username - username of the enterprise account
idpUsername - username used by the SAML identity provider
"""
params = {
"f" : "json",
"username" : username,
"idpUsername" : idpUsername
}
url = self._url + "/users/updateEnterpriseUser"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateWebServerCertificate(self, webServerCertificateAlias,
sslProtocols,
cipherSuites):
""""""
params = {
"f" : "json",
"webServerCertificateAlias": webServerCertificateAlias,
"sslProtocols" : sslProtocols,
"cipherSuites" : cipherSuites
}
url = self._url + "/sslcertificates/update"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateSecurityConfiguration(self,
enableAutomaticAccountCreation=False,
disableServicesDirectory=False
):
"""
This operation can be used to update the portal's security settings
such as whether or not enterprise accounts are automatically
registered as members of your ArcGIS organization the first time
they accesses the portal.
The security configuration is stored as a collection of properties
in a JSON object. The following properties are supported:
enableAutomaticAccountCreation
disableServicesDirectory
The automatic account creation flag (enableAutomaticAccountCreation)
determines the behavior for unregistered enterprise accounts the
first time they access the portal. When the value for this property
is set to false, first time users are not automatically registered
as members of your ArcGIS organization, and have the same access
privileges as other nonmembers. For these accounts to sign in, an
administrator must register the enterprise accounts using the
Create User operation.
The default value for the enableAutomaticAccountCreation property
is false. When this value is set to true, portal will add
enterprise accounts automatically as members of your ArcGIS
organization.
The disableServicesDirectory property controls whether the HTML
pages of the services directory should be accessible to the users.
The default value for this property is false, meaning the services
directory HTML pages are accessible to everyone.
"""
url = self._url + "/config/update"
params = {
"f" : "json",
"enableAutomaticAccountCreation": enableAutomaticAccountCreation,
"disableServicesDirectory" : disableServicesDirectory
}
return self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateIdenityStore(self,
userPassword,
user,
userFullnameAttribute,
ldapURLForUsers,
userEmailAttribute,
usernameAttribute,
isPasswordEncrypted=False,
caseSensitive=True):
r"""
You can use this operation to change the identity provider
configuration in your portal. When Portal for ArcGIS is first
installed, it supports token-based authentication using the
built-in identity store for accounts. To configure your portal to
connect to your enterprise authentication mechanism, it must be
configured to use an enterprise identity store such as Windows
Active Directory or LDAP.
Inputs:
userPassword -The password for the domain account, for example,
secret.
isPasswordEncrypted - Indicates if the userPassword property is
an encrypted password or plain text. If
the property is false, the API will
encrypt the password automatically.
user - A user account with at least read permissions to look up
the email addresses and user names of users in your
organization. If possible, use an account whose password
does not expire.
Windows Active Directory example: mydomain\\winaccount
LDAP example: uid=admin\,ou=system
userFullnameAttribute - The attribute in Windows Active
Directory or LDAP that contains the full
name of the users, for example, cn.
ldapURLForUsers - The URL to your LDAP that points to the user
accounts, for example,
ldap://bar2:10389/ou=users\,ou=ags\,dc=example\,dc=com.
The URL to your LDAP will need to be provided
by your LDAP administrator.
This property is not applicable when
configuring Windows Active Directory.
userEmailAttribute - The attribute in Windows Active Directory
or LDAP that contains the email addresses
of the users, for example, email.
usernameAttribute - The LDAP attribute of the user entry that is
to be treated as the user name, for example, cn.
This property is not applicable when
configuring Windows Active Directory.
caseSensitive - In the rare case where your Windows Active
Directory is configured to be case sensitive,
set this property to true.
If your LDAP is configured to be case
insensitive, set parameter to false.
"""
url = self._url + "/config/updateIdentityStore"
params = {
"f" : "json",
"userPassword" : <PASSWORD>,
"isPasswordEncrypted" : isPasswordEncrypted,
"user" : user,
"userFullnameAttribute": userFullnameAttribute,
"ldapURLForUsers" : ldapURLForUsers,
"userEmailAttribute" : userEmailAttribute,
"usernameAttribute" : usernameAttribute,
"caseSensitive" : caseSensitive
}
return self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
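    # Usage sketch (values mirror the LDAP examples given in the docstring above):
    #   security.updateIdenityStore(
    #       userPassword="secret", isPasswordEncrypted=False,
    #       user="uid=admin\,ou=system",
    #       userFullnameAttribute="cn",
    #       ldapURLForUsers="ldap://bar2:10389/ou=users\,ou=ags\,dc=example\,dc=com",
    #       userEmailAttribute="email",
    #       usernameAttribute="cn")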
#----------------------------------------------------------------------
def updateTokenConfiguration(self, sharedKey):
"""
You can use this operation to change the shared key for the token
configuration. Shared keys are used to generate tokens and must be
of a suitable length to ensure strong encryption.
Input:
sharedKey - key used to generate token
"""
url = self._url + "/tokens/update"
params = {
"f" : "json",
"tokenConfig" : {"sharedKey" : sharedKey}
}
return self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def tokenConfigurations(self):
"""
This resource represents the token configuration within your portal
Use the Update Token Configuration operation to change the
configuration properties of the token service.
"""
url = self._url + "/tokens"
params = {
"f" : "json"
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def securityConfiguration(self):
"""
The security configuration consists of the identity store
configuration.
If your portal will be authenticated through ArcGIS Web Adaptor,
you must set up your preferred authentication on your web server.
Use the Update Identity Store operation to configure your portal to
connect to your enterprise identity provider such as Windows Domain
or LDAP. By default, Portal for ArcGIS is configured to use the
built-in store and token-based authentication.
"""
url = self._url + "/config"
params = {
"f" : "json",
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def users(self):
""" returns the number of registered users on site """
url = self._url + "/users"
params = {
"f" : "json"
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
########################################################################
class _System(BaseAGOLClass):
"""
This resource is an umbrella for a collection of system-wide
resources for your portal. This resource provides access to the
ArcGIS Web Adaptor configuration, portal directories, database
management server, indexing capabilities, license information, and
the properties of your portal.
"""
_securityHandler = None
_url = None
_proxy_url = None
_proxy_port = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if securityHandler is not None:
self._securityHandler = securityHandler
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
self._url = url
#----------------------------------------------------------------------
@property
def webAdaptors(self):
"""
The Web Adaptors resource lists the ArcGIS Web Adaptor configured
with your portal. You can configure the Web Adaptor by using its
configuration web page or the command line utility provided with
the installation.
"""
url = self._url + "/webadaptors"
params = {
"f" : "json"
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def webAdaptor(self, webAdaptorID):
"""
The ArcGIS Web Adaptor is a web application that runs in a
front-end web server. One of the Web Adaptor's primary
responsibilities is to forward HTTP requests from end users to
Portal for ArcGIS. The Web Adaptor acts a reverse proxy, providing
the end users with an entry point to the system, hiding the
back-end servers, and providing some degree of immunity from
back-end failures.
The front-end web server can authenticate incoming requests against
your enterprise identity stores and provide specific authentication
schemes such as Integrated Windows Authentication (IWA), HTTP Basic,
or Digest.
Most importantly, a Web Adaptor provides your end users with a
well-defined entry point to your system without exposing the
internal details of your portal. Portal for ArcGIS will trust
requests being forwarded by the Web Adaptor and will not challenge
the user for any credentials. However, the authorization of the
request (by looking up roles and permissions) is still enforced by
the portal's sharing rules.
Input:
webAdaptorID - id of the web adaptor
"""
url = self._url + "/webadaptors/%s" % webAdaptorID
params = {
"f" : "json"
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def unregisterWebAdaptor(self, webAdaptorID):
"""
You can use this operation to unregister the ArcGIS Web Adaptor
from your portal. Once a Web Adaptor has been unregistered, your
portal will no longer trust the Web Adaptor and will not accept any
credentials from it. This operation is typically used when you want
to register a new Web Adaptor or when your old Web Adaptor needs to
be updated.
Input:
webAdaptorID - id of the web adaptor
"""
url = self._url + "/webadaptors/%s/unregister" % webAdaptorID
params = {
"f" : "json"
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateWebAdaptorsConfiguration(self, webAdaptorsConfig):
"""
This operation is used to change the common properties and
configuration of the ArcGIS Web Adaptor configured with the portal.
The properties are stored as a JSON object and, therefore, every
update must include all the necessary properties.
Inputs:
webAdaptorsConfig - The JSON object containing all the properties
in the configuration.
"""
url = self._url + "/webadaptors/config/update"
params = {
"f" : "json",
"webAdaptorsConfig" : webAdaptorsConfig
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def webAdaptorsConfiguration(self):
"""
This resource is a collection of configuration properties that
apply to the ArcGIS Web Adaptor configured with the portal. The Web
Adaptor fetches these properties periodically, which alters its
behavior. Only one property is supported:
sharedSecret - This property represents credentials that are
shared with the Web Adaptor. The Web Adaptor uses
these credentials to communicate with the portal.
"""
url = self._url + "/webadaptors/config"
params = {
"f" : "json",
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def directories(self):
"""
The directories resource is a collection of directories that are
used by the portal to store and manage content. At 10.2.1, Portal
for ArcGIS supports five types of directories:
Content directory-The content directory contains the data
associated with every item in the portal.
Database directory-The built-in security store and sharing rules
are stored in a Database server that places
files in the database directory.
Temporary directory-The temporary directory is used as a scratch
workspace for all the portal's runtime
components.
Index directory-The index directory contains all the indexes
associated with the content in the portal. The
indexes are used for quick retrieval of
information and for querying purposes.
Logs directory-Errors and warnings are written to text files in
the log file directory. Each day, if new errors
or warnings are encountered, a new log file is
created.
If you would like to change the path for a directory, you can use
the Edit Directory operation.
"""
url = self._url + "/directories"
params = {
"f" : "json"
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def directory(self, directoryName):
"""
A directory is a file system-based folder that contains a specific
type of content for the portal. The physicalPath property of a
directory locates the actual path of the folder on the file system.
At 10.2.1, Portal for ArcGIS supports local directories and network
shares as valid locations.
During the Portal for ArcGIS installation, the setup program asks
you for the root portal directory (that will contain all the
portal's sub directories). However, you can change each registered
directory through this API.
Input:
           directoryName - name of the directory category
"""
url = self._url + "/directories/%s" % directoryName
params = {
"f" : "json"
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def editDirectory(self, directoryName, physicalPath, description):
"""
The edit operation on a directory can be used to change the
physical path and description properties of the directory. This is
useful when changing the location of a directory from a local path
to a network share. However, the API does not copy your content and
data from the old path to the new path. This has to be done
independently by the system administrator.
Input:
directoryName - name of the directory to change
           physicalPath - new path for the directory
description - new description of the directory
"""
url = self._url + "/directories/%s/edit" % directoryName
params = {
"f" : "json",
"physicalPath": physicalPath,
"description" : description
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def getEntitlements(self, appId):
"""
This operation returns the currently queued entitlements for a
product, such as ArcGIS Pro or Navigator for ArcGIS, and applies
them when their start dates become effective. It's possible that
all entitlements imported using the Import Entitlements operation
are effective immediately and no entitlements are added to the
queue. In this case, the operation returns an empty result.
"""
params = {
"f" : "json",
"appId" : appId
}
url = self._url + "/licenses/getEntitlements"
return self._get(url=url, param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def importEntitlements(self, entitlementFile, appId):
""""""
params = {
"f" : "json",
"appId" : appId
}
url = self._url + "/licenses/importEntitlements"
files = {"file" : entitlementFile}
return self._post(url=url, param_dict=params,
files=files,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def languages(self):
"""
This resource lists which languages will appear in portal content
search results.
"""
params = {"f" : "json"}
url = self._url + "/languages"
return self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def releaseLicense(self, username):
"""
If a user checks out an ArcGIS Pro license for offline or
disconnected use, this operation releases the license for the
specified account. A license can only be used with a single device
running ArcGIS Pro. To check in the license, a valid access token
        and refresh token are required. If the refresh token for the device
is lost, damaged, corrupted, or formatted, the user will not be
able to check in the license. This prevents the user from logging
in to ArcGIS Pro from any other device. As an administrator, you
can release the license. This frees the outstanding license and
allows the user to check out a new license or use ArcGIS Pro in a
connected environment.
Parameters:
username - username of the account
"""
url = self._url + "/licenses/releaseLicense"
params = {
"username" : username,
"f" : "json"
}
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def removeAllEntitlements(self, appId):
"""
This operation removes all entitlements from the portal for ArcGIS
Pro or additional products such as Navigator for ArcGIS and revokes
all entitlements assigned to users for the specified product. The
portal is no longer a licensing portal for that product.
License assignments are retained on disk. Therefore, if you decide
to configure this portal as a licensing portal for the product
again in the future, all licensing assignments will be available in
the website.
Parameters:
appId - The identifier for the application for which the
entitlements are being removed.
"""
params = {
"f" : "json",
"appId" : appId
}
url = self._url + "/licenses/removeAllEntitlements"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def system_properties(self):
"""
This resource lists system properties that have been modified to
control the portal's environment.
"""
params = {"f" : "json"}
url = self._url + "/properties"
return self._get(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateLanguages(self, languages):
"""
You can use this operation to change which languages will have
content displayed in portal search results.
Parameters:
languages - The JSON object containing all of the possible
portal languages and their corresponding status (true or
false).
"""
url = self._url = "/languages/update"
params = {
"f" : "json",
"languages" : languages
}
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateLicenseManager(self, licenseManagerInfo):
"""
ArcGIS License Server Administrator works with your portal and
enforces licenses for ArcGIS Pro. This operation allows you to
change the license server connection information for your portal.
When you import entitlements into portal using the Import
Entitlements operation, a license server is automatically configured
for you. If your license server changes after the entitlements have
been imported, you only need to change the license server
connection information.
You can register a backup license manager for high availability of
your licensing portal. When configuring a backup license manager,
you need to make sure that the backup license manager has been
authorized with the same organizational entitlements. After
configuring the backup license manager, Portal for ArcGIS is
restarted automatically. When the restart completes, the portal is
configured with the backup license server you specified.
Parameters:
licenseManagerInfo - The JSON representation of the license
server connection information.
"""
url = self._url + "/licenses/updateLicenseManager"
params = {
"f" : "json",
"licenseManagerInfo" : licenseManagerInfo
}
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def database(self):
"""
The database resource represents the database management system
(DBMS) that contains all of the portal's configuration and
relationship rules. This resource also returns the name and version
of the database server currently running in the portal.
You can use the Update Database Account operation to edit the
administrative database account that is used by components within
the portal to communicate with the database server.
"""
url = self._url + "/database"
params = {
"f" : "json"
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateDatabaseAccount(self, username,
password):
"""
By default, the initial administrator account you define during the
Create Site operation is used as the database administrator
account. However, you can use this operation to change the database
administrator account credentials. To change just the password for
the account, provide the password parameter. If you want to create
a completely new account for the database, provide new values for
the username and password parameters.
Input:
username - database user name
           password - database administrator account password
"""
url = self._url + "/database/updateAdminAccount"
params = {
"f" : "json",
"username" : username,
"password" : password
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def indexer(self):
"""
The indexer resource contains connection information to the default
indexing service. You can change its configuration properties such
as the port number and host name if you want the portal sharing API
to connect to and access another indexing service.
"""
url = self._url + "/indexer"
params = {
"f" : "json",
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def indexerStatus(self):
"""
The status resource allows you to view the status of the indexing
service. You can view the number of users, groups, relationships,
and search items in both the database (store) and the index.
If the database and index do not match, indexing is either in
progress or there is a problem with the index. It is recommended
that you reindex to correct any issues. If indexing is in progress,
you can monitor the status by refreshing the page.
"""
url = self._url + "/indexer/status"
params = {
"f" : "json"
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def reindex(self, mode, includes=""):
"""
This operation allows you to generate or update the indexes for
content; such as users, groups, and items stored in the database
(store). During the process of upgrading an earlier version of
Portal for ArcGIS, you are required to update the indexes by
running this operation. You can check the status of your indexes
using the status resource.
Input:
mode - The mode in which the indexer should run.
Values: USER_MODE | GROUP_MODE | RELATION_MODE |
SEARCH_MODEL | FULL
           includes - An optional comma-separated list of elements to
include in the index. This is useful if you want to
only index certain items or user accounts.
"""
url = self._url + "/indexer/reindex"
params = {
"f" : "json",
"mode" : mode,
"includes" : includes
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateIndexConfiguration(self,
indexerHost="localhost",
indexerPort=7199):
"""
You can use this operation to change the connection information for
the indexing service. By default, Portal for ArcGIS runs an
indexing service that runs on port 7199. If you want the sharing
API to refer to the indexing service on another instance, you need
to provide the host and port parameters.
Input:
indexerHost - The name of the server (hostname) on which the
index service runs. The default value is localhost
indexerPort - The port number on which the index service is
listening. The default value is 7199
"""
url = self._url + "/indexer/update"
params = {
"f" : "json",
"indexerHost": indexerHost,
"indexerPort": indexerPort
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def licenses(self):
"""
Portal for ArcGIS requires a valid license to function correctly.
This resource returns the current status of the license.
"""
url = self._url + "/licenses"
params = {
"f" : "json"
}
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
########################################################################
class PortalAdministration(BaseAGOLClass):
"""
This is the root resource for administering your portal. Starting from
this root, all of the portal's environment is organized into a
hierarchy of resources and operations.
After installation, the portal can be configured using the Create Site
operation. Once initialized, the portal environment is available
through System and Security resources.
"""
_siteKey = None
_securityHandler = None
_url = None
_proxy_url = None
_proxy_port = None
_resources = None
_version = None
_json = None
_json_dict = None
#----------------------------------------------------------------------
def __init__(self, admin_url,
securityHandler,
proxy_url=None,
proxy_port=None,
                 initialize=False):
"""Constructor"""
        if not admin_url.endswith("/portaladmin"):
admin_url = admin_url + "/portaladmin"
if securityHandler is not None:
self._securityHandler = securityHandler
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
self._url = admin_url
        if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the site properties """
params = {
"f" : "json",
}
json_dict = self._get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.items():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
setattr(self, k, v)
print( k, " - attribute not implemented in manageportal.administration class.")
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""returns the raw key/values for the object"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.items():
yield [k,v]
#----------------------------------------------------------------------
@property
def siteKey(self):
"""gets the portal siteKey property"""
if self._siteKey is None:
self.__init()
return self._siteKey
#----------------------------------------------------------------------
@property
def resources(self):
"""returns the admin sites resources"""
if self._resources is None:
self.__init()
return self._resources
#----------------------------------------------------------------------
@property
def version(self):
"""returns the portal version"""
if self._version is None:
self.__init()
return self._version
#----------------------------------------------------------------------
def createSite(self, username, password, fullname,
email, description, securityQuestionIdx,
                   securityQuestionAns, contentDir
):
"""
The create site operation initializes and configures Portal for
ArcGIS for use. It must be the first operation invoked after
installation. Creating a new site involves:
Creating the initial administrator account
Creating a new database administrator account (which is same as the
initial administrator account)
Creating token shared keys
Registering directories
This operation is time consuming, as the database is initialized
and populated with default templates and content. If the database
directory is not empty, this operation attempts to migrate the
database to the current version while keeping its data intact. At
the end of this operation, the web server that hosts the API is
restarted.
Inputs:
username - The initial administrator account name
           password - The password for the initial administrator account
fullname - The full name for the initial administrator account
email - The account email address
description - An optional description for the account
securityQuestionIdx - The index of the secret question to retrieve
a forgotten password
securityQuestionAns - The answer to the secret question
contentDir - The path to the location of the site's content
"""
params = {
"username" : username,
"password" : password,
"fullname" : fullname,
"email" : email,
"description" : description,
"secuirtyQuestionAns" : secuirtyQuestionAns,
"securityQuestionIdx" : securityQuestionIdx,
"contentDir" : contentDir
}
url = self._url + "/createNewSite"
return self._get(url=url,
param_dict=params)
#----------------------------------------------------------------------
def exportSite(self, location):
"""
This operation exports the portal site configuration to a location
you specify.
"""
params = {
"location" : location,
"f" : "json"
}
url = self._url + "/exportSite"
return self._post(url=url, param_dict=params)
#----------------------------------------------------------------------
def importSite(self, location):
"""
This operation imports the portal site configuration to a location
you specify.
"""
params = {
"location" : location,
"f" : "json"
}
url = self._url + "/importSite"
return self._post(url=url, param_dict=params)
#----------------------------------------------------------------------
def joinSite(self, machineAdminUrl,
username, password):
"""
The joinSite operation connects a portal machine to an existing
site. You must provide an account with administrative privileges to
the site for the operation to be successful.
"""
params = {
"machineAdminUrl" : machineAdminUrl,
"username" : username,
"password" : password,
"f" : "json"
}
url = self._url + "/joinSite"
return self._post(url=url, param_dict=params)
#----------------------------------------------------------------------
def unregisterMachine(self, machineName):
"""
This operation unregisters a portal machine from a portal site. The
operation can only performed when there are two machines
participating in a portal site.
"""
url = self._url + "/machines/unregister"
params = {
"f" : "json",
"machineName" : machineName
}
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def federation(self):
"""returns the class that controls federation"""
url = self._url + "/federation"
return _Federation(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def system(self):
"""
Creates a reference to the System operations for Portal
"""
url = self._url + "/system"
return _System(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def security(self):
"""
Creates a reference to the Security operations for Portal
"""
url = self._url + "/security"
return _Security(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def logs(self):
"""returns the portals log information"""
url = self._url + "/logs"
return _log(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def root(self):
"""gets/sets the root admin url"""
return self._url
#----------------------------------------------------------------------
@root.setter
def root(self, value):
"""gets/sets the root admin url"""
if self._url != value:
self._url = value
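
if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the portal URL below is a
    # placeholder, and a real deployment would pass an authenticated security
    # handler instead of None before invoking any of the operations above.
    admin = PortalAdministration(
        admin_url="https://portal.example.com/arcgis",
        securityHandler=None)
    print(admin.root)  # -> https://portal.example.com/arcgis/portaladmin
    # Once authenticated, admin.system, admin.security and admin.logs expose
    # the resources defined above, e.g. admin.system.directories.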
|
tests/pydbgen_test.py | alexwohletz/pydbgen | 221 | 11126969 | from fakedbclass import fakedb
fdb = fakedb()
for _ in range(10):
print (fdb.ph()) |
src/genie/libs/parser/iosxe/tests/ShowDeviceTrackingFeatures/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 11127002 | expected_output = {
"features":{
"DHCP Guard":{
"feature":"DHCP Guard",
"priority":200,
"state":"READY"
},
"RA guard":{
"feature":"RA guard",
"priority":192,
"state":"READY"
}
}
} |
examples.py/3D/Form/CubicGrid.py | timgates42/processing.py | 1,224 | 11127064 | """
Cubic Grid
by <NAME>.
3D translucent colored grid uses nested pushMatrix()
and popMatrix() functions.
"""
boxSize = 40
margin = boxSize*2
depth = 400
def setup():
size(640, 360, P3D)
noStroke()
def draw():
background(255)
# Center and spin grid
translate(width/2, height/2, -depth)
rotateY(frameCount * 0.01)
rotateX(frameCount * 0.01)
# Build grid using multiple translations
i = -depth/2+margin
while i <= depth/2-margin:
pushMatrix()
j = -height+margin
while j <= height-margin:
pushMatrix()
k = -width + margin
while k <= width-margin:
# Base fill color on counter values, abs function
# ensures values stay within legal range
boxFill = color(abs(i), abs(j), abs(k), 50)
pushMatrix()
translate(k, j, i)
fill(boxFill)
box(boxSize, boxSize, boxSize)
popMatrix()
k += boxSize
popMatrix()
j += boxSize
popMatrix()
i += boxSize
|
datasets/chinese_bert_dataset.py | chinaliwenbo/ChineseBert | 298 | 11127075 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file : chinese_bert_dataset.py
@author: zijun
@contact : <EMAIL>
@date : 2021/6/29 17:35
@version: 1.0
@desc : Base Class for dataset
"""
import json
import os
from typing import List
import tokenizers
from pypinyin import pinyin, Style
from tokenizers import BertWordPieceTokenizer
from torch.utils.data import Dataset
class ChineseBertDataset(Dataset):
def __init__(self, data_path, chinese_bert_path, max_length: int = 512):
"""
Dataset Base class
Args:
data_path: dataset file path
chinese_bert_path: pretrain model path
max_length: max sentence length
"""
super().__init__()
self.vocab_file = os.path.join(chinese_bert_path, 'vocab.txt')
self.config_path = os.path.join(chinese_bert_path, 'config')
self.data_path = data_path
self.max_length = max_length
self.tokenizer = BertWordPieceTokenizer(self.vocab_file)
# load pinyin map dict
with open(os.path.join(self.config_path, 'pinyin_map.json'), encoding='utf8') as fin:
self.pinyin_dict = json.load(fin)
# load char id map tensor
with open(os.path.join(self.config_path, 'id2pinyin.json'), encoding='utf8') as fin:
self.id2pinyin = json.load(fin)
# load pinyin map tensor
with open(os.path.join(self.config_path, 'pinyin2tensor.json'), encoding='utf8') as fin:
self.pinyin2tensor = json.load(fin)
self.lines = self.get_lines()
    def get_lines(self):
"""read data lines"""
raise NotImplementedError
def convert_sentence_to_pinyin_ids(self, sentence: str, tokenizer_output: tokenizers.Encoding) -> List[List[int]]:
# get pinyin of a sentence
pinyin_list = pinyin(sentence, style=Style.TONE3, heteronym=True, errors=lambda x: [['not chinese'] for _ in x])
pinyin_locs = {}
# get pinyin of each location
for index, item in enumerate(pinyin_list):
pinyin_string = item[0]
# not a Chinese character, pass
if pinyin_string == "not chinese":
continue
if pinyin_string in self.pinyin2tensor:
pinyin_locs[index] = self.pinyin2tensor[pinyin_string]
else:
ids = [0] * 8
for i, p in enumerate(pinyin_string):
if p not in self.pinyin_dict["char2idx"]:
ids = [0] * 8
break
ids[i] = self.pinyin_dict["char2idx"][p]
pinyin_locs[index] = ids
# find chinese character location, and generate pinyin ids
pinyin_ids = []
for idx, (token, offset) in enumerate(zip(tokenizer_output.tokens, tokenizer_output.offsets)):
if offset[1] - offset[0] != 1:
pinyin_ids.append([0] * 8)
continue
if offset[0] in pinyin_locs:
pinyin_ids.append(pinyin_locs[offset[0]])
else:
pinyin_ids.append([0] * 8)
return pinyin_ids
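
if __name__ == "__main__":
    # Sketch of how a task-specific dataset is expected to specialize the base
    # class (illustrative only; the plain-text file layout and the paths in the
    # commented lines are assumptions, not part of the original project).
    class PlainTextDataset(ChineseBertDataset):
        def get_lines(self):
            with open(self.data_path, encoding='utf8') as fin:
                return fin.readlines()
    # dataset = PlainTextDataset('data/train.txt', '/path/to/ChineseBERT-base')
    # encoding = dataset.tokenizer.encode('我喜欢猫')
    # pinyin_ids = dataset.convert_sentence_to_pinyin_ids('我喜欢猫', encoding)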
|
lib/utils/video.py | wjbKimberly/DetectAndTrack-wjb | 1,007 | 11127099 | <gh_stars>1000+
##############################################################
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import os
import numpy as np
import scipy.sparse
from tqdm import tqdm
import logging
import sys
import math
from core.config import cfg
import utils.general as gen_utils
# OpenCL is enabled by default in OpenCV3 and it is not thread-safe leading
# to huge GPU memory allocations. See https://fburl.com/9d7tvusd
try:
cv2.ocl.setUseOpenCL(False)
except AttributeError:
pass
FORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)
def get_video_info(roidb):
"""
For each entry in roidb, returns a dictionary with
(video_name, key_frame, flipped or not)
"""
video_frames = {}
for i, entry in enumerate(roidb):
if entry['dataset'].frames_from_video:
# For a video dataset like Kinetics
video_name = entry['image']
key_frame = entry['frame_id']
else:
video_name = os.path.dirname(entry['image'])
key_frame = int(os.path.splitext(os.path.basename(
entry['image']))[0])
video_frames[i] = (video_name, key_frame, entry['flipped'])
return video_frames
def _center_crop_list(l, sz):
assert(len(l) >= sz)
start_pt = (len(l) // 2) - (sz // 2)
res = l[start_pt: (start_pt + sz)]
assert len(res) == sz, 'Make sure got right number of elts'
assert res[len(res) // 2] == l[len(l) // 2], 'Make sure got the center correct'
return res
def _combine_clips(entry):
""" entry[clip_ids] contains the various frames. Combine them all
into the main entry to construct tubes etc.
"""
new_entry = {}
new_entry['image'] = [clip['image'] for clip in entry['clip_ids']]
# take a subset of the clip ids now for the remaining stuff
assert(cfg.VIDEO.NUM_FRAMES_MID <= cfg.VIDEO.NUM_FRAMES)
entry['clip_ids'] = _center_crop_list(entry['clip_ids'], cfg.VIDEO.NUM_FRAMES_MID)
copy_fields = [
'dataset',
'has_visible_keypoints',
'id',
'nframes',
'width',
'head_boxes',
'is_labeled',
'frame_id',
'height',
'flipped',
]
for field_name in copy_fields:
if field_name in entry:
new_entry[field_name] = entry[field_name]
outframes = range(len(entry['clip_ids']))
# union all the track ids
all_track_ids = np.array(list(set(gen_utils.flatten_list([
clip['tracks'].reshape((-1,)).tolist() for clip in entry['clip_ids']]))),
dtype=entry['tracks'].dtype)
new_entry['tracks'] = all_track_ids
ntracks = len(all_track_ids)
noutframes = len(outframes)
new_entry['all_frame_ids'] = [clip['frame_id'] for clip in entry['clip_ids']]
if 'original_file_name' in entry.keys():
new_entry['original_file_name'] = [
clip['original_file_name'] for clip in entry['clip_ids']]
new_entry['gt_keypoints'] = np.zeros((
ntracks,
noutframes,
entry['gt_keypoints'].shape[-2],
entry['gt_keypoints'].shape[-1]), dtype=entry['gt_keypoints'].dtype)
new_entry['boxes'] = np.zeros((ntracks, 4 * noutframes), dtype=entry['boxes'].dtype)
new_entry['is_crowd'] = np.zeros((ntracks,), dtype=entry['is_crowd'].dtype)
new_entry['gt_overlaps'] = scipy.sparse.csr_matrix(np.zeros(
(ntracks, entry['gt_overlaps'].shape[1]), dtype=entry['gt_overlaps'].dtype))
new_entry['gt_classes'] = np.zeros((ntracks,), dtype=entry['gt_classes'].dtype)
new_entry['track_visible'] = np.full((ntracks, noutframes), False)
new_entry['segms'] = [[]] * ntracks
new_entry['box_to_gt_ind_map'] = np.arange(
ntracks, dtype=entry['box_to_gt_ind_map'].dtype)
# Just assume 1-class for now => has to be the "person" class
new_entry['max_classes'] = np.ones(
(ntracks,), dtype=entry['max_classes'].dtype)
# All GT, so 1.0 overlap
new_entry['max_overlaps'] = np.ones((ntracks,), dtype=entry['max_overlaps'].dtype)
# This isn't really used, so just set to 1s
new_entry['seg_areas'] = np.ones((ntracks,), dtype=entry['seg_areas'].dtype)
for cur_track_pos, track_id in enumerate(all_track_ids):
for cur_frame_pos, frame_id in enumerate(outframes):
tracks = entry['clip_ids'][frame_id]['tracks'].reshape((-1,)).tolist()
track_pos = tracks.index(track_id) if track_id in tracks else -1
if track_pos >= 0:
new_entry['boxes'][cur_track_pos, cur_frame_pos * 4:
(cur_frame_pos + 1) * 4] = \
entry['clip_ids'][frame_id]['boxes'][track_pos]
new_entry['gt_keypoints'][cur_track_pos, cur_frame_pos, ...] = \
entry['clip_ids'][frame_id]['gt_keypoints'][track_pos]
new_entry['track_visible'][cur_track_pos, cur_frame_pos] = True
new_entry['gt_classes'][cur_track_pos] = \
entry['clip_ids'][frame_id]['gt_classes'][track_pos]
new_entry['gt_overlaps'][cur_track_pos, 1] = 1.0
# Since boxes were defined as Nx(4*T+1), I'm modifying the keypoints from
# NxTx3x17 to Nx3x(17*T). This make the blob dimensions consistent with
# 2D cases, saving a lot of "if" conditions in the future. Can simply
# think of predicting 17*T keypoints for a T-length video instead of 17.
# Also, since 17 is not a fixed number, I can always compute T using
# the "boxes" entry, as T = boxes.shape[-1] // 4 (a box is always 4 numbers)
nkpts = new_entry['gt_keypoints'].shape[-1]
new_entry['gt_keypoints'] = new_entry['gt_keypoints'].transpose(
(0, 2, 1, 3)).reshape((ntracks, 3, noutframes * nkpts))
return new_entry
def get_clip(roidb, remove_imperfect=False):
"""
Add a 'clip_ids' field to each entry of roidb, which contains pointers to
other elements of roidb that contain the other frames that should go with
this frame in case of a video.
"""
video_info = get_video_info(roidb) # is a dict
video_info_to_pos = {}
for el_id, el in video_info.items():
video_info_to_pos[el] = el_id
half_T = (cfg.VIDEO.NUM_FRAMES - 1) / 2
vid_list = range(int(math.floor(-half_T)), int(math.floor(half_T)) + 1)
assert(len(vid_list) == cfg.VIDEO.NUM_FRAMES)
assert(vid_list[len(vid_list) // 2] == 0)
new_roidb = []
for i, entry in enumerate(tqdm(roidb, desc='Video-fying the roidb')):
# extract video and frame number information
this_video_info = video_info[i]
# collect the clips of T length
roidb_indexes = [None] * cfg.VIDEO.NUM_FRAMES
for dt_i, dt in enumerate(vid_list):
target = (
this_video_info[0],
this_video_info[1] + dt * cfg.VIDEO.TIME_INTERVAL,
this_video_info[2])
if target in video_info_to_pos.keys():
pos = video_info_to_pos[target]
# roidb_indexes.append(
# video_info.keys()[video_info.values().index(target)])
# The pointers are not good enough.. On removing the empty
# clips the pointers would break
roidb_indexes[dt_i] = roidb[pos]
if len([el for el in roidb_indexes if el is None]) > 0 and remove_imperfect:
continue
else:
last_non_none = None
for k in range(cfg.VIDEO.NUM_FRAMES // 2, -1, -1):
if roidb_indexes[k] is not None:
last_non_none = roidb_indexes[k]
if roidb_indexes[k] is None:
roidb_indexes[k] = last_non_none
last_non_none = None
for k in range(cfg.VIDEO.NUM_FRAMES // 2, cfg.VIDEO.NUM_FRAMES):
if roidb_indexes[k] is not None:
last_non_none = roidb_indexes[k]
if roidb_indexes[k] is None:
roidb_indexes[k] = last_non_none
assert(len([el for el in roidb_indexes if el is None]) == 0)
entry['clip_ids'] = roidb_indexes
entry = _combine_clips(entry)
new_roidb.append(entry)
logger.info('Video-fied roidb contains {} elements'.format(len(new_roidb)))
return new_roidb
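
if __name__ == '__main__':
    # Small illustration (not part of the original tooling): clips are built
    # from symmetric frame offsets around the key frame. With NUM_FRAMES = 5
    # and TIME_INTERVAL = 1 this prints [-2, -1, 0, 1, 2].
    num_frames = 5  # stand-in for cfg.VIDEO.NUM_FRAMES
    half_T = (num_frames - 1) / 2
    print(list(range(int(math.floor(-half_T)), int(math.floor(half_T)) + 1)))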
|
create_caffemodel.py | lFatality/tensorflow2caffe | 115 | 11127103 | from __future__ import print_function, division
caffe_root = '/TUB/robo/caffe-master/'
import sys
sys.path.insert(0, caffe_root+'python')
import caffe
import numpy as np
#load the data file
data_file = np.load('vgg_net_19_112.npy')
#get the weights and biases out of the array
#the weights have to be transposed because of differences between Caffe and Tensorflow
#format filter weights:
#Tensorflow: [height (0), width (1), depth (2), number of filters (3)]
#Caffe: [number of filters (3), depth (2), height (0), width (1)]
weights1 = data_file[0][0].transpose((3,2,0,1))
bias1 = data_file[0][1]
weights2 = data_file[1][0].transpose((3,2,0,1))
bias2 = data_file[1][1]
weights3 = data_file[2][0].transpose((3,2,0,1))
bias3 = data_file[2][1]
weights4 = data_file[3][0].transpose((3,2,0,1))
bias4 = data_file[3][1]
weights5 = data_file[4][0].transpose((3,2,0,1))
bias5 = data_file[4][1]
weights6 = data_file[5][0].transpose((3,2,0,1))
bias6 = data_file[5][1]
weights7 = data_file[6][0].transpose((3,2,0,1))
bias7 = data_file[6][1]
weights8 = data_file[7][0].transpose((3,2,0,1))
bias8 = data_file[7][1]
weights9 = data_file[8][0].transpose((3,2,0,1))
bias9 = data_file[8][1]
weights10 = data_file[9][0].transpose((3,2,0,1))
bias10 = data_file[9][1]
weights11 = data_file[10][0].transpose((3,2,0,1))
bias11 = data_file[10][1]
weights12 = data_file[11][0].transpose((3,2,0,1))
bias12 = data_file[11][1]
weights13 = data_file[12][0].transpose((3,2,0,1))
bias13 = data_file[12][1]
weights14 = data_file[13][0].transpose((3,2,0,1))
bias14 = data_file[13][1]
weights15 = data_file[14][0].transpose((3,2,0,1))
bias15 = data_file[14][1]
weights16 = data_file[15][0].transpose((3,2,0,1))
bias16 = data_file[15][1]
#connecting the tensor after last pooling layer with the first fully-connected layer
#here is the link to the video where this part is explained (https://youtu.be/kvXHOIn3-8s?t=3m38s)
fc1_w = data_file[16][0].reshape((4,4,512,4096))
fc1_w = fc1_w.transpose((3,2,0,1))
fc1_w = fc1_w.reshape((4096,8192))
fc1_b = data_file[16][1]
#fully connected layer format:
#Tensorflow: [number of inputs (0), number of outputs (1)]
#Caffe: [number of outputs (1), number of inputs (0)]
fc2_w = data_file[17][0].transpose((1,0))
fc2_b = data_file[17][1]
fc3_w = data_file[18][0].transpose((1,0))
fc3_b = data_file[18][1]
#define architecture
net = caffe.Net('vgg_net_19.prototxt', caffe.TEST)
#load parameters
net.params['conv1'][0].data[...] = weights1
net.params['conv1'][1].data[...] = bias1
net.params['conv2'][0].data[...] = weights2
net.params['conv2'][1].data[...] = bias2
net.params['conv3'][0].data[...] = weights3
net.params['conv3'][1].data[...] = bias3
net.params['conv4'][0].data[...] = weights4
net.params['conv4'][1].data[...] = bias4
net.params['conv5'][0].data[...] = weights5
net.params['conv5'][1].data[...] = bias5
net.params['conv6'][0].data[...] = weights6
net.params['conv6'][1].data[...] = bias6
net.params['conv7'][0].data[...] = weights7
net.params['conv7'][1].data[...] = bias7
net.params['conv8'][0].data[...] = weights8
net.params['conv8'][1].data[...] = bias8
net.params['conv9'][0].data[...] = weights9
net.params['conv9'][1].data[...] = bias9
net.params['conv10'][0].data[...] = weights10
net.params['conv10'][1].data[...] = bias10
net.params['conv11'][0].data[...] = weights11
net.params['conv11'][1].data[...] = bias11
net.params['conv12'][0].data[...] = weights12
net.params['conv12'][1].data[...] = bias12
net.params['conv13'][0].data[...] = weights13
net.params['conv13'][1].data[...] = bias13
net.params['conv14'][0].data[...] = weights14
net.params['conv14'][1].data[...] = bias14
net.params['conv15'][0].data[...] = weights15
net.params['conv15'][1].data[...] = bias15
net.params['conv16'][0].data[...] = weights16
net.params['conv16'][1].data[...] = bias16
net.params['fc1'][0].data[...] = fc1_w
net.params['fc1'][1].data[...] = fc1_b
net.params['fc2'][0].data[...] = fc2_w
net.params['fc2'][1].data[...] = fc2_b
net.params['fc3'][0].data[...] = fc3_w
net.params['fc3'][1].data[...] = fc3_b
#save caffemodel
net.save('vgg_net_19.caffemodel')
|
tests/test_backends.py | Zagrebelin/django_mail_admin | 179 | 11127145 | <gh_stars>100-1000
from django.core.mail import send_mail
from django.core.mail.backends.base import BaseEmailBackend
from django.test import TestCase
from django.test.utils import override_settings
from django_mail_admin.mail import send
from django_mail_admin.settings import get_backend
from django_mail_admin.backends import CustomEmailBackend
from django_mail_admin.models import Outbox, OutgoingEmail
class ErrorRaisingBackend(BaseEmailBackend):
"""
An EmailBackend that always raises an error during sending
to test if django_mailer handles sending error correctly
"""
def send_messages(self, email_messages):
raise Exception('Fake Error')
class BackendTest(TestCase):
# @override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_get_invalid_backend(self):
with self.assertRaises(ValueError):
send('<EMAIL>', backend='some_non_existing')
@override_settings(DJANGO_MAIL_ADMIN={'EMAIL_BACKEND': 'test'})
def test_old_settings(self):
backend = get_backend()
self.assertEqual(backend, 'test')
@override_settings(DJANGO_MAIL_ADMIN={}, EMAIL_BACKEND='django_mail_admin.backends.CustomEmailBackend')
def test_backend_fallback(self):
backend = get_backend()
self.assertEqual(backend, 'django_mail_admin.backends.CustomEmailBackend')
def test_custom_email_backend(self):
outbox = Outbox.objects.create(name='test', email_host='example.com',
email_host_user='<EMAIL>', email_host_password='<PASSWORD>', active=True)
backend = CustomEmailBackend()
self.assertEqual(backend.host, outbox.email_host)
self.assertEqual(backend.password, outbox.email_host_password)
outbox.delete()
with self.assertRaises(ValueError):
backend2 = CustomEmailBackend()
@override_settings(DJANGO_MAIL_ADMIN={}, EMAIL_BACKEND='django_mail_admin.backends.OutboxEmailBackend')
def test_outbox_email_backend(self):
count_before = len(OutgoingEmail.objects.all())
sent_count = send_mail(
subject='Test subject', message='message.',
from_email='<EMAIL>', recipient_list=['<EMAIL>'],
fail_silently=False)
self.assertEqual(sent_count, 1)
count_after = len(OutgoingEmail.objects.all())
self.assertEqual(count_after, count_before + 1)
|
Utils/Arguments.py | GingerNg/SDNet | 112 | 11127149 | <gh_stars>100-1000
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
class Arguments:
def __init__(self, confFile):
if not os.path.exists(confFile):
raise Exception("The argument file does not exist: " + confFile)
self.confFile = confFile
def is_int(self, s):
try:
int(s)
return True
except ValueError:
return False
def is_float(self, s):
try:
float(s)
return True
except ValueError:
return False
def is_bool(self, s):
return s.lower() == 'true' or s.lower() == 'false'
def readHyperDriveArguments(self, arguments):
hyperdrive_opts = {}
for i in range(0, len(arguments), 2):
hp_name, hp_value = arguments[i:i+2]
hp_name = hp_name.replace("--", "")
if self.is_int(hp_value):
hp_value = int(hp_value)
elif self.is_float(hp_value):
hp_value = float(hp_value)
hyperdrive_opts[hp_name] = hp_value
return hyperdrive_opts
def readArguments(self):
opt = {}
with open(self.confFile, encoding='utf-8') as f:
for line in f:
l = line.replace('\t', ' ').strip()
if l.startswith("#"):
continue
parts = l.split()
if len(parts) == 1:
key = parts[0]
if not key in opt:
opt[key] = True
if len(parts) == 2:
key = parts[0]
value = parts[1]
if not key in opt:
opt[key] = value
if self.is_int(value):
opt[key] = int(value)
elif self.is_float(value):
opt[key] = float(value)
elif self.is_bool(value):
opt[key] = value.lower() == 'true'
else:
print('Warning: key %s already exists' % key)
return opt
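
if __name__ == "__main__":
    # Self-contained demo (not part of SDNet): write a tiny config file in the
    # format readArguments() expects, then parse it. Every key and value below
    # is made up for illustration.
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as f:
        f.write('# lines starting with "#" are ignored\n')
        f.write('USE_CUDA\n')        # bare key -> True
        f.write('BATCH_SIZE 32\n')   # parsed as int
        f.write('DROPOUT 0.3\n')     # parsed as float
        f.write('MODE train\n')      # kept as string
        conf_path = f.name
    print(Arguments(conf_path).readArguments())
    # {'USE_CUDA': True, 'BATCH_SIZE': 32, 'DROPOUT': 0.3, 'MODE': 'train'}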
|
sample_files/sample_CSV/verify.py | elihschiff/Submitty | 411 | 11127150 |
def parse_assigned_zones():
allowed_str = 'ABCDEFGHJKLMNPUZ'
assigned_zone_dict = {}
with open('exam1_seating.txt', 'r') as assigned:
for line in assigned:
line = line.strip()
line_list = line.split(' ')
line_list = [ line_.strip() for line_ in line_list ]
line_list = [ line_ for line_ in line_list if len(line_) > 0 ]
if len(line_list) == 3:
assigned_zone = 'U'
elif len(line_list) == 6:
assigned_zone = line_list[-1]
else:
assigned_zone = line_list[-2]
if assigned_zone == 'UNASSIGNED':
assigned_zone = None
assert assigned_zone is None or assigned_zone in allowed_str
student_rcs = line_list[2]
assigned_zone_dict[student_rcs] = assigned_zone
return assigned_zone_dict
def get_actual_zone_dict():
actual_dict = {}
showed_dict = {}
assigned_zone_dict = parse_assigned_zones()
direct_list = ['CSCI_1100_Exam_1']
for direct in direct_list:
with open('%s/9_Zone_Assignment.csv' % (direct, ), 'r') as zones:
# Get header row contents
header_str = zones.readline()
header_list = header_str.strip().split(',')[6: -3]
line_list = zones.readlines()
# Trim last three rows
line_list = line_list[:-3]
for index, line in enumerate(line_list):
line = line.strip()
if len(line) > 0:
record = line.split(',')
student_name = record[1]
student_rcs = record[2]
assigned_zone = assigned_zone_dict[student_rcs]
actual_list = record[6: -3]
actual_index = actual_list.index('true')
actual_zone = header_list[actual_index]
actual_dict[student_rcs] = actual_zone
if assigned_zone == actual_zone:
if assigned_zone not in showed_dict:
showed_dict[assigned_zone] = 0
showed_dict[assigned_zone] += 1
else:
print('%s (%s)' % (student_name, student_rcs, ))
print('\tAssigned: %s' % (assigned_zone, ))
print('\tActual: %s' % (actual_zone, ))
for key in sorted(showed_dict.keys()):
print('Zone % 2s: %d' % (key, showed_dict[key]))
return actual_dict
if __name__ == '__main__':
get_actual_zone_dict()
|
lib/discoverlib/coords.py | Pandinosaurus/roadtracer | 171 | 11127165 | <filename>lib/discoverlib/coords.py
import geom
import math
ORIGIN_SHIFT = 2 * math.pi * 6378137 / 2.0
def lonLatToMeters(p):
mx = p.x * ORIGIN_SHIFT / 180.0
my = math.log(math.tan((90 + p.y) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * ORIGIN_SHIFT / 180.0
return geom.FPoint(mx, my)
def metersToLonLat(p):
lon = (p.x / ORIGIN_SHIFT) * 180.0
lat = (p.y / ORIGIN_SHIFT) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return geom.FPoint(lon, lat)
def getMetersPerPixel(zoom):
return 2 * math.pi * 6378137 / (2**zoom) / 256
def lonLatToPixel(p, origin, zoom):
p = lonLatToMeters(p).sub(lonLatToMeters(origin))
p = p.scale(1 / getMetersPerPixel(zoom))
p = geom.FPoint(p.x, -p.y)
p = p.add(geom.FPoint(256, 256))
return p
def pixelToLonLat(p, origin, zoom):
p = p.sub(geom.FPoint(256, 256))
p = geom.FPoint(p.x, -p.y)
p = p.scale(getMetersPerPixel(zoom))
p = metersToLonLat(p.add(lonLatToMeters(origin)))
return p
def lonLatToMapboxTile(p, zoom):
n = 2**zoom
xtile = int((p.x + 180.0) / 360 * n)
ytile = int((1 - math.log(math.tan(p.y * math.pi / 180) + (1 / math.cos(p.y * math.pi / 180))) / math.pi) / 2 * n)
return (xtile, ytile)
def lonLatToMapbox(p, zoom, origin_tile):
n = 2**zoom
x = (p.x + 180.0) / 360 * n
y = (1 - math.log(math.tan(p.y * math.pi / 180) + (1 / math.cos(p.y * math.pi / 180))) / math.pi) / 2 * n
xoff = x - origin_tile[0]
yoff = y - origin_tile[1]
return geom.FPoint(xoff, yoff).scale(256)
def mapboxToLonLat(p, zoom, origin_tile):
n = 2**zoom
x = p.x / 256.0 + origin_tile[0]
y = p.y / 256.0 + origin_tile[1]
x = x * 360.0 / n - 180
y = math.atan(math.sinh(math.pi * (1 - 2.0 * y / n)))
y = y * 180 / math.pi
return geom.FPoint(x, y)
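
if __name__ == '__main__':
	# Quick round-trip sanity check (illustrative only; the point below is an
	# arbitrary lon/lat, not data from the original project).
	p = geom.FPoint(-71.0589, 42.3601)
	q = metersToLonLat(lonLatToMeters(p))
	print(p.x, p.y, '->', q.x, q.y)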
|
plaso/formatters/winlnk.py | cugu-stars/plaso | 1,253 | 11127216 | <reponame>cugu-stars/plaso<filename>plaso/formatters/winlnk.py<gh_stars>1000+
# -*- coding: utf-8 -*-
"""Windows Shortcut (LNK) custom event formatter helpers."""
from plaso.formatters import interface
from plaso.formatters import manager
class WindowsShortcutLinkedPathFormatterHelper(
interface.CustomEventFormatterHelper):
"""Windows Shortcut (LNK) linked path formatter helper."""
IDENTIFIER = 'windows_shortcut_linked_path'
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
linked_path = event_values.get('local_path', None)
if not linked_path:
linked_path = event_values.get('network_path', None)
if not linked_path:
linked_path = event_values.get('relative_path', None)
if linked_path:
working_directory = event_values.get('working_directory', None)
if working_directory:
linked_path = '\\'.join([working_directory, linked_path])
event_values['linked_path'] = linked_path or 'Unknown'
manager.FormattersManager.RegisterEventFormatterHelper(
WindowsShortcutLinkedPathFormatterHelper)
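
if __name__ == '__main__':
    # Illustrative only: the helper falls back from local_path to network_path
    # to relative_path and prefixes the working directory when one is present.
    # The values below are made up.
    event_values = {
        'relative_path': 'app.exe',
        'working_directory': 'C:\\Tools'}
    WindowsShortcutLinkedPathFormatterHelper().FormatEventValues(event_values)
    print(event_values['linked_path'])  # C:\Tools\app.exe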
|
tests/unit/operations/test_configops.py | myungseokang/aws-elastic-beanstalk-cli | 110 | 11127227 | # -*- coding: utf-8 -*-
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import unittest
import mock
from ebcli.operations import configops
from ebcli.objects.exceptions import InvalidSyntaxError
from yaml.scanner import ScannerError
from json import JSONDecodeError
class TestConfigOperations(unittest.TestCase):
app_name = 'ebcli-app'
env_name = 'ebcli-env'
platform_arn = 'arn:aws:elasticbeanstalk:us-east-1::platform/Platform1/1.0.0'
new_platform_arn = 'arn:aws:elasticbeanstalk:us-east-1::platform/Platform2/1.0.0'
file_location = '/wow/eb/white space/.intere-sting'
editor = 'emacs'
nohang = False
changes = [{'Namespace': 'aws:autoscaling:asg', 'OptionName': 'MaxSize', 'Value': '6'}]
remove = [{'Namespace': 'aws:autoscaling:asg', 'OptionName': 'MinSize'}]
api_model = {'PlatformArn': platform_arn}
usr_model = {'PlatformArn': new_platform_arn}
usr_modification = '{"OptionsToRemove":{"aws:autoscaling:asg":["MinSize"]},"OptionSettings":{' \
'"aws:autoscaling:asg":{"MaxSize":"6"}}} '
usr_modification_file = 'file://' + file_location
@mock.patch('ebcli.operations.configops.commonops')
@mock.patch('ebcli.operations.configops.EnvironmentSettings')
@mock.patch('ebcli.operations.configops.elasticbeanstalk')
@mock.patch('ebcli.operations.configops.fileoperations')
def test_update_environment_configuration_solution_stack_changed(self, mock_fileops, mock_elasticbeanstalk, mock_env_settings, mock_commonops):
mock_elasticbeanstalk.describe_configuration_settings.return_value = self.api_model
mock_fileops.get_environment_from_file.return_value = self.usr_model
mock_env_settings.return_value = mock_env_settings
mock_env_settings.convert_api_to_usr_model.return_value = self.usr_model
mock_env_settings.collect_changes.return_value = self.changes, self.remove
mock_fileops.save_env_file.return_value = self.file_location
configops.update_environment_configuration(self.app_name, self.env_name, self.nohang)
# verify that changes will be made
mock_commonops.update_environment.assert_called_with(self.env_name, self.changes, self.nohang,
platform_arn=self.new_platform_arn,
remove=self.remove, timeout=None,
solution_stack_name=None)
@mock.patch('ebcli.operations.configops.commonops')
@mock.patch('ebcli.operations.configops.EnvironmentSettings')
@mock.patch('ebcli.operations.configops.elasticbeanstalk')
@mock.patch('ebcli.operations.configops.fileoperations')
def test_update_environment_configuration_no_change(self, mock_fileops, mock_elasticbeanstalk, mock_env_settings, mock_commonops):
mock_elasticbeanstalk.describe_configuration_settings.return_value = self.usr_model
mock_fileops.get_environment_from_file.return_value = self.usr_model
mock_env_settings.return_value = mock_env_settings
mock_env_settings.convert_api_to_usr_model.return_value = self.usr_model
mock_env_settings.collect_changes.return_value = None, None
mock_fileops.save_env_file.return_value = self.file_location
configops.update_environment_configuration(self.app_name, self.env_name, self.nohang)
mock_commonops.update_environment.assert_not_called()
@mock.patch('ebcli.operations.configops.commonops')
@mock.patch('ebcli.operations.configops.EnvironmentSettings')
@mock.patch('ebcli.operations.configops.elasticbeanstalk')
@mock.patch('ebcli.operations.configops.fileoperations')
def test_update_environment_configuration_bad_usr_modification(self, mock_fileops, mock_elasticbeanstalk, mock_env_settings,
mock_commonops):
mock_elasticbeanstalk.describe_configuration_settings.return_value = self.usr_model
mock_fileops.get_environment_from_file.side_effect = InvalidSyntaxError("Bad user changes")
mock_env_settings.return_value = mock_env_settings
mock_env_settings.convert_api_to_usr_model.return_value = self.usr_model
mock_fileops.save_env_file.return_value = self.file_location
configops.update_environment_configuration(self.app_name, self.env_name, self.nohang)
mock_commonops.update_environment.assert_not_called()
@mock.patch('ebcli.operations.configops.commonops')
def test_modify_environment_configuration(self, mock_commonops):
configops.modify_environment_configuration(self.env_name, self.usr_modification, self.nohang)
# verify that changes will be made
mock_commonops.update_environment.assert_called_with(self.env_name, self.changes, self.nohang,
platform_arn=None,
remove=self.remove, timeout=None,
solution_stack_name=None)
@mock.patch('ebcli.operations.configops.safe_load')
@mock.patch('ebcli.operations.configops.loads')
def test_modify_environment_configuration_bad_usr_modification(self, mock_loads, mock_safe_load):
mock_safe_load.side_effect = ScannerError("Bad user changes")
mock_loads.side_effect = JSONDecodeError("Bad user changes", "", 0)
with self.assertRaises(InvalidSyntaxError) as context_manager:
configops.modify_environment_configuration(self.env_name, self.usr_modification, self.nohang)
self.assertEqual(
'The environment configuration contains invalid syntax. Be sure your input matches one of the '
'supported formats: yaml, json',
str(context_manager.exception)
)
|
interpreter.py | aluo-x/shape2prog | 109 | 11127237 | <reponame>aluo-x/shape2prog<gh_stars>100-1000
from __future__ import print_function
import numpy as np
class Interpreter(object):
"""interpreting program vectors into understandable program strings"""
def __init__(self, translate, rotate, end):
self.translate = translate
self.rotate = rotate
self.end = end
def interpret(self, pgm, param):
n_block = pgm.shape[0]
param = np.round(param).astype(np.int32)
result = ""
for i in range(n_block):
res = self.interpret_block(pgm[i], param[i])
if res is None:
continue
else:
result += res
result += "\n"
return result
def interpret_block(self, pgm, param):
"""
interpret each block
"""
flag = 1
block_res = []
if pgm[0] == self.translate:
if pgm[1] == self.translate:
if 1 <= pgm[2] < self.translate:
sentence = "for(i<{}, 'Trans', u1=({},{},{}))"\
.format(param[0, 0], param[0, 1], param[0, 2], param[0, 3])
block_res.append(sentence)
sentence = "for(i<{}, 'Trans', u2=({},{},{}))"\
.format(param[1, 0], param[1, 1], param[1, 2], param[1, 3])
block_res.append(" "+sentence)
sentence = self.interpret_sentence(pgm[2], param[2], num_trans=2, num_rot=0)
block_res.append(" "+sentence)
else:
pass
elif 1 <= pgm[1] < self.translate:
sentence = "for(i<{}, 'Trans', u=({},{},{}))" \
.format(param[0, 0], param[0, 1], param[0, 2], param[0, 3])
block_res.append(sentence)
sentence = self.interpret_sentence(pgm[1], param[1], num_trans=1, num_rot=0)
block_res.append(" " + sentence)
else:
pass
elif pgm[0] == self.rotate:
if pgm[1] == 10 or pgm[1] == 17:
sentence = "for(i<{}, 'Rot', theta={}\N{DEGREE SIGN}, axis=({},{},{})"\
.format(param[0, 0], int(360/param[0,0]),
param[1, 0], param[1, 1], param[1, 2])
block_res.append(sentence)
sentence = self.interpret_sentence(pgm[1], param[1], num_trans=0, num_rot=1)
block_res.append(" " + sentence)
else:
pass
elif 1 <= pgm[0] < self.translate:
sentence = self.interpret_sentence(pgm[0], param[0], num_trans=0, num_rot=0)
block_res.append(sentence)
else:
pass
if len(block_res) == 0:
return None
else:
res = ''
for i in range(len(block_res)):
res += block_res[i] + '\n'
return res
def interpret_sentence(self, pgm, param, num_trans=0, num_rot=0):
"""
interpret each sentence
"""
if num_trans == 0 and num_rot == 0:
if pgm == 1:
sentence = "draw('Leg', 'Cub', P=({},{},{}), G=({},{},{}))"\
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 2:
sentence = "draw('Top', 'Rec', P=({},{},{}), G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 3:
sentence = "draw('Top', 'Square', P=({},{},{}), G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 4:
sentence = "draw('Top', 'Circle', P=({},{},{}), G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 5:
sentence = "draw('Layer', 'Rec', P=({},{},{}), G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 6:
sentence = "draw('Sup', 'Cylinder', P=({},{},{}), G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 7:
sentence = "draw('Sup', 'Cub', P=({},{},{}), G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 8:
sentence = "draw('Base', 'Circle', P=({},{},{}), G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 9:
sentence = "draw('Base', 'Square', P=({},{},{}), G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 10:
angle = round(param[5]) % 4
if angle == 0:
p1, p2, p3 = param[0], param[1], param[2] - param[4]
elif angle == 1:
p1, p2, p3 = param[0], param[1] + param[4], param[2]
elif angle == 2:
p1, p2, p3 = param[0], param[1], param[2] + param[4]
elif angle == 3:
p1, p2, p3 = param[0], param[1] - param[4], param[2]
else:
raise ValueError("The angle type of the cross is wrong")
sentence = "draw('Base', 'Line', P1=({},{},{}), P2=({},{},{}))" \
.format(param[0], param[1], param[2], p1, p2, p3)
elif pgm == 11:
sentence = "draw('Sideboard', 'Cub', P=({},{},{}), G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 12:
sentence = "draw('Hori_Bar', 'Cub', P=({},{},{}), G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 13:
sentence = "draw('Vert_Board', 'Cub', P=({},{},{}), G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 14:
sentence = "draw('Locker', 'Cub', P=({},{},{}), G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 15:
theta = np.arctan(float(param[6])/param[3]) / np.pi * 180
sentence = "draw('Back', 'Cub', P=({},{},{}), G=({},{},{}), theta={}\N{DEGREE SIGN})" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5], int(theta))
elif pgm == 16:
sentence = "draw('Chair_Beam', 'Cub', P=({},{},{}), G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 17:
sentence = "draw('Connect', 'Line', P1=({},{},{}), P2=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 18:
sentence = "draw('Back_sup', 'Cub', P=({},{},{}), G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif self.translate <= pgm <= self.end:
sentence = None
else:
sentence = None
elif num_trans == 1 and num_rot == 0:
if pgm == 1:
sentence = "draw('Leg', 'Cub', P=({},{},{})+i*u, G=({},{},{}))"\
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 2:
sentence = "draw('Top', 'Rec', P=({},{},{})+i*u, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 3:
sentence = "draw('Top', 'Square', P=({},{},{})+i*u, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 4:
sentence = "draw('Top', 'Circle', P=({},{},{})+i*u, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 5:
sentence = "draw('Layer', 'Rec', P=({},{},{})+i*u, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 6:
sentence = "draw('Sup', 'Cylinder', P=({},{},{})+i*u, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 7:
sentence = "draw('Sup', 'Cub', P=({},{},{})+i*u, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 8:
sentence = "draw('Base', 'Circle', P=({},{},{})+i*u, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 9:
sentence = "draw('Base', 'Square', P=({},{},{})+i*u, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 10:
angle = round(param[5]) % 4
if angle == 0:
p1, p2, p3 = param[0], param[1], param[2] - param[4]
elif angle == 1:
p1, p2, p3 = param[0], param[1] + param[4], param[2]
elif angle == 2:
p1, p2, p3 = param[0], param[1], param[2] + param[4]
elif angle == 3:
p1, p2, p3 = param[0], param[1] - param[4], param[2]
else:
raise ValueError("The angle type of the cross is wrong")
sentence = "draw('Base', 'Line', P1=({},{},{})+i*u, P2=({},{},{}))+i*u" \
.format(param[0], param[1], param[2], p1, p2, p3)
elif pgm == 11:
sentence = "draw('Sideboard', 'Cub', P=({},{},{})+i*u, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 12:
sentence = "draw('Hori_Bar', 'Cub', P=({},{},{})+i*u, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 13:
sentence = "draw('Vert_Board', 'Cub', P=({},{},{})+i*u, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 14:
sentence = "draw('Locker', 'Cub', P=({},{},{})+i*u, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 15:
theta = np.arctan(float(param[6])/param[3]) / np.pi * 180
sentence = "draw('Back', 'Cub', P=({},{},{})+i*u, G=({},{},{}), theta={}\N{DEGREE SIGN})" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5], int(theta))
elif pgm == 16:
sentence = "draw('Chair_Beam', 'Cub', P=({},{},{})+i*u, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 17:
sentence = "draw('Connect', 'Line', P1=({},{},{})+i*u, P2=({},{},{}))+i*u" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 18:
sentence = "draw('Back_sup', 'Cub', P=({},{},{})+i*u, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif self.translate <= pgm <= self.end:
sentence = None
else:
sentence = None
elif num_trans == 2 and num_rot == 0:
if pgm == 1:
sentence = "draw('Leg', 'Cub', P=({},{},{})+i*u1+j*u2, G=({},{},{}))"\
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 2:
sentence = "draw('Top', 'Rec', P=({},{},{})+i*u1+j*u2, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 3:
sentence = "draw('Top', 'Square', P=({},{},{})+i*u1+j*u2, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 4:
sentence = "draw('Top', 'Circle', P=({},{},{})+i*u1+j*u2, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 5:
sentence = "draw('Layer', 'Rec', P=({},{},{})+i*u1+j*u2, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 6:
sentence = "draw('Sup', 'Cylinder', P=({},{},{})+i*u1+j*u2, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 7:
sentence = "draw('Sup', 'Cub', P=({},{},{})+i*u1+j*u2, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 8:
sentence = "draw('Base', 'Circle', P=({},{},{})+i*u1+j*u2, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 9:
sentence = "draw('Base', 'Square', P=({},{},{})+i*u1+j*u2, G=({},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4])
elif pgm == 10:
angle = round(param[5]) % 4
if angle == 0:
p1, p2, p3 = param[0], param[1], param[2] - param[4]
elif angle == 1:
p1, p2, p3 = param[0], param[1] + param[4], param[2]
elif angle == 2:
p1, p2, p3 = param[0], param[1], param[2] + param[4]
elif angle == 3:
p1, p2, p3 = param[0], param[1] - param[4], param[2]
else:
raise ValueError("The angle type of the cross is wrong")
sentence = "draw('Base', 'Line', P1=({},{},{})+i*u1+j*u2, P2=({},{},{}))+i*u1+j*u2" \
.format(param[0], param[1], param[2], p1, p2, p3)
elif pgm == 11:
sentence = "draw('Sideboard', 'Cub', P=({},{},{})+i*u1+j*u2, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 12:
sentence = "draw('Hori_Bar', 'Cub', P=({},{},{})+i*u1+j*u2, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 13:
sentence = "draw('Vert_Board', 'Cub', P=({},{},{})+i*u1+j*u2, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 14:
sentence = "draw('Locker', 'Cub', P=({},{},{})+i*u1+j*u2, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 15:
theta = np.arctan(float(param[6])/param[3]) / np.pi * 180
sentence = "draw('Back', 'Cub', P=({},{},{})+i*u1+j*u2, G=({},{},{}), theta={}\N{DEGREE SIGN})" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5], int(theta))
elif pgm == 16:
sentence = "draw('Chair_Beam', 'Cub', P=({},{},{})+i*u1+j*u2, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 17:
sentence = "draw('Connect', 'Line', P1=({},{},{})+i*u1+j*u2, P2=({},{},{}))+i*u" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif pgm == 18:
sentence = "draw('Back_sup', 'Cub', P=({},{},{})+i*u1+j*u2, G=({},{},{}))" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
elif self.translate <= pgm <= self.end:
sentence = None
else:
sentence = None
elif num_trans == 0 and num_rot == 1:
if pgm == 10:
angle = round(param[5]) % 4
if angle == 0:
p1, p2, p3 = param[0], param[1], param[2] - param[4]
elif angle == 1:
p1, p2, p3 = param[0], param[1] + param[4], param[2]
elif angle == 2:
p1, p2, p3 = param[0], param[1], param[2] + param[4]
elif angle == 3:
p1, p2, p3 = param[0], param[1] - param[4], param[2]
else:
raise ValueError("The angle type of the cross is wrong")
sentence = "draw('Base', 'Line', P1=({},{},{}), P2=({},{},{}), theta*i, axis)" \
.format(param[0], param[1], param[2], p1, p2, p3)
elif pgm == 17:
sentence = "draw('Base', 'Line', P1=({},{},{}), P2=({},{},{}), theta*i, axis)" \
.format(param[0], param[1], param[2],
param[3], param[4], param[5])
else:
sentence = None
else:
sentence = None
return sentence
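# Minimal usage sketch (not part of the original file). The translate/rotate/end token
# ids below (19, 20, 21) are assumptions; in practice they come from the program
# vocabulary used during training and must match the ids the model was trained with.
if __name__ == '__main__':
    interpreter = Interpreter(translate=19, rotate=20, end=21)
    # one block holding a single draw statement (pgm id 2 -> 'Top', 'Rec')
    pgm = np.array([[2, 0, 0]])
    param = np.zeros((1, 3, 7), dtype=np.float32)
    param[0, 0, :6] = [0, 0, 0, 2, 10, 12]  # position (0,0,0), geometry (2,10,12)
    print(interpreter.interpret(pgm, param))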
|
nodes/2.x/python/View.Phase.py | andydandy74/ClockworkForDynamo | 147 | 11127287 | <filename>nodes/2.x/python/View.Phase.py
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
def GetViewPhase(view):
try: return view.Document.GetElement(view.get_Parameter(BuiltInParameter.VIEW_PHASE).AsElementId())
except: return None
views = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetViewPhase(x) for x in views]
else: OUT = GetViewPhase(views) |
bark/python_wrapper/tests/py_renderer_tests.py | xmyqsh/bark | 174 | 11127293 |
try:
import debug_settings
except:
pass
import unittest
# BARK
from bark.core.world.renderer import *
from bark.core.geometry import *
class RendererTests(unittest.TestCase):
def test_renderer(self):
renderer = Renderer()
renderer.Clear()
def test_line2d_primitive(self):
renderer = Renderer()
renderer.Clear()
l = Line2d()
l.AddPoint(Point2d(10, 4))
l.AddPoint(Point2d(1.555555, 1.244222))
line_primitive = RenderPrimitive(l)
renderer.Add("line_prim_0", line_primitive)
renderer.Add("line_prim_0", line_primitive)
renderer.Add("line_prim_1", line_primitive)
# asserts
rp = renderer.primitives
self.assertTrue(len(rp) == 2)
self.assertTrue(
(l.ToArray() == rp["line_prim_0"][0].object.ToArray()).all())
self.assertTrue(
(l.ToArray() == rp["line_prim_0"][1].object.ToArray()).all())
self.assertTrue(
(l.ToArray() == rp["line_prim_1"][0].object.ToArray()).all())
def test_polygon_primitive(self):
renderer = Renderer()
renderer.Clear()
polygon = Polygon2d(
[0, 0, 0],
[Point2d(-1,-1),
Point2d(-1,1),
Point2d(1,1),
Point2d(1,-1)])
poly_primitive = RenderPrimitive(polygon)
renderer.Add("poly_prim_0", poly_primitive)
renderer.Add("poly_prim_0", poly_primitive)
renderer.Add("poly_prim_1", poly_primitive)
# asserts
rp = renderer.primitives
self.assertTrue(len(rp) == 2)
self.assertTrue(
(polygon.ToArray() == rp["poly_prim_0"][0].object.ToArray()).all())
self.assertTrue(
(polygon.ToArray() == rp["poly_prim_0"][1].object.ToArray()).all())
self.assertTrue(
(polygon.ToArray() == rp["poly_prim_1"][0].object.ToArray()).all())
if __name__ == '__main__':
unittest.main() |
src/super_gradients/training/utils/export_utils.py | Deci-AI/super-gradients | 308 | 11127309 | import torch
import torch.nn as nn
import torch.nn.functional as F
class ExportableHardswish(nn.Module):
'''
Export-friendly version of nn.Hardswish()
'''
@staticmethod
def forward(x):
return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX
class ExportableSiLU(nn.Module):
"""
Export-friendly version of nn.SiLU()
From https://github.com/ultralytics/yolov5
"""
@staticmethod
def forward(x):
return x * torch.sigmoid(x)
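# Hedged usage sketch (not an existing super_gradients API): one way to swap standard
# activations for the export-friendly versions above before tracing/exporting a model.
# The helper name and traversal strategy below are assumptions for illustration.
def replace_activations_for_export(model: nn.Module) -> nn.Module:
    for name, child in model.named_children():
        if isinstance(child, nn.Hardswish):
            setattr(model, name, ExportableHardswish())
        elif isinstance(child, nn.SiLU):
            setattr(model, name, ExportableSiLU())
        else:
            replace_activations_for_export(child)
    return model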
def fuse_conv_bn(model: nn.Module, replace_bn_with_identity: bool = False):
"""
Fuses consecutive nn.Conv2d and nn.BatchNorm2d layers recursively inplace in all of the model
:param replace_bn_with_identity: if set to true, bn will be replaced with identity. otherwise, bn will be removed
:param model: the target model
:return: the number of fuses executed
"""
children = list(model.named_children())
counter = 0
for i in range(len(children) - 1):
if isinstance(children[i][1], torch.nn.Conv2d) and isinstance(children[i + 1][1], torch.nn.BatchNorm2d):
setattr(model, children[i][0], torch.nn.utils.fuse_conv_bn_eval(children[i][1], children[i + 1][1]))
if replace_bn_with_identity:
setattr(model, children[i + 1][0], nn.Identity())
else:
delattr(model, children[i + 1][0])
counter += 1
for child_name, child in children:
counter += fuse_conv_bn(child, replace_bn_with_identity)
return counter
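# Minimal demo sketch (assumption, not part of the original module): fuse Conv+BN in a
# tiny Sequential model and verify the fused output matches the unfused one in eval mode.
if __name__ == '__main__':
    demo = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).eval()
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        before = demo(x)
        num_fused = fuse_conv_bn(demo, replace_bn_with_identity=True)
        after = demo(x)
    print(num_fused, torch.allclose(before, after, atol=1e-6))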
|
tests/models/siamese_bilstm/test_siamese_matching_bilstm.py | tjaffri/paraphrase-id-tensorflow | 354 | 11127312 | <reponame>tjaffri/paraphrase-id-tensorflow<filename>tests/models/siamese_bilstm/test_siamese_matching_bilstm.py
from overrides import overrides
import math
import tensorflow as tf
from duplicate_questions.data.data_manager import DataManager
from duplicate_questions.data.embedding_manager import EmbeddingManager
from duplicate_questions.models.siamese_bilstm.siamese_matching_bilstm import (
SiameseMatchingBiLSTM
)
from duplicate_questions.data.instances.sts_instance import STSInstance
from ...common.test_case import DuplicateTestCase
class TestSiameseMatchingBiLSTM(DuplicateTestCase):
@overrides
def setUp(self):
super(TestSiameseMatchingBiLSTM, self).setUp()
self.write_duplicate_questions_train_file()
self.write_duplicate_questions_validation_file()
self.write_duplicate_questions_test_file()
self.data_manager = DataManager(STSInstance)
self.batch_size = 2
self.get_train_gen, self.train_size = self.data_manager.get_train_data_from_file(
[self.TRAIN_FILE])
self.get_val_gen, self.val_size = self.data_manager.get_validation_data_from_file(
[self.VALIDATION_FILE])
self.get_test_gen, self.test_size = self.data_manager.get_test_data_from_file(
[self.TEST_FILE])
self.embedding_manager = EmbeddingManager(self.data_manager.data_indexer)
self.word_embedding_dim = 5
self.embedding_matrix = self.embedding_manager.get_embedding_matrix(
self.word_embedding_dim)
self.rnn_hidden_size = 6
self.output_keep_prob = 1.0
self.share_encoder_weights = True
self.config_dict = {
"mode": "train",
"word_vocab_size": self.data_manager.data_indexer.get_vocab_size(),
"word_embedding_dim": self.word_embedding_dim,
"fine_tune_embeddings": False,
"word_embedding_matrix": self.embedding_matrix,
"rnn_hidden_size": self.rnn_hidden_size,
"output_keep_prob": self.output_keep_prob,
"share_encoder_weights": self.share_encoder_weights
}
self.num_train_steps_per_epoch = int(math.ceil(self.train_size / self.batch_size))
self.num_val_steps = int(math.ceil(self.val_size / self.batch_size))
self.num_test_steps = int(math.ceil(self.test_size / self.batch_size))
def test_default_does_not_crash(self):
# Initialize the model
model = SiameseMatchingBiLSTM(self.config_dict)
model.build_graph()
# Train the model
model.train(get_train_instance_generator=self.get_train_gen,
get_val_instance_generator=self.get_val_gen,
batch_size=self.batch_size,
num_train_steps_per_epoch=self.num_train_steps_per_epoch,
num_epochs=2,
num_val_steps=self.num_val_steps,
save_path=self.TEST_DIR,
log_path=self.TEST_DIR,
log_period=2,
val_period=2,
save_period=2,
patience=0)
tf.reset_default_graph()
# Load and predict with the model
self.config_dict["mode"] = "test"
del self.config_dict["word_embedding_matrix"]
loaded_model = SiameseMatchingBiLSTM(self.config_dict)
loaded_model.build_graph()
loaded_model.predict(get_test_instance_generator=self.get_test_gen,
model_load_dir=self.TEST_DIR,
batch_size=self.batch_size,
num_test_steps=self.num_test_steps)
def test_non_sharing_encoders_does_not_crash(self):
# Initialize the model
self.config_dict["share_encoder_weights"] = False
model = SiameseMatchingBiLSTM(self.config_dict)
model.build_graph()
# Train the model
model.train(get_train_instance_generator=self.get_train_gen,
get_val_instance_generator=self.get_val_gen,
batch_size=self.batch_size,
num_train_steps_per_epoch=self.num_train_steps_per_epoch,
num_epochs=2,
num_val_steps=self.num_val_steps,
save_path=self.TEST_DIR,
log_path=self.TEST_DIR,
log_period=2,
val_period=2,
save_period=2,
patience=0)
tf.reset_default_graph()
# Load and predict with the model
self.config_dict["mode"] = "test"
del self.config_dict["word_embedding_matrix"]
loaded_model = SiameseMatchingBiLSTM(self.config_dict)
loaded_model.build_graph()
loaded_model.predict(get_test_instance_generator=self.get_test_gen,
model_load_dir=self.TEST_DIR,
batch_size=self.batch_size,
num_test_steps=self.num_test_steps)
|
python/eet/transformers/modeling_bert.py | NetEase-FuXi/EET | 174 | 11127333 | #
# Created by djz on 2021/01/21.
#
"""EET transformers bert model. """
import math
import time
import torch
import torch.nn as nn
import numpy as np
from torch import Tensor
from typing import Any, Dict, List, Optional, Tuple
from transformers import BertModel
from EET import MetaDesc as meta_desc
from EET import FeedForwardNetwork as eet_ffn
from EET import MultiHeadAttention as eet_attention
from EET import Embedding as eet_embedding
BEGIN_OF_PARAM = 8
__all__ = [
'EETBertEmbedding', 'EETBertFeedforward', 'EETBertAttention', 'EETBertEncoderLayer', 'EETBertEncoder', 'EETBertModel'
]
class EETBertEmbedding():
def __init__(self,config,embedding_dict,data_type = torch.float32):
self.if_layernorm = True
self.embedding_weights = embedding_dict['embeddings.word_embeddings.weight'].cuda().type(data_type)
self.position_weights = embedding_dict['embeddings.position_embeddings.weight'].cuda().type(data_type)
self.token_type_weights = embedding_dict['embeddings.token_type_embeddings.weight'].cuda().type(data_type)
self.Layernorm_weights = embedding_dict['embeddings.LayerNorm.weight'].cuda().type(data_type)
self.Layernorm_bias = embedding_dict['embeddings.LayerNorm.bias'].cuda().type(data_type)
self.embedding = eet_embedding(config,self.embedding_weights,self.position_weights,self.token_type_weights,self.Layernorm_weights,self.Layernorm_bias)
def __call__(self,
input_ids,
position_ids,
token_type_ids):
return self.embedding.forward_transformers(input_ids,position_ids,token_type_ids,self.if_layernorm)
@staticmethod
def from_torch(config,embedding_dict,data_type = torch.float32):
feedforward = EETBertEmbedding(config,embedding_dict,data_type = data_type)
return feedforward
class EETBertFeedforward():
def __init__(self,config,model_dict,layer_id,data_type = torch.float32):
self.intermediate_weights = torch.t([x[1] for x in model_dict.items() if 'intermediate.dense.weight' in x[0]][0]).contiguous().cuda().type(data_type)
self.intermediate_bias = [x[1] for x in model_dict.items() if 'intermediate.dense.bias' in x[0]][0].cuda().type(data_type)
self.output_weights = torch.t([x[1] for x in model_dict.items() if str(layer_id)+'.output.dense.weight' in x[0]][0]).contiguous().cuda().type(data_type)
self.output_bias = [x[1] for x in model_dict.items() if str(layer_id)+'.output.dense.bias' in x[0]][0].cuda().type(data_type)
self.layernorm_weights = [x[1] for x in model_dict.items() if str(layer_id)+'.output.LayerNorm.weight' in x[0]][0].cuda().type(data_type)
self.layernorm_bias = [x[1] for x in model_dict.items() if str(layer_id)+'.output.LayerNorm.bias' in x[0]][0].cuda().type(data_type)
self.ffn = eet_ffn(config,self.intermediate_weights,self.intermediate_bias,self.output_weights,self.output_bias,self.layernorm_weights,self.layernorm_bias)
def __call__(self,
input_id,
pre_layernorm = True,
add_redusial = True):
return self.ffn.forward(input_id,pre_layernorm,add_redusial)
@staticmethod
def from_torch(config,model_dict,layer_id,data_type = torch.float32):
feedforward = EETBertFeedforward(config,model_dict,layer_id,data_type = data_type)
return feedforward
class EETBertAttention():
def __init__(self,config, model_dict,layer_id,data_type = torch.float32):
q_weights = [x[1] for x in model_dict.items() if 'self.query.weight' in x[0]][0].contiguous().cuda().type(data_type)
k_weights = [x[1] for x in model_dict.items() if 'self.key.weight' in x[0]][0].contiguous().cuda().type(data_type)
v_weights = [x[1] for x in model_dict.items() if 'self.value.weight' in x[0]][0].contiguous().cuda().type(data_type)
self.qkv_weight = torch.cat((q_weights,k_weights,v_weights),0).transpose(0,1).contiguous()
self.q_bias = [x[1] for x in model_dict.items() if 'self.query.bias' in x[0]][0].cuda().type(data_type)
self.k_bias = [x[1] for x in model_dict.items() if 'self.key.bias' in x[0]][0].cuda().type(data_type)
self.v_bias = [x[1] for x in model_dict.items() if 'self.value.bias' in x[0]][0].cuda().type(data_type)
self.out_weights = torch.t([x[1] for x in model_dict.items() if 'attention.output.dense.weight' in x[0]][0]).contiguous().cuda().type(data_type)
self.out_bias = [x[1] for x in model_dict.items() if 'attention.output.dense.bias' in x[0]][0].cuda().type(data_type)
self.layernorm_weights = [x[1] for x in model_dict.items() if 'attention.output.LayerNorm.weight' in x[0]][0].cuda().type(data_type)
self.layernorm_bias = [x[1] for x in model_dict.items() if 'attention.output.LayerNorm.bias' in x[0]][0].cuda().type(data_type)
self.attention = eet_attention(config,self.qkv_weight,self.q_bias,self.k_bias,self.v_bias,self.out_weights,self.out_bias,self.layernorm_weights,self.layernorm_bias)
def __call__(self,
input_id,
pre_padding_len,
pre_layernorm = False,
add_redusial = True):
return self.attention.forward(input_id,pre_padding_len,pre_layernorm,add_redusial)
@staticmethod
def from_torch(config,model_dict,layer_id,data_type = torch.float32):
attention = EETBertAttention(config,model_dict,layer_id,data_type = data_type)
return attention
class EETBertEncoderLayer():
def __init__(self, config, attention,feedforward):
self.attetion = attention
self.feedforward = feedforward
def __call__(self,
x,
pre_padding_len = None,
normalize_before = False):
        ''' bert encoder layer struct '''
        ''' self_attention -> projection -> add residual -> layernorm -> ffn -> add residual -> layernorm '''
self_attn_out = self.attetion(input_id = x,
pre_padding_len = pre_padding_len,
pre_layernorm = normalize_before,
add_redusial = True)
out = self.feedforward(self_attn_out,
pre_layernorm = normalize_before,
add_redusial = True)
return out
@staticmethod
def from_torch(config, model_dict,layer_id,data_type = torch.float32):
attention = EETBertAttention.from_torch(config = config, model_dict = model_dict, layer_id = layer_id,data_type = data_type)
feedforward = EETBertFeedforward.from_torch(config = config, model_dict = model_dict, layer_id = layer_id,data_type = data_type)
layer = EETBertEncoderLayer(config, attention, feedforward)
return layer
class EETBertEncoder():
def __init__(self,EncoderLayers):
self.layers = EncoderLayers
def __call__(
self,
x,
pre_padding_len = None,
normalize_before = False
):
for layer in self.layers:
x = layer(x,
pre_padding_len = pre_padding_len,
                      normalize_before = normalize_before)
return x
@staticmethod
def from_torch(layer_model_dict,config,layer_num,data_type = torch.float32):
"""from torch."""
EncoderLayers = []
for i in range(layer_num):
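            # layer_model_dict is keyed by the first BEGIN_OF_PARAM (8) characters of each
            # parameter name, so single-digit layers group under 'layer.0.' (with trailing
            # dot) while layers 10+ group under 'layer.10' (no trailing dot) - hence the split.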
if i < 10:
EncoderLayers.extend(
[
EETBertEncoderLayer.from_torch(config,layer_model_dict['layer.'+str(i)+'.'],i,data_type = data_type)
]
)
else:
EncoderLayers.extend(
[
EETBertEncoderLayer.from_torch(config,layer_model_dict['layer.'+str(i)],i,data_type = data_type)
]
)
eet_encoder = EETBertEncoder(EncoderLayers)
return eet_encoder
class EETBertModel():
def __init__(self,config,embedding,encoder):
self.embedding = embedding
self.encoder = encoder
self.pre_padding_len = torch.empty(0).long()
self.position_ids = torch.arange(0,config.max_position_embeddings).reshape(1,config.max_position_embeddings).cuda()
def __call__(
self,
input_ids,
position_ids = None,
token_type_ids = None,
attention_mask = None,
):
'''
attention_mask:attention_padding_mask(:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input.)
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
'''
input_shape = input_ids.size()
position_ids = self.position_ids[:, :input_shape[1]]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=input_ids.device)
if attention_mask is None:
pre_padding_len = self.pre_padding_len
else:
# transformers 0 - padding;1 - nopadding
pre_padding_len = torch.sum(1 - attention_mask,1).long().cuda()
embedding_out = self.embedding(input_ids,position_ids,token_type_ids)
encoder_out = self.encoder(embedding_out,
pre_padding_len = pre_padding_len,
normalize_before = False)
return encoder_out
@staticmethod
def from_pretrained(model_id_or_path: str,max_batch, data_type):
"""from torch."""
torch.set_grad_enabled(False)
model_dict = {}
embedding_dict = {}
torch_model = BertModel.from_pretrained(model_id_or_path)
cfg = torch_model.config
for k, v in torch_model.state_dict().items():
if 'embeddings' in k:
embedding_dict[k] = v
if 'layer' in k:
#BEGIN_OF_PARAM(Length of the beginning of the parameter):
#like 'encoder.layer.0.attention.self.query.weight',the BEGIN_OF_PARAM is the length of 'encoder.'-->8
k = k[BEGIN_OF_PARAM:]
model_dict[k] = v
from itertools import groupby
layer_model_dict = {k: dict(v) for k, v in groupby(list(model_dict.items()), lambda item: item[0][:BEGIN_OF_PARAM])}
device = "cuda:0"
activation_fn = cfg.hidden_act
batch_size = max_batch
config = meta_desc(batch_size, cfg.num_attention_heads, cfg.hidden_size, cfg.num_hidden_layers , cfg.max_position_embeddings, cfg.max_position_embeddings, data_type, device, False, activation_fn)
embedding = EETBertEmbedding.from_torch(config,embedding_dict,data_type)
# embedding = None
encoder = EETBertEncoder.from_torch(layer_model_dict,config, cfg.num_hidden_layers,data_type)
eet_model = EETBertModel(cfg,embedding, encoder)
return eet_model
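# Minimal usage sketch (not part of the original file): driving EETBertModel for inference.
# The model id, max_batch and sequence length below are assumptions; EET runs on CUDA, and
# from_pretrained first loads the HuggingFace weights, then copies them into EET operators.
if __name__ == '__main__':
    from transformers import BertTokenizer
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    inputs = tokenizer(['hello world'], return_tensors='pt', padding='max_length', max_length=16)
    eet_model = EETBertModel.from_pretrained('bert-base-uncased', max_batch=1, data_type=torch.float32)
    out = eet_model(inputs['input_ids'].cuda(), attention_mask=inputs['attention_mask'].cuda())
    print(out.shape)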
|