max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
models/SemanticSegmentation/LWnet.py | Dou-Yu-xuan/deep-learning-visal | 150 | 12737254 | <gh_stars>100-1000
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/6/28 18:04
# @Author : liumin
# @File : LWnet.py
import torch
import torch.nn as nn
import torchvision
import torch.nn.functional as F
def ConvBNReLU(in_channels,out_channels,kernel_size,stride,padding,dilation=1,groups=1):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding,dilation=dilation,groups=groups, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True)
)
def ConvBN(in_channels,out_channels,kernel_size,stride,padding,dilation=1,groups=1):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding,dilation=dilation,groups=groups, bias=False),
nn.BatchNorm2d(out_channels)
)
def Conv1x1BNReLU(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True)
)
def Conv1x1BN(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels)
)
class LWbottleneck(nn.Module):
def __init__(self, in_channels,out_channels,stride):
super(LWbottleneck, self).__init__()
self.stride = stride
self.pyramid_list = nn.ModuleList()
self.pyramid_list.append(ConvBNReLU(in_channels, in_channels, kernel_size=[5,1], stride=stride, padding=[2,0]))
self.pyramid_list.append(ConvBNReLU(in_channels, in_channels, kernel_size=[1,5], stride=stride, padding=[0,2]))
self.pyramid_list.append(ConvBNReLU(in_channels, in_channels, kernel_size=[3,1], stride=stride, padding=[1,0]))
self.pyramid_list.append(ConvBNReLU(in_channels, in_channels, kernel_size=[1,3], stride=stride, padding=[0,1]))
self.pyramid_list.append(ConvBNReLU(in_channels, in_channels, kernel_size=[2,1], stride=stride, padding=[1,0]))
self.pyramid_list.append(ConvBNReLU(in_channels, in_channels, kernel_size=[1,2], stride=stride, padding=[0,1]))
self.pyramid_list.append(ConvBNReLU(in_channels, in_channels, kernel_size=2, stride=stride, padding=1))
self.pyramid_list.append(ConvBNReLU(in_channels, in_channels, kernel_size=3, stride=stride, padding=1))
self.shrink = Conv1x1BN(in_channels*8,out_channels)
def forward(self, x):
b,c,w,h = x.shape
if self.stride>1:
w, h = w//self.stride,h//self.stride
outputs = []
for pyconv in self.pyramid_list:
pyconv_x = pyconv(x)
if x.shape[2:] != pyconv_x.shape[2:]:
pyconv_x = pyconv_x[:,:,:w,:h]
outputs.append(pyconv_x)
out = torch.cat(outputs, 1)
return self.shrink(out)
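def _lwbottleneck_shape_check():
    """Illustrative sanity check (added sketch; sizes assumed, not part of the original
    model): a stride-2 LWbottleneck halves the spatial resolution and remaps channels,
    e.g. (1, 16, 64, 64) -> (1, 24, 32, 32)."""
    blk = LWbottleneck(in_channels=16, out_channels=24, stride=2)
    y = blk(torch.randn(1, 16, 64, 64))
    return y.shape  # expected: torch.Size([1, 24, 32, 32])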
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.stage1 = nn.Sequential(
ConvBNReLU(in_channels=3, out_channels=32, kernel_size=3, stride=2, padding=1),
Conv1x1BN(in_channels=32, out_channels=16),
)
self.stage2 = nn.Sequential(
LWbottleneck(in_channels=16,out_channels=24,stride=2),
LWbottleneck(in_channels=24, out_channels=24, stride=1),
)
self.stage3 = nn.Sequential(
LWbottleneck(in_channels=24, out_channels=32, stride=2),
LWbottleneck(in_channels=32, out_channels=32, stride=1),
)
self.stage4 = nn.Sequential(
LWbottleneck(in_channels=32, out_channels=32, stride=2)
)
self.stage5 = nn.Sequential(
LWbottleneck(in_channels=32, out_channels=64, stride=2),
LWbottleneck(in_channels=64, out_channels=64, stride=1),
LWbottleneck(in_channels=64, out_channels=64, stride=1),
LWbottleneck(in_channels=64, out_channels=64, stride=1),
)
self.conv1 = Conv1x1BN(in_channels=64, out_channels=320)
def forward(self, x):
x = self.stage1(x)
x = self.stage2(x)
x = F.pad(x,pad=(0,1,0,1),mode='constant',value=0)
out1 = x = self.stage3(x)
x = self.stage4(x)
x = F.pad(x, pad=(0, 1, 0, 1), mode='constant', value=0)
x = self.stage5(x)
out2 = self.conv1(x)
return out1,out2
class ASPP(nn.Module):
def __init__(self, in_channels, out_channels):
super(ASPP, self).__init__()
self.depthwise1 = ConvBNReLU(in_channels, out_channels, 3, 1, 6, dilation=6)
self.depthwise2 = ConvBNReLU(in_channels, out_channels, 3, 1, 12, dilation=12)
self.depthwise3 = ConvBNReLU(in_channels, out_channels, 3, 1, 18, dilation=18)
self.pointconv = Conv1x1BN(in_channels, out_channels)
def forward(self, x):
x1 = self.depthwise1(x)
x2 = self.depthwise2(x)
x3 = self.depthwise3(x)
x4 = self.pointconv(x)
return torch.cat([x1,x2,x3,x4], dim=1)
class Decoder(nn.Module):
def __init__(self,num_classes=2):
super(Decoder, self).__init__()
self.aspp = ASPP(320, 128)
self.pconv1 = Conv1x1BN(128*4, 512)
self.pconv2 = Conv1x1BN(512+32, 128)
self.pconv3 = Conv1x1BN(128, num_classes)
def forward(self, x, y):
x = self.pconv1(self.aspp(x))
x = F.interpolate(x,y.shape[2:],align_corners=True,mode='bilinear')
x = torch.cat([x,y], dim=1)
out = self.pconv3(self.pconv2(x))
return out
class LW_Network(nn.Module):
def __init__(self, num_classes=2):
super(LW_Network, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder(num_classes)
def forward(self, x):
x1,x2 = self.encoder(x)
out = self.decoder(x2,x1)
return out
if __name__ == '__main__':
model = LW_Network()
print(model)
input = torch.randn(1, 3, 331, 331)
output = model(input)
print(output.shape) |
pynab/categories.py | bigblue/pynab | 161 | 12737266 | import regex
import pickle
import os.path
from pynab import log, root_dir
# category codes
# these are stored in the db, as well
CAT_GAME_NDS = 1010
CAT_GAME_PSP = 1020
CAT_GAME_WII = 1030
CAT_GAME_XBOX = 1040
CAT_GAME_XBOX360 = 1050
CAT_GAME_WIIWARE = 1060
CAT_GAME_XBOX360DLC = 1070
CAT_GAME_PS3 = 1080
CAT_MOVIE_FOREIGN = 2010
CAT_MOVIE_OTHER = 2020
CAT_MOVIE_SD = 2030
CAT_MOVIE_HD = 2040
CAT_MOVIE_BLURAY = 2050
CAT_MOVIE_3D = 2060
CAT_MUSIC_MP3 = 3010
CAT_MUSIC_VIDEO = 3020
CAT_MUSIC_AUDIOBOOK = 3030
CAT_MUSIC_LOSSLESS = 3040
CAT_PC_0DAY = 4010
CAT_PC_ISO = 4020
CAT_PC_MAC = 4030
CAT_PC_MOBILEOTHER = 4040
CAT_PC_GAMES = 4050
CAT_PC_MOBILEIOS = 4060
CAT_PC_MOBILEANDROID = 4070
CAT_TV_FOREIGN = 5020
CAT_TV_SD = 5030
CAT_TV_HD = 5040
CAT_TV_OTHER = 5050
CAT_TV_SPORT = 5060
CAT_TV_ANIME = 5070
CAT_TV_DOCU = 5080
CAT_XXX_DVD = 6010
CAT_XXX_WMV = 6020
CAT_XXX_XVID = 6030
CAT_XXX_X264 = 6040
CAT_XXX_PACK = 6050
CAT_XXX_IMAGESET = 6060
CAT_XXX_OTHER = 6070
CAT_BOOK_MAGS = 7010
CAT_BOOK_EBOOK = 7020
CAT_BOOK_COMICS = 7030
CAT_MISC_OTHER = 8010
CAT_PARENT_GAME = 1000
CAT_PARENT_MOVIE = 2000
CAT_PARENT_MUSIC = 3000
CAT_PARENT_PC = 4000
CAT_PARENT_TV = 5000
CAT_PARENT_XXX = 6000
CAT_PARENT_BOOK = 7000
CAT_PARENT_MISC = 8000
CATEGORISER = pickle.load(open(os.path.join(root_dir, 'db/release_categoriser.pkl'), 'rb'))
def extract_features(name):
def find(reg, str):
res = regex.findall(reg, str, regex.I)
if res:
return '|'.join(sorted(res))
else:
return None
return {
'length': len(name),
'tokens': len(regex.findall('[\w\']+', name)),
'resolution': find('(720|1080)', name),
'quality': find('(SDTV|HDTV|PDTV|WEB-?DL|WEBRIP|XVID|DIVX|DVDR|DVD-RIP|x264|dvd|XvidHD|AVC|AAC|VC\-?1|wmvhd|web\-dl|BRRIP|HDRIP|HDDVD|bddvd|BDRIP|webscr|bluray|bd?25|bd?50|blu-ray|BDREMUX)', name),
'3d': bool(find('(3D)', name)),
'subgroup': find('\[(\w+)\]', name),
'filehash': bool(find('\[([0-9a-fA-F]{8})\]', name)),
'season': bool(find('(S\d{1,2})', name)),
'episode': bool(find('(E\d{1,2})', name)),
'airdate': bool(find('((?:\d{4}[.-/ ]\d{2}[.-/ ]\d{2})|(?:\d{2}[.-/ ]\d{2}[.-/ ]\d{4}))', name)),
'year': bool(find('[.-/ ](\d{4})[.-/ ]', name)),
'versus': bool(find('[.-/ ](vs?)[.-/ ]', name)),
'music': bool(find('((?:^VA(?:\-|\_|\ ))|(?:MP3|VBR|NMR|CDM|FLAC|\-(?:CDR?|EP|LP|SAT|2CD|FM|VINYL|DE|CABLE|TAPE)\-))', name)),
'ebook': bool(find('(e?\-?book|html|epub|pdf|mobi|azw|doc|isbn)', name)),
'comic': bool(find('(cbr|cbz)', name)),
'magazine': bool(find('(mag(?:s|azine?s?))', name)),
'sport': find('(epl|motogp|bellator|supercup|wtcc|bundesliga|uefa|espn|wwe|wwf|wcw|mma|ucf|fia|pga|nfl|ncaa|fifa|mlb|nrl|nhl|afl|nba|wimbledon|cricket)[\. -_]', name),
'xxx': bool(find('(xxx|imageset|porn|erotica)', name)),
'game': find('(PS3|3DS|NDS|PS4|XBOX|XBONE|WII|DLC|CONSOLE|PSP|X360|PS4)', name),
'foreign': bool(find('(seizoen|staffel|danish|flemish|dutch|Deutsch|nl\.?subbed|nl\.?sub|\.NL|\.ITA|norwegian|swedish|swesub|french|german|spanish|icelandic|finnish|Chinese\.Subbed|vostfr|Hebrew\.Dubbed|\.HEB\.|Nordic|Hebdub|NLSubs|NL\-Subs|NLSub|Deutsch| der |German | NL |\.PL\.)', name)),
'pc': bool(find('((?:v?\d\.\d\.)|(?:x64|32bit|64bit|exe))', name)),
'documentary': bool(find('(documentary|national geographic|natgeo)', name))
}
def determine_category(name, group_name=''):
"""Categorise release based on release name and group name."""
features = extract_features(name)
features['name'] = name
features['group'] = group_name
category = int(CATEGORISER.classify(features))
log.debug('category: ({}) [{}]: {}'.format(
group_name,
name,
category
))
return category
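# Illustrative call (hypothetical release/group names; the returned category id depends
# entirely on the pickled classifier loaded above):
#
#   determine_category('Some.Show.S01E02.720p.HDTV.x264-GROUP',
#                      group_name='alt.binaries.teevee')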
|
avionics/motor/motor_client_test.py | leozz37/makani | 1,178 | 12737316 | <reponame>leozz37/makani
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for makani.avionics.motor.motor_client.
This module uses snake case for new function names so that test functions can be
consistent with cmd.Cmd methods without offending Lint.
"""
import copy
import re
import socket
import tempfile
import textwrap
import threading
import time
import unittest
from makani.avionics.common import actuator_types
from makani.avionics.common import cmd_client
from makani.avionics.common import pack_avionics_messages
from makani.avionics.motor import motor_client
from makani.avionics.motor.firmware import flags
from makani.avionics.network import aio_node
from makani.lib.python import test_util
import numpy
_TIMEOUT = 0.01
_EPS32 = numpy.finfo(numpy.float32).eps
class MulticastListener(cmd_client.AioThread):
def __init__(self, set_state_callback, command_callback, param_callback):
super(MulticastListener, self).__init__(
['kMessageTypeControllerCommand', 'kMessageTypeDynoCommand',
'kMessageTypeMotorSetParam', 'kMessageTypeDynoMotorSetParam',
'kMessageTypeMotorSetState', 'kMessageTypeDynoMotorSetState',
'kMessageTypeMotorGetParam', 'kMessageTypeDynoMotorGetParam'],
allowed_sources=['kAioNodeControllerA', 'kAioNodeOperator'],
timeout=_TIMEOUT)
self._set_state_callback = set_state_callback
self._command_callback = command_callback
self._param_callback = param_callback
def _RunOnce(self):
try:
_, header, message = self._client.Recv()
if header.source == aio_node.kAioNodeOperator:
if isinstance(message, pack_avionics_messages.MotorSetStateMessage):
self._set_state_callback(message)
elif (isinstance(message, pack_avionics_messages.MotorSetParamMessage)
or isinstance(
message, pack_avionics_messages.DynoMotorSetParamMessage)):
self._param_callback(message)
elif (isinstance(message, pack_avionics_messages.MotorGetParamMessage)
or isinstance(
message, pack_avionics_messages.DynoMotorGetParamMessage)):
self._param_callback(message)
elif isinstance(message, pack_avionics_messages.DynoCommandMessage):
self._command_callback(message)
elif header.source == aio_node.kAioNodeControllerA:
if isinstance(message, pack_avionics_messages.ControllerCommandMessage):
self._command_callback(message)
except socket.timeout:
pass
class FakeMotor(cmd_client.AioThread):
def __init__(self, nickname):
self._node_string = motor_client.AioNodeNameFromMotorNickname(nickname)
self._index = motor_client.MOTORS.index(nickname)
self._bitmask = 1 << self._index
self._status = pack_avionics_messages.MotorStatusMessage()
self._status.motor_status = flags.kMotorStatusInit
self._status_lock = threading.Lock()
self.running = False
self.params = {v: 0.0 for v in motor_client.MOTOR_PARAMS.itervalues()}
self.torque = 0.0
self.speed_lower = 0.0
self.speed_upper = 0.0
super(FakeMotor, self).__init__(['kMessageTypeMotorStatus',
'kMessageTypeMotorAckParam'],
allowed_sources=[self._node_string],
timeout=_TIMEOUT)
self._multicast_listener = MulticastListener(
self._HandleMotorSetStateMessage, self._HandleControllerCommandMessage,
self._HandleParamMessage)
def GetParam(self, param_name):
return self.params[motor_client.MOTOR_PARAMS[param_name]]
def __enter__(self):
self.start()
self._multicast_listener.start()
return self
def __exit__(self, *args):
self._multicast_listener.Exit()
self._multicast_listener.join()
self.Exit()
self.join()
def GetStatus(self):
with self._status_lock:
return copy.copy(self._status)
def GetError(self):
with self._status_lock:
return self._status.motor_error
def SetError(self, error):
with self._status_lock:
self._status.motor_error = error
self._status.motor_status |= flags.kMotorStatusError
def SetWarning(self, warning):
with self._status_lock:
self._status.motor_warning = warning
self._status.motor_status |= flags.kMotorStatusError
def ClearError(self):
with self._status_lock:
self._status.motor_error = flags.kMotorErrorNone
self._status.motor_status &= ~flags.kMotorStatusError
def _HandleMotorSetStateMessage(self, message):
if (message.selected_motors & self._bitmask
and message.command == actuator_types.kActuatorStateCommandArm):
with self._status_lock:
self._status.motor_status = flags.kMotorStatusArmed
def _HandleControllerCommandMessage(self, message):
self.torque = message.motor_torque[self._index]
self.speed_lower = message.motor_speed_lower_limit[self._index]
self.speed_upper = message.motor_speed_upper_limit[self._index]
self.running = bool(message.motor_command & flags.kMotorCommandRun)
if message.motor_command & flags.kMotorCommandClearError:
self.ClearError()
if message.motor_command & flags.kMotorCommandDisarm:
with self._status_lock:
self._status.motor_status &= ~flags.kMotorStatusArmed
def _HandleParamMessage(self, message):
if message.selected_motors & self._bitmask:
if isinstance(message, pack_avionics_messages.MotorSetParamMessage):
self.params[message.id] = message.value
ack = pack_avionics_messages.MotorAckParamMessage()
ack.id = message.id
ack.value = self.params[message.id]
self._client.Send(ack, 'kMessageTypeMotorAckParam',
self._node_string)
def _RunOnce(self):
with self._status_lock:
self._client.Send(self._status, 'kMessageTypeMotorStatus',
self._node_string)
time.sleep(0.1)
class MotorCommandClientTest(unittest.TestCase):
def setUp(self):
super(MotorCommandClientTest, self).setUp()
self.client = motor_client.MotorCommandClient()
self.stdout = test_util.StdoutPatch()
def tearDown(self):
super(MotorCommandClientTest, self).tearDown()
with self.stdout:
self.client.onecmd('quit')
def assert_eventually_true(self, func):
num_tries = 30
for i in xrange(num_tries):
if func():
return True
if i < num_tries - 1:
time.sleep(0.1)
self.assertTrue(False) # pylint: disable=redundant-unittest-assert
def test_do_set_targets(self):
with self.stdout:
self.client.onecmd('set_targets SBO')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*SBO.*')
self.client.onecmd('quit')
with self.stdout:
self.client.onecmd('set_targets SBI PTO')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*SBI.*')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*PTO.*')
def test_do_set_targets_dyno(self):
with self.stdout:
self.client.onecmd('set_targets_dyno SBO')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*SBO.*')
self.client.onecmd('quit')
with self.stdout:
self.client.onecmd('set_targets_dyno SBI PTO')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*SBI.*')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*PTO.*')
def test_do_arm_fail(self):
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('arm')
self.assertRegexpMatches(self.stdout.Read(),
'(?s).*Invalid set of targets.*')
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('set_targets SBO')
self.client.onecmd('arm SBO')
self.assertRegexpMatches(self.stdout.Read(),
'(?s).*Wrong number of arguments.*')
def test_do_arm_succeed(self):
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_targets SBO')
self.client.onecmd('arm')
self.assertEqual(motor.GetStatus().motor_status,
flags.kMotorStatusArmed)
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Successfully armed.*')
def test_do_arm_multiple_motors(self):
with self.stdout, FakeMotor('SBI') as sbi, FakeMotor('PTO') as pto:
self.client.onecmd('set_targets SBI PTO')
self.client.onecmd('arm')
self.assertEqual(sbi.GetStatus().motor_status,
flags.kMotorStatusArmed)
self.assertEqual(pto.GetStatus().motor_status,
flags.kMotorStatusArmed)
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Successfully armed.*')
def test_do_disarm(self):
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_targets SBO')
self.client.onecmd('arm')
self.client.onecmd('disarm')
self.assertEqual(motor.GetStatus().motor_status,
flags.kMotorStatusInit)
def test_do_set_param(self):
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_param SBO i_kp 3.14')
self.assertRegexpMatches(self.stdout.Read(),
'(?s).*Setting i_kp to 3.14 on SBO.*')
self.assertAlmostEqual(motor.GetParam('i_kp'), 3.14, places=6)
def test_do_get_param(self):
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('get_param SBO i_kp')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*SBO i_kp: 0[^0-9]*')
def test_do_run_fail(self):
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('run 1 s')
self.assertRegexpMatches(
self.stdout.Read(), '(?s).*Invalid set of targets.*')
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('set_targets SBO')
self.client.onecmd('run 1 s')
self.assertRegexpMatches(
self.stdout.Read(), '(?s).*Invalid(?s).*status.*')
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_run_succeed(self):
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_targets SBO')
self.client.onecmd('arm')
self.client.onecmd('run 100 s')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Running.*')
self.assert_eventually_true(lambda: motor.running)
def test_do_stop_fail(self):
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('stop')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Not running.*')
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_stop_succeed(self):
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_targets SBO')
self.client.onecmd('arm')
self.client.onecmd('run 100 s')
self.assert_eventually_true(lambda: motor.running)
self.client.onecmd('stop')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Run stopped.*')
self.assert_eventually_true(lambda: not motor.running)
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_set_torque(self):
with self.stdout, FakeMotor('SBI'), FakeMotor('SBO') as motor:
self.client.onecmd('set_targets SBI')
self.client.onecmd('set_targets_dyno SBO')
self.client.onecmd('set_speed_limits -3.14 3.14')
self.client.onecmd('set_torque 3.14')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Torque desired.*')
self.assert_eventually_true(
lambda: abs(motor.torque - 3.14) / 3.14 < _EPS32)
self.assert_eventually_true(
lambda: abs(motor.speed_lower + 3.14) / 3.14 < _EPS32)
self.assert_eventually_true(
lambda: abs(motor.speed_upper - 3.14) / 3.14 < _EPS32)
def test_do_set_torque_fail(self):
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('set_torque 3.14')
self.assertRegexpMatches(
self.stdout.Read(), 'No dynos selected. Use "set_targets_dyno".')
self.client.onecmd('set_targets_dyno SBO')
self.client.onecmd('set_torque abc')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Invalid argument.*')
self.client.onecmd('set_torque 3.14')
self.assertRegexpMatches(
self.stdout.Read(), 'Omega limits not set. Use "set_speed_limits".')
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_set_speed_limits(self):
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_targets_dyno SBO')
self.client.onecmd('set_speed_limits -3.14 3.14')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Omega limits set.*')
self.assert_eventually_true(
lambda: abs(motor.speed_lower + 3.14) / 3.14 < _EPS32)
self.assert_eventually_true(
lambda: abs(motor.speed_upper - 3.14) / 3.14 < _EPS32)
def test_do_set_speed_limits_fail(self):
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('set_targets_dyno SBO')
self.client.onecmd('set_speed_limits abc 20')
self.assertRegexpMatches(
self.stdout.Read(), '(?s).*Invalid argument.*')
self.client.onecmd('set_speed_limits 22 20')
self.assertRegexpMatches(
self.stdout.Read(), '(?s).*Invalid(?s).*i.e. min value.*')
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_set_omega(self):
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_targets SBO')
self.client.onecmd('set_omega 3.14')
self.assertRegexpMatches(self.stdout.Read(), 'Omega desired: 3.14')
self.assert_eventually_true(
lambda: abs(motor.speed_lower - motor.speed_upper) / 3.14 < _EPS32)
self.assert_eventually_true(
lambda: abs(motor.speed_lower - 3.14) / 3.14 < _EPS32)
self.assert_eventually_true(
lambda: abs(motor.speed_upper - 3.14) / 3.14 < _EPS32)
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_ramp_omega(self):
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_targets SBO')
self.client.onecmd('ramp_omega 3.14 0.0')
self.client.onecmd('arm')
self.client.onecmd('run 5s')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Ramping.*')
self.assert_eventually_true(
lambda: abs(motor.speed_lower - 3.14) / 3.14 < _EPS32)
self.assert_eventually_true(
lambda: abs(motor.speed_upper - 3.14) / 3.14 < _EPS32)
self.client.onecmd('ramp_omega 6.28 0.5')
self.assertRegexpMatches(self.stdout.Read(), '(?s).*Ramping.*')
self.assert_eventually_true(
lambda: abs(motor.speed_lower - 6.28) / 6.28 < _EPS32)
self.assert_eventually_true(
lambda: abs(motor.speed_upper - 6.28) / 6.28 < _EPS32)
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_set_command_function_python_succeed(self):
with tempfile.NamedTemporaryFile(suffix='.py') as python_file:
data_length = 3*len(motor_client.MOTORS)
python_file.write(textwrap.dedent("""
t_step = 0.1
t_end = 1.0
def Cmd(t):
command = [0.0] * %d
command[0] = 3.14
command[8] = 3.14
command[16] = 3.14
return command""" % data_length))
python_file.flush()
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_targets SBO')
self.client.onecmd('set_command_function ' + python_file.name)
self.assertRegexpMatches(self.stdout.Read(),
r'(?s).*Using %s to generate command '
r'profile.*' % python_file.name)
self.client.onecmd('arm')
self.client.onecmd('run 10s')
self.assert_eventually_true(
lambda: abs(motor.torque - 3.14) / 3.14 < _EPS32)
self.assert_eventually_true(
lambda: abs(motor.speed_lower - 3.14) / 3.14 < _EPS32)
self.assert_eventually_true(
lambda: abs(motor.speed_upper - 3.14) / 3.14 < _EPS32)
def test_do_set_command_function_python_fail(self):
with tempfile.NamedTemporaryFile(suffix='.py') as python_file:
python_file.write('this will raise a syntax error')
python_file.flush()
with self.stdout:
self.client.onecmd('set_targets SBO')
self.client.onecmd('set_command_function ' + python_file.name)
self.assertRegexpMatches(self.stdout.Read(),
'(?s).*Generation of lookup table from %s '
'failed.*' % python_file.name)
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_set_command_function_text_succeed(self):
with tempfile.NamedTemporaryFile(suffix='.txt') as text_file:
text_file.write(textwrap.dedent("""
0.0 3.14 1 1 1 1 1 1 1 3.14 1 1 1 1 1 1 1 3.14 1 1 1 1 1 1 1
100 3.14 1 1 1 1 1 1 1 3.14 1 1 1 1 1 1 1 3.14 1 1 1 1 1 1 1"""[1:]))
text_file.flush()
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_targets SBO')
self.client.onecmd('set_command_function ' + text_file.name)
self.assertRegexpMatches(self.stdout.Read(),
r'(?s).*Using interpolated values from %s '
r'for command profile.*' % text_file.name)
self.client.onecmd('arm')
self.client.onecmd('run 10s')
self.assert_eventually_true(
lambda: abs(motor.torque - 3.14) / 3.14 < _EPS32)
def test_do_set_command_function_text_fail(self):
with tempfile.NamedTemporaryFile(suffix='.txt') as text_file:
text_file.write('numpy.load will raise ValueError')
text_file.flush()
with self.stdout:
self.client.onecmd('set_targets SBO')
self.client.onecmd('set_command_function ' + text_file.name)
self.assertRegexpMatches(self.stdout.Read(),
'(?s).*Invalid input text file: %s.*' %
text_file.name)
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_set_command_function_limit_fail(self):
with tempfile.NamedTemporaryFile(suffix='.txt') as text_file:
text_file.write(textwrap.dedent("""
0.0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
100 2000 1 1 1 1 1 1 1 2000 1 1 1 1 1 1 1 2000 1 1 1 1 1 1 1"""[1:]))
text_file.flush()
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('set_command_function ' + text_file.name)
self.assertRegexpMatches(self.stdout.Read(),
r'(?s).*Extreme(?s).*outside of '
r'limits \[%f, %f\] detected.*'
% (motor_client.TORQUE_MIN_LIMIT,
motor_client.TORQUE_MAX_LIMIT))
self.client.onecmd('arm')
self.client.onecmd('run 1s')
self.assert_eventually_true(
lambda: abs(motor.torque) < _EPS32)
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_print_new_errors_asynchronously(self):
regex = re.compile('(?s).*SBO: kMotorErrorOverSpeed'
' | kMotorErrorOverVoltage.*')
with FakeMotor('SBO') as motor:
motor.SetError(flags.kMotorErrorOverVoltage
| flags.kMotorErrorOverSpeed)
with self.stdout:
self.client.onecmd('set_targets SBO')
self.assert_eventually_true(lambda: regex.match(self.stdout.Read()))
other_motors = [m for m in motor_client.MOTORS if m != 'SBO']
# Make sure we only print the motor with an error.
self.assertFalse(re.match('(?s).*(%s).*' % '|'.join(other_motors),
self.stdout.Read()))
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_print_new_warnings_asynchronously(self):
regex = re.compile('(?s).*SBO: kMotorWarningOverTempBoard'
' | kMotorWarningOverTempStatorCore.*')
with FakeMotor('SBO') as motor:
motor.SetWarning(flags.kMotorWarningOverTempBoard
| flags.kMotorWarningOverTempStatorCore)
with self.stdout:
self.client.onecmd('set_targets SBO')
self.assert_eventually_true(lambda: regex.match(self.stdout.Read()))
other_motors = [m for m in motor_client.MOTORS if m != 'SBO']
# Make sure we only print the motor with an error.
self.assertFalse(re.match('(?s).*(%s).*' % '|'.join(other_motors),
self.stdout.Read()))
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_get_errors(self):
regex = re.compile('(?s).*SBO: kMotorErrorOverSpeed'
' | kMotorErrorOverVoltage.*')
with FakeMotor('SBO') as motor:
motor.SetError(flags.kMotorErrorOverVoltage
| flags.kMotorErrorOverSpeed)
with self.stdout:
self.client.onecmd('set_targets SBO')
self.assert_eventually_true(lambda: regex.match(self.stdout.Read()))
with self.stdout: # Reset stdout contents.
self.client.onecmd('get_errors')
self.assertRegexpMatches(self.stdout.Read(), regex)
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_get_warnings(self):
regex = re.compile('(?s).*SBO: kMotorWarningOverTempBoard'
' | kMotorWarningOverTempStatorCore.*')
with FakeMotor('SBO') as motor:
motor.SetWarning(flags.kMotorWarningOverTempBoard
| flags.kMotorWarningOverTempStatorCore)
with self.stdout:
self.client.onecmd('set_targets SBO')
self.assert_eventually_true(lambda: regex.match(self.stdout.Read()))
with self.stdout: # Reset stdout contents.
self.client.onecmd('get_errors')
self.assertRegexpMatches(self.stdout.Read(), regex)
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_do_clear_errors(self):
regex = re.compile('(?s).*SBO: kMotorErrorOverVoltage.*')
with self.stdout, FakeMotor('SBO') as motor:
motor.SetError(flags.kMotorErrorOverVoltage)
self.client.onecmd('set_targets SBO')
self.assert_eventually_true(lambda: regex.match(self.stdout.Read()))
self.client.onecmd('clear_errors')
self.assert_eventually_true(
lambda: motor.GetError() == flags.kMotorErrorNone)
def test_do_source_fail(self):
with tempfile.NamedTemporaryFile() as source_file:
source_file.write(textwrap.dedent("""
set_targets SBO
arm
run 100 s"""[1:]))
source_file.flush()
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('source ' + source_file.name)
regex = re.compile(
'(?s).*Only "set_param"-like commands.*')
self.assertRegexpMatches(self.stdout.Read(), regex)
def test_do_source_succeed(self):
with tempfile.NamedTemporaryFile() as source_file:
source_file.write(textwrap.dedent("""
set_param SBO i_kp 3.14
set_param SBO cos_offset 0.2
# This is a comment.
set_param SBO iq_lower_limit -1e-3
set_param SBO iq_upper_limit 245""")[1:])
source_file.flush()
with self.stdout, FakeMotor('SBO') as motor:
self.client.onecmd('source ' + source_file.name)
self.assertAlmostEqual(motor.GetParam('i_kp'), 3.14, places=6)
self.assertAlmostEqual(motor.GetParam('cos_offset'), 0.2, places=6)
self.assertAlmostEqual(motor.GetParam('iq_lower_limit'),
-1e-3, places=6)
self.assertAlmostEqual(motor.GetParam('iq_upper_limit'),
245, places=6)
def test_do_source_track_errors(self):
with tempfile.NamedTemporaryFile() as source_file:
source_file.write(textwrap.dedent("""
set_param SBO i_kp 3.14
set_param SBO foo 0.2
set_param SBO omega_kp 0.1
set_param SBO bar 0.5""")[1:])
source_file.flush()
with self.stdout, FakeMotor('SBO'):
self.client.onecmd('source ' + source_file.name)
self.assertRegexpMatches(
self.stdout.Read(),
'(?s).*Errors encountered.*Line 2.*foo.*Line 4.*bar.*')
@unittest.skipIf(socket.gethostname().startswith('jenkins-'),
'This test is flaky when run on GCE.')
def test_stop_running_on_error(self):
regex = re.compile('(?s).*SBO: kMotorErrorOverVoltage.*')
with FakeMotor('SBO') as motor:
with self.stdout:
self.client.onecmd('set_targets SBO')
self.client.onecmd('arm')
self.client.onecmd('run 1000 s')
motor.SetError(flags.kMotorErrorOverVoltage)
self.assert_eventually_true(lambda: regex.match(self.stdout.Read()))
self.assert_eventually_true(lambda: not motor.running)
if __name__ == '__main__':
unittest.main()
|
tox_helpers/test_missing_dependencies.py | sivchand/smart_open | 2,047 | 12737322 | <reponame>sivchand/smart_open<gh_stars>1000+
import os
import subprocess
os.environ['SMART_OPEN_TEST_MISSING_DEPS'] = '1'
command = [
'pytest',
'smart_open/tests/test_package.py',
'-v',
'--cov', 'smart_open',
'--cov-report', 'term-missing',
]
subprocess.check_call(command)
|
alg/dklite/model.py | loramf/mlforhealthlabpub | 171 | 12737362 | <gh_stars>100-1000
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import warnings
warnings.filterwarnings("ignore")
import logging
logging.getLogger('tensorflow').disabled = True
import numpy as np
class DKLITE(object):
def __init__(self, input_dim, output_dim, num_hidden=50, num_layers =2, learning_rate=0.001,
reg_var=1.0,reg_rec=1.0):
self.num_layers = num_layers
self.output_dim = output_dim
self.num_hidden = num_hidden
self.input_dim = input_dim
self.size_z = num_hidden
self.ml_primal = {}
self.ker_inv = {}
self.params = {}
self.mean = {}
self.num = {}
''' Initialize parameter weight '''
self.params = self.initialize_weights()
self.mu = tf.reduce_mean(self.T)
self.Z_train = self.Encoder(self.X)
self.Z_test = self.Encoder(self.X_u)
self.loss_1 = tf.reduce_mean(tf.reduce_sum(tf.square(self.X - self.Decoder(self.Z_train)),axis=1))
Z_0 = tf.gather(self.Z_train, tf.where(self.T < 0.5)[:, 0])
Y_0 = tf.gather(self.Y, tf.where(self.T < 0.5)[:, 0])
Z_1 = tf.gather(self.Z_train, tf.where(self.T > 0.5)[:, 0])
Y_1 = tf.gather(self.Y, tf.where(self.T > 0.5)[:, 0])
mean_0 = tf.reduce_mean(Y_0)
mean_1 = tf.reduce_mean(Y_1)
Y_0 = (Y_0-mean_0)
Y_1 = (Y_1-mean_1)
self.GP_NN(Y_0, Z_0, 0)
self.GP_NN(Y_1, Z_1,1)
self.var_0 = tf.reduce_mean(tf.diag_part(tf.matmul(Z_1,tf.matmul(self.ker_inv['0'], tf.transpose(Z_1)))))
self.var_1 = tf.reduce_mean(tf.diag_part(tf.matmul(Z_0,tf.matmul(self.ker_inv['1'], tf.transpose(Z_0)))))
self.ele_var_0_tr = tf.diag_part(tf.matmul(self.Z_train,tf.matmul(self.ker_inv['0'], tf.transpose(self.Z_train))))
self.ele_var_1_tr = tf.diag_part(tf.matmul(self.Z_train,tf.matmul(self.ker_inv['1'], tf.transpose(self.Z_train))))
self.ele_var_0_te = tf.diag_part(tf.matmul(self.Z_test,tf.matmul(self.ker_inv['0'], tf.transpose(self.Z_test))))
self.ele_var_1_te = tf.diag_part(tf.matmul(self.Z_test,tf.matmul(self.ker_inv['1'], tf.transpose(self.Z_test))))
pred_tr_0 = tf.matmul(self.Z_train, self.mean['0']) + mean_0
pred_tr_1 = tf.matmul(self.Z_train, self.mean['1']) + mean_1
pred_te_0 = tf.matmul(self.Z_test, self.mean['0']) + mean_0
pred_te_1 = tf.matmul(self.Z_test, self.mean['1']) + mean_1
self.Y_train = tf.concat([pred_tr_0,pred_tr_1],axis=1)
self.Y_test = tf.concat([pred_te_0,pred_te_1],axis=1)
self.loss_0 = self.ml_primal['0']+ self.ml_primal['1']
self.prediction_loss = self.ml_primal['0']+ self.ml_primal['1'] + reg_var *(self.var_0 + self.var_1)+ reg_rec * self.loss_1
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.prediction_loss)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
def element_var(self, X, Y, T, X_u):
var_0_tr, var_1_tr,var_0_te, var_1_te = self.sess.run([self.ele_var_0_tr,self.ele_var_1_tr,self.ele_var_0_te,self.ele_var_1_te],
feed_dict={self.X: X, self.X_u: X_u, self.Y: Y, self.T: T})
return var_0_tr,var_1_tr,var_0_te,var_1_te
def embed(self, X, Y, T):
Z= self.sess.run(self.Z_train, feed_dict={self.X: X, self.Y: Y, self.T: T})
return Z
def fit(self, X, Y, T, num_iteration):
loss_list = []
for i in range(num_iteration):
loss, _ = self.sess.run([self.prediction_loss,self.optimizer], feed_dict={self.X: X, self.Y: Y, self.T: T})
loss_list.append(np.sum(loss))
diff_list = np.abs(np.diff(loss_list))
if i>50 and np.abs(np.mean(diff_list[-10:]) - np.mean(diff_list[-40:-10]) )< np.std(diff_list[-40:-10]):
break
def pred(self, X, Y, T, X_u):
Y_hat_train, Y_hat_test = self.sess.run([self.Y_train, self.Y_test], feed_dict={self.X: X, self.X_u: X_u, self.Y: Y, self.T: T})
return Y_hat_train, Y_hat_test
def destroy_graph(self):
tf.reset_default_graph()
def Encoder(self, X):
X_h =tf.nn.elu(tf.matmul(X, self.params['e_w_in']) + self.params['e_b_in'])
for layer_i in range( self.num_layers):
X_h = tf.nn.elu(tf.matmul(X_h, self.params['e_w_' + str(layer_i)])+self.params['e_b_' + str(layer_i)])
Z = tf.nn.elu(tf.matmul(X_h, self.params['e_w_' + str(self.num_layers)])+ self.params['e_b_' + str(self.num_layers)])
return Z
def Decoder(self,Z):
Z_pred = tf.nn.elu(tf.matmul(Z, self.params['d_w_in']) + self.params['d_b_in'])
for layer_i in range(self.num_layers):
Z_pred = tf.nn.elu(tf.matmul(Z_pred, self.params['d_w_' + str(layer_i)])+ self.params['d_b_' + str(layer_i)])
X_p = tf.matmul(Z_pred, self.params['d_w_' + str(self.num_layers)]+ self.params['d_b_' + str(self.num_layers)])
return X_p
def GP_NN(self, Y_f, Z_f,index):
beta = tf.ones([1,1],tf.float32)
lam = 1000*tf.ones([1,1],tf.float32)
r = beta / lam
self.DD = tf.shape(Z_f)[1]
phi_phi = tf.matmul(tf.transpose(Z_f), Z_f)
Ker = r * phi_phi + tf.eye(tf.shape(Z_f)[1], dtype=tf.float32)
L_matrix = tf.cholesky(Ker)
L_inv_reduce = tf.linalg.triangular_solve(L_matrix, rhs=tf.eye(self.DD, dtype=tf.float32))
L_y = tf.matmul(L_inv_reduce, tf.matmul(tf.transpose(Z_f), Y_f))
self.ker_inv[str(index)] = tf.matmul(tf.transpose(L_inv_reduce), L_inv_reduce) / lam
self.mean[str(index)] = r * tf.matmul(tf.transpose(L_inv_reduce), L_y)
term1 = - tf.reduce_mean(tf.square(L_y))
#term2 = tf.log(tf.linalg.diag_part(L_matrix)) / ((1-index)*tf.reduce_sum(1 - self.T) + (index)* tf.reduce_sum(self.T))
self.ml_primal[str(index)] = term1 #+ term2
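    # Clarifying note (added; notation taken from the code above): GP_NN amounts to
    # Bayesian linear regression on the encoded features Z_f with prior precision lam
    # and noise precision beta, where r = beta / lam. The stored quantities are
    #     mean[index]    = r * (r * Z_f^T Z_f + I)^{-1} Z_f^T Y_f
    #     ker_inv[index] = (1 / lam) * (r * Z_f^T Z_f + I)^{-1}
    # i.e. the posterior mean and (scaled) posterior covariance used for prediction
    # and for the variance terms in the training loss.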
def initialize_weights(self):
self.X = tf.placeholder(tf.float32, [None, self.input_dim])
self.X_u = tf.placeholder(tf.float32, [None, self.input_dim])
self.Y = tf.placeholder(tf.float32, [None, 1])
self.T = tf.placeholder(tf.float32, [None, 1])
all_weights = {}
''' Input layer of the encoder '''
name_wi = 'e_w_in'
all_weights[name_wi] = tf.get_variable(name=name_wi, shape=[self.input_dim, self.num_hidden], trainable=True)
name_bi = 'e_b_in'
all_weights[name_bi] = tf.get_variable(name=name_bi, shape=[self.num_hidden], trainable=True)
''' Hidden layer of the encoder '''
for layer_i in range(self.num_layers):
name_wi = 'e_w_' + str(layer_i)
all_weights[name_wi ] = tf.get_variable(name =name_wi, shape=[self.num_hidden,self.num_hidden], trainable=True)
name_bi = 'e_b_' + str(layer_i)
all_weights[name_bi] = tf.get_variable(name =name_bi, shape = [self.num_hidden], trainable=True)
''' Final layer of the encoder '''
name_wi = 'e_w_' + str(self.num_layers)
all_weights[name_wi] = tf.get_variable(name=name_wi, shape=[self.num_hidden, self.size_z], trainable=True)
name_bi = 'e_b_' + str(self.num_layers)
all_weights[name_bi] = tf.get_variable(name=name_bi, shape=[self.size_z], trainable=True)
name_wi = 'e_w_out_0'
all_weights[name_wi] = tf.get_variable(name=name_wi, shape=[self.size_z, self.output_dim], trainable=True)
name_bi = 'e_b_out_0'
all_weights[name_bi] = tf.get_variable(name=name_bi, shape=[self.output_dim], trainable=True)
name_wi = 'e_w_out_1'
all_weights[name_wi] = tf.get_variable(name=name_wi, shape=[self.size_z, self.output_dim], trainable=True)
name_bi = 'e_b_out_1'
all_weights[name_bi] = tf.get_variable(name=name_bi, shape=[self.output_dim], trainable=True)
''' Input layer of the decoder '''
name_wi = 'd_w_in'
all_weights[name_wi] = tf.get_variable(name=name_wi, shape=[self.size_z, self.num_hidden],trainable=True)
name_bi = 'd_b_in'
all_weights[name_bi] = tf.get_variable(name=name_bi, shape=[self.num_hidden],trainable=True)
''' Hidden layer of the decoder '''
for layer_i in range(self.num_layers):
name_wi = 'd_w_' + str(layer_i)
all_weights[name_wi ] = tf.get_variable(name =name_wi, shape=[self.num_hidden,self.num_hidden],trainable=True)
name_bi = 'd_b_' + str(layer_i)
all_weights[name_bi] = tf.get_variable(name =name_bi, shape = [self.num_hidden],trainable=True)
''' Final layer of the decoder '''
name_wi = 'd_w_' + str(self.num_layers)
all_weights[name_wi] = tf.get_variable(name=name_wi, shape=[self.num_hidden, self.input_dim],trainable=True)
name_bi = 'd_b_' + str(self.num_layers)
all_weights[name_bi] = tf.get_variable(name=name_bi, shape=[(self.input_dim)],trainable=True)
return all_weights
|
utils/stop_test_exception_util.py | go000o/POMautomateCodeFramework | 207 | 12737364 | '''
This utility provides custom exceptions.
a) Stop_Test_Exception
You can raise a generic exception using just a string.
This is particularly useful when you want to end a test midway based on some condition.
'''
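# Illustrative usage of Stop_Test_Exception (defined below); a hedged sketch, not part
# of the original utility:
#
#   if not precondition_met:
#       raise Stop_Test_Exception("Pre-condition failed; stopping this test midway.")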
class Stop_Test_Exception(Exception):
def __init__(self,message):
self.message=message
def __str__(self):
return self.message |
PhysicsTools/PatAlgos/python/recoLayer0/jetCorrections_cff.py | ckamtsikis/cmssw | 852 | 12737390 | import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.recoLayer0.jetCorrFactors_cfi import *
from JetMETCorrections.Configuration.JetCorrectionServicesAllAlgos_cff import *
## for scheduled mode
patJetCorrectionsTask = cms.Task(patJetCorrFactors)
patJetCorrections = cms.Sequence(patJetCorrectionsTask)
|
SimGeneral/TrackingAnalysis/python/trackingParticles_cfi.py | ckamtsikis/cmssw | 852 | 12737402 | import FWCore.ParameterSet.Config as cms
mergedtruth = cms.EDProducer("TrackingTruthProducer",
mixLabel = cms.string('mix'),
simHitLabel = cms.string('g4SimHits'),
volumeRadius = cms.double(1200.0),
vertexDistanceCut = cms.double(0.003),
volumeZ = cms.double(3000.0),
mergedBremsstrahlung = cms.bool(True),
removeDeadModules = cms.bool(False),
HepMCDataLabels = cms.vstring('generatorSmeared',
'generator',
'PythiaSource',
'source'
),
useMultipleHepMCLabels = cms.bool(False),
simHitCollections = cms.PSet(
pixel = cms.vstring (
'g4SimHitsTrackerHitsPixelBarrelLowTof',
'g4SimHitsTrackerHitsPixelBarrelHighTof',
'g4SimHitsTrackerHitsPixelEndcapLowTof',
'g4SimHitsTrackerHitsPixelEndcapHighTof'
),
tracker = cms.vstring (
'g4SimHitsTrackerHitsTIBLowTof',
'g4SimHitsTrackerHitsTIBHighTof',
'g4SimHitsTrackerHitsTIDLowTof',
'g4SimHitsTrackerHitsTIDHighTof',
'g4SimHitsTrackerHitsTOBLowTof',
'g4SimHitsTrackerHitsTOBHighTof',
'g4SimHitsTrackerHitsTECLowTof',
'g4SimHitsTrackerHitsTECHighTof'
),
muon = cms.vstring (
'g4SimHitsMuonDTHits',
'g4SimHitsMuonCSCHits',
'g4SimHitsMuonRPCHits'
)
)
)
trackingParticles = cms.Sequence(mergedtruth)
|
examples/showcase/src/demos_widgets/namedFrame.py | takipsizad/pyjs | 739 | 12737434 | """
The ``ui.NamedFrame`` class is a variation of the ``ui.Frame`` which lets you
assign a name to the frame. Naming a frame allows you to refer to that frame
by name in Javascript code, and as the target for a hyperlink.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.NamedFrame import NamedFrame
from pyjamas.ui.HTML import HTML
class NamedFrameDemo(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
vPanel = VerticalPanel(Spacing=5)
frame = NamedFrame("myFrame",
Width="100%",
Height="200px")
vPanel.add(frame)
vPanel.add(HTML('<a href="http://google.com" target="myFrame">Google</a>'))
vPanel.add(HTML('<a href="http://yahoo.com" target="myFrame">Yahoo</a>'))
vPanel.add(HTML('<a href="http://pyjs.org" target="myFrame">Pyjamas</a>'))
self.add(vPanel)
|
src/graph/config_indep_binary.py | AbhinavGopal/ts_tutorial | 290 | 12737451 | <reponame>AbhinavGopal/ts_tutorial
"""Specify the jobs to run via config file.
Binomial bridge bandit experiment.
Binomial bridge with only binary reward at the end --> no conjugate update.
See Figure 9 https://arxiv.org/pdf/1707.02038.pdf
"""
import collections
import functools
from base.config_lib import Config
from base.experiment import ExperimentNoAction
from graph.agent_indep_binary import BootstrapIndependentBBWithBinaryReward
from graph.agent_indep_binary import LaplaceIndependentBBWithBinaryReward
from graph.agent_indep_binary import StochasticLangevinMCMCIndependentBBWithBinaryReward
from graph.agent_indep_binary import EpsilonGreedyIndependentBBWithBinaryReward
from graph.env_graph_bandit import IndependentBinomialBridgeWithBinaryReward
def get_config():
"""Generates the config for the experiment."""
name = 'graph_indep_binary_new'
n_stages = 20
shape = 2
scale = 0.5
tol = 0.001
alpha = 0.2
beta = 0.5
langevin_batch_size = 100
langevin_step_count = 200
langevin_step_size = 0.0005
epsilon = 0
agents = collections.OrderedDict(
[('Langevin TS',
functools.partial(EpsilonGreedyIndependentBBWithBinaryReward,
n_stages, epsilon, shape, scale, tol, alpha, beta))])
# agents = collections.OrderedDict(
# [('Langevin TS',
# functools.partial(StochasticLangevinMCMCIndependentBBWithBinaryReward,
# n_stages, shape, scale, tol, alpha, beta, langevin_batch_size,
# langevin_step_count, langevin_step_size)),
# ('bootstrap TS',
# functools.partial(BootstrapIndependentBBWithBinaryReward,
# n_stages, shape, scale, tol, alpha, beta)),
# ('Laplace TS',
# functools.partial(LaplaceIndependentBBWithBinaryReward,
# n_stages, shape, scale, tol, alpha, beta))]
# )
environments = collections.OrderedDict(
[('env',
functools.partial(IndependentBinomialBridgeWithBinaryReward,
n_stages, shape, scale))]
)
experiments = collections.OrderedDict(
[(name, ExperimentNoAction)]
)
n_steps = 500
n_seeds = 1000
config = Config(name, agents, environments, experiments, n_steps, n_seeds)
return config
|
nn/modules.py | yizt/numpy_neural_network | 428 | 12737511 | <filename>nn/modules.py
# -*- coding: utf-8 -*-
"""
@File : modules.py
@Time : 2020/4/18 8:28 AM
@Author : yizuotian
@Description :
"""
from typing import List
from activations import *
from layers import fc_forward, fc_backward, global_avg_pooling_forward, flatten_forward, flatten_backward
from layers_v2 import conv_forward, conv_backward, max_pooling_forward, max_pooling_backward, \
global_avg_pooling_backward
from losses import *
from optimizers import *
# pyximport.install()
# from clayers import *
class BaseModule(object):
def __init__(self, name=''):
"""
        :param name: layer name
        """
        self.name = name
        self.weights = dict()  # dict of weight parameters
        self.gradients = dict()  # dict of gradients
        self.in_features = None  # the input feature map
def forward(self, x):
pass
def backward(self, in_gradient):
pass
def update_gradient(self, lr):
pass
def load_weights(self, weights):
"""
        Load the weights.
:param weights:
:return:
"""
for key in self.weights.keys():
self.weights[key] = weights[key]
class Model(BaseModule):
"""
    Network model.
"""
def __init__(self, layers: List[BaseModule], **kwargs):
super(Model, self).__init__(**kwargs)
self.layers = layers
        # collect the weights and gradients from all layers
for l in self.layers:
self.weights.update(l.weights)
self.gradients.update(l.gradients)
def forward(self, x):
for l in self.layers:
x = l.forward(x)
# print('forward layer:{},feature:{}'.format(l.name, np.max(x)))
        # return the network output
return x
def backward(self, in_gradient):
        # back-propagate through the layers in reverse order
for l in self.layers[::-1]:
in_gradient = l.backward(in_gradient)
# print('backward layer:{},gradient:{}'.format(l.name, np.max(in_gradient)))
def update_gradient(self, lr):
for l in self.layers:
l.update_gradient(lr)
def load_weights(self, weights):
"""
        Load the model weights.
        :param weights:
        :return:
        """
        # load weights layer by layer
for l in self.layers:
l.load_weights(weights)
class Linear(BaseModule):
"""
    Fully-connected layer.
"""
def __init__(self, in_units, out_units, **kwargs):
"""
        :param in_units: number of input units
        :param out_units: number of output units
"""
super(Linear, self).__init__(**kwargs)
        # weight parameters
weight = np.random.randn(in_units, out_units) * np.sqrt(2 / in_units)
bias = np.zeros(out_units)
        # gradients of the weights
g_weight = np.zeros_like(weight)
g_bias = np.zeros_like(bias)
        # dicts holding the weights and gradients
self.weights = {"{}_weight".format(self.name): weight,
"{}_bias".format(self.name): bias}
self.gradients = {"{}_weight".format(self.name): g_weight,
"{}_bias".format(self.name): g_bias}
@property
def weight(self):
return self.weights["{}_weight".format(self.name)]
@property
def bias(self):
return self.weights["{}_bias".format(self.name)]
def set_gradient(self, name, gradient):
"""
        Update the stored gradient.
        :param name: either 'weight' or 'bias'
:param gradient:
:return:
"""
self.gradients["{}_{}".format(self.name, name)] = gradient
def forward(self, x):
"""
:param x: [B,in_units]
:return output: [B,out_units]
"""
self.in_features = x
output = fc_forward(x, self.weight, self.bias)
return output
def backward(self, in_gradient):
"""
        Back-propagate the gradient.
        :param in_gradient: gradient passed back from the next layer, [B,out_units]
        :return out_gradient: gradient passed to the previous layer, [B,in_units]
"""
g_weight, g_bias, out_gradient = fc_backward(in_gradient,
self.weight,
self.in_features)
self.set_gradient('weight', g_weight)
self.set_gradient('bias', g_bias)
return out_gradient
def update_gradient(self, lr):
"""
        Apply a gradient-descent update to the weights.
        :param lr: learning rate
        :return:
        """
        self.weights["{}_weight".format(self.name)] -= self.gradients["{}_weight".format(self.name)] * lr
        self.weights["{}_bias".format(self.name)] -= self.gradients["{}_bias".format(self.name)] * lr
class Conv2D(BaseModule):
"""
    2D convolution layer.
"""
def __init__(self, in_filters, out_filters, kernel=(3, 3), padding=(1, 1), stride=(1, 1), **kwargs):
super(Conv2D, self).__init__(**kwargs)
self.in_filters = in_filters
self.out_filters = out_filters
self.kernel = kernel
self.padding = padding
self.stride = stride
        # weight parameters
        fan_in = in_filters * kernel[0] * kernel[1]  # number of input parameters
        fan_out = out_filters * kernel[0] * kernel[1]  # number of output parameters
weight = np.random.randn(in_filters,
out_filters,
*kernel) * np.sqrt(2 / (fan_in + fan_out))
bias = np.zeros(out_filters)
        # gradients
g_weight = np.zeros_like(weight)
g_bias = np.zeros_like(bias)
        # dicts holding the weights and gradients
self.weights = {"{}_weight".format(self.name): weight,
"{}_bias".format(self.name): bias}
self.gradients = {"{}_weight".format(self.name): g_weight,
"{}_bias".format(self.name): g_bias}
@property
def weight(self):
return self.weights["{}_weight".format(self.name)]
@property
def bias(self):
return self.weights["{}_bias".format(self.name)]
def set_gradient(self, name, gradient):
"""
        Update the stored gradient.
        :param name: either 'weight' or 'bias'
:param gradient:
:return:
"""
self.gradients["{}_{}".format(self.name, name)] = gradient
def forward(self, x):
"""
:param x: [B,in_filters,H,W]
:return output: [B,out_filters,H,W]
"""
self.in_features = x
output = conv_forward(x, self.weight, self.bias, self.padding, self.stride)
return output
def backward(self, in_gradient):
"""
        :param in_gradient: gradient passed back from the next layer, [B,out_filters,H,W]
        :return out_gradient: gradient passed to the previous layer, [B,in_filters,H,W]
"""
g_weight, g_bias, out_gradient = conv_backward(in_gradient,
self.weight,
self.in_features,
self.padding, self.stride)
self.set_gradient('weight', g_weight)
self.set_gradient('bias', g_bias)
return out_gradient
def update_gradient(self, lr):
        self.weights["{}_weight".format(self.name)] -= self.gradients["{}_weight".format(self.name)] * lr
        self.weights["{}_bias".format(self.name)] -= self.gradients["{}_bias".format(self.name)] * lr
class ReLU(BaseModule):
def __init__(self, **kwargs):
super(ReLU, self).__init__(**kwargs)
def forward(self, x):
self.in_features = x
return relu_forward(x)
def backward(self, in_gradient):
"""
        :param in_gradient: gradient passed back from the next layer
        :return out_gradient: gradient passed to the previous layer
"""
out_gradient = relu_backward(in_gradient, self.in_features)
return out_gradient
class MaxPooling2D(BaseModule):
"""
    Max-pooling layer.
"""
def __init__(self, kernel=(2, 2), stride=(2, 2), padding=(0, 0), **kwargs):
"""
        :param kernel: pooling window size
        :param stride: stride
:param padding: padding
:param kwargs:
"""
super(MaxPooling2D, self).__init__(**kwargs)
self.kernel = kernel
self.stride = stride
self.padding = padding
def forward(self, x):
"""
:param x: [B,C,H,W]
:return output : [B,C,H',W']
"""
self.in_features = x
output = max_pooling_forward(x, self.kernel, self.stride, self.padding)
return output
def backward(self, in_gradient):
"""
        :param in_gradient: gradient passed back from the next layer
        :return out_gradient: gradient passed to the previous layer
"""
out_gradient = max_pooling_backward(in_gradient,
self.in_features,
self.kernel,
self.stride,
self.padding)
return out_gradient
class GlobalAvgPooling2D(BaseModule):
"""
    Global average pooling layer.
"""
def __init__(self, **kwargs):
super(GlobalAvgPooling2D, self).__init__(**kwargs)
def forward(self, x):
"""
:param x: [B,C,H,W]
:return output : [B,C,H',W']
"""
self.in_features = x
output = global_avg_pooling_forward(x)
return output
def backward(self, in_gradient):
"""
        :param in_gradient: gradient passed back from the next layer
        :return out_gradient: gradient passed to the previous layer
"""
out_gradient = global_avg_pooling_backward(in_gradient,
self.in_features)
return out_gradient
class Flatten(BaseModule):
"""
    Flatten layer.
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
def forward(self, x):
self.in_features = x
return flatten_forward(x)
def backward(self, in_gradient):
"""
        :param in_gradient: gradient passed back from the next layer
        :return out_gradient: gradient passed to the previous layer
"""
out_gradient = flatten_backward(in_gradient, self.in_features)
return out_gradient
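def _example_tiny_cnn():
    """Illustrative composition of the layers above into a small CNN (added sketch;
    shapes assumed, e.g. that Conv2D with a 3x3 kernel and padding (1, 1) preserves
    the spatial size and MaxPooling2D halves it)."""
    cnn = Model([Conv2D(1, 8, kernel=(3, 3), name='conv1'),
                 ReLU(name='relu1'),
                 MaxPooling2D(name='pool1'),
                 Flatten(name='flatten'),
                 Linear(8 * 14 * 14, 10, name='fc1')])
    logits = cnn.forward(np.random.randn(2, 1, 28, 28))
    return logits.shape  # expected: (2, 10)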
def test_linear():
    # ground-truth weights and bias
W = np.array([[3, 7, 4],
[5, 2, 6]])
b = np.array([2, 9, 3])
    # generate training samples
x_data = np.random.randn(500, 2)
y_data = np.dot(x_data, W) + b
def next_sample(batch_size=1):
idx = np.random.randint(500)
return x_data[idx:idx + batch_size], y_data[idx:idx + batch_size]
fc_layer = Linear(2, 3, name='fc1')
    # fc_layer.weights['fc1_weight'] *= 1e-2  # a single-layer weight init should be small
m = Model([fc_layer])
sgd = SGD(m.weights, lr=1e-3)
i = 0
loss = 1
while loss > 1e-15:
        x, y_true = next_sample(4)  # fetch the current samples
        # forward pass
        y = m.forward(x)
        # backward pass to compute the gradients
        loss, dy = mean_squared_loss(y, y_true)
        m.backward(dy)
        # apply the gradient update
        sgd.iterate(m)
        # update the iteration count
        i += 1
if i % 10000 == 0:
print("y_pred:{},y_true:{}".format(y, y_true))
print("\n迭代{}次,当前loss:{}, 当前权重:{},当前偏置{},梯度:{}".format(i, loss,
m.layers[0].weight,
m.layers[0].bias,
m.layers[0].gradients))
# print(m.weights)
    print('After {} iterations, current weights: {}'.format(i, m.layers[0].weights))
if __name__ == '__main__':
test_linear()
|
src/visions/backends/python/types/string.py | bhumikapahariapuresoftware/visions | 142 | 12737515 | from typing import Sequence
from visions.backends.python.series_utils import (
sequence_handle_none,
sequence_not_empty,
)
from visions.types.string import String
@String.contains_op.register
@sequence_not_empty
@sequence_handle_none
def string_contains(sequence: Sequence, state: dict) -> bool:
return all(isinstance(v, str) for v in sequence)
|
f5/bigip/tm/sys/test/functional/test_disk.py | nghia-tran/f5-common-python | 272 | 12737537 | <filename>f5/bigip/tm/sys/test/functional/test_disk.py<gh_stars>100-1000
# Copyright 2018 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip.tm.sys.disk import Logical_Disk
import pytest
from requests import HTTPError
class TestLogicalDisk(object):
def test_load_refresh(self, mgmt_root):
d1 = mgmt_root.tm.sys.disk.logical_disks.logical_disk.load(name='HD1')
assert d1.name == 'HD1'
assert d1.kind == 'tm:sys:disk:logical-disk:logical-diskstate'
assert d1.mode == 'mixed'
d2 = mgmt_root.tm.sys.disk.logical_disks.logical_disk.load(name='HD1')
assert d2.name == d1.name
assert d2.kind == d1.kind
assert d2.mode == d1.mode
d1.refresh()
assert d1.name == d2.name
assert d1.kind == d2.kind
assert d1.mode == d2.mode
def test_load_no_object(self, mgmt_root):
rc = mgmt_root.tm.sys.disk.logical_disks
with pytest.raises(HTTPError) as err:
rc.logical_disk.load(name='not_exists')
assert err.value.response.status_code == 404
def test_logical_disks_collection(self, mgmt_root):
rc = mgmt_root.tm.sys.disk.logical_disks.get_collection()
assert isinstance(rc, list)
assert len(rc)
assert isinstance(rc[0], Logical_Disk)
|
src/mylib/torch/nn/modules/dense.py | murez/mobile-semantic-segmentation | 713 | 12737592 | from torch import nn
from torch.nn.init import xavier_uniform_
from mylib.torch.nn.init import zeros_initializer
class Dense(nn.Linear):
r"""Fully connected linear layer with activation function.
.. math::
y = activation(xW^T + b)
Args:
in_features (int): number of input feature :math:`x`.
out_features (int): number of output features :math:`y`.
bias (bool, optional): if False, the layer will not adapt bias :math:`b`.
activation (callable, optional): if None, no activation function is used.
weight_init (callable, optional): weight initializer from current weight.
bias_init (callable, optional): bias initializer from current bias.
"""
def __init__(
self,
in_features,
out_features,
bias=True,
activation=None,
weight_init=xavier_uniform_,
# weight_init=xavier_normal_,
bias_init=zeros_initializer,
):
self.weight_init = weight_init
self.bias_init = bias_init
self.activation = activation
# initialize linear layer y = xW^T + b
super(Dense, self).__init__(in_features, out_features, bias)
def reset_parameters(self):
"""Reinitialize models weight and bias values."""
self.weight_init(self.weight)
if self.bias is not None:
self.bias_init(self.bias)
def forward(self, inputs):
"""Compute layer output.
Args:
inputs (dict of torch.Tensor): batch of input values.
Returns:
torch.Tensor: layer output.
"""
# compute linear layer y = xW^T + b
y = super(Dense, self).forward(inputs)
# add activation function
if self.activation:
y = self.activation(y)
return y
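def _example_dense_usage():
    """Minimal usage sketch (added; shapes assumed, not part of the original module):
    Dense(16, 8) maps a (batch, 16) input to a (batch, 8) output."""
    import torch  # local import, only needed for this sketch
    layer = Dense(16, 8, activation=torch.relu)
    return layer(torch.randn(4, 16))  # expected shape: (4, 8)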
|
python/ray/serve/examples/doc/quickstart_class.py | mgelbart/ray | 21,382 | 12737596 | <reponame>mgelbart/ray
import requests
import ray
from ray import serve
serve.start()
@serve.deployment
class Counter:
def __init__(self):
self.count = 0
def __call__(self, *args):
self.count += 1
return {"count": self.count}
# Deploy our class.
Counter.deploy()
# Query our endpoint in two different ways: from HTTP and from Python.
assert requests.get("http://127.0.0.1:8000/Counter").json() == {"count": 1}
assert ray.get(Counter.get_handle().remote()) == {"count": 2}
|
tests/unit/test_tty.py | tedivm/dockerpty | 129 | 12737611 | <gh_stars>100-1000
# dockerpty: test_tty.py.
#
# Copyright 2014 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from expects import expect, equal, be_none, be_true, be_false
import dockerpty.tty as tty
import tests.util as util
import os
import pty
import termios
import tempfile
def israw(fd):
__, __, __, flags, __, __, __ = termios.tcgetattr(fd)
return not flags & termios.ECHO
def test_size_returns_none_for_non_tty():
with tempfile.TemporaryFile() as t:
expect(tty.size(t)).to(be_none)
def test_size_returns_a_tuple_for_a_tty():
fd, __ = pty.openpty()
fd = os.fdopen(fd)
util.set_pty_size(fd, (43, 120))
expect(tty.size(fd)).to(equal((43, 120)))
class TestTerminal(object):
def test_start_when_raw(self):
fd, __ = pty.openpty()
terminal = tty.Terminal(os.fdopen(fd), raw=True)
expect(israw(fd)).to(be_false)
terminal.start()
expect(israw(fd)).to(be_true)
def test_start_when_not_raw(self):
fd, __ = pty.openpty()
terminal = tty.Terminal(os.fdopen(fd), raw=False)
expect(israw(fd)).to(be_false)
terminal.start()
expect(israw(fd)).to(be_false)
def test_stop_when_raw(self):
fd, __ = pty.openpty()
terminal = tty.Terminal(os.fdopen(fd), raw=True)
terminal.start()
terminal.stop()
expect(israw(fd)).to(be_false)
def test_raw_with_block(self):
fd, __ = pty.openpty()
fd = os.fdopen(fd)
with tty.Terminal(fd, raw=True):
expect(israw(fd)).to(be_true)
expect(israw(fd)).to(be_false)
def test_start_does_not_crash_when_fd_is_not_a_tty(self):
with tempfile.TemporaryFile() as f:
terminal = tty.Terminal(f, raw=True)
terminal.start()
terminal.stop()
def test_repr(self):
fd = 'some_fd'
terminal = tty.Terminal(fd, raw=True)
expect(repr(terminal)).to(equal("Terminal(some_fd, raw=True)"))
|
descarteslabs/workflows/types/geospatial/tests/test_geocontext.py | carderne/descarteslabs-python | 167 | 12737614 | import pytest
import shapely.geometry
from descarteslabs import scenes
from .. import GeoContext
from descarteslabs.workflows.types.containers import Tuple
from descarteslabs.workflows.types.primitives import Int, Float
def test_from_scenes_wrong_type():
with pytest.raises(
TypeError, match=r"expected a `descarteslabs\.scenes\.GeoContext`"
):
GeoContext.from_scenes("foo")
def test_from_scenes_aoi():
aoi = scenes.AOI(
geometry=shapely.geometry.box(-60.0, 30.0, -50.0, 40.0),
resolution=1,
crs="EPSG:4326",
align_pixels=False,
)
ctx = GeoContext.from_scenes(aoi)
assert ctx.graft[ctx.graft["returns"]][0] == "wf.GeoContext.create"
promoted = GeoContext._promote(aoi)
assert promoted.graft[promoted.graft["returns"]][0] == "wf.GeoContext.create"
def test_from_scenes_tile():
tile_dict = {
"geometry": {
"coordinates": [
[
[-100.10534464886125, 59.94175277369993],
[-99.91065247366876, 59.943240309707676],
[-99.91334037259435, 60.040922421458546],
[-100.10860694364838, 60.039429047992876],
[-100.10534464886125, 59.94175277369993],
]
],
"type": "Polygon",
},
"properties": {
"cs_code": "EPSG:32614",
"geotrans": [438240.0, 20.0, 0, 6656320.0, 0, -20.0],
"key": "512:16:20.0:14:-6:649",
"outputBounds": [438240.0, 6645440.0, 449120.0, 6656320.0],
"pad": 16,
"proj4": "+proj=utm +zone=14 +datum=WGS84 +units=m +no_defs ",
"resolution": 20.0,
"ti": -6,
"tilesize": 512,
"tj": 649,
"zone": 14,
},
"type": "Feature",
}
tile = scenes.DLTile(tile_dict)
ctx = GeoContext.from_scenes(tile)
assert ctx.graft[ctx.graft["returns"]][0] == "wf.GeoContext.from_dltile_key"
promoted = GeoContext._promote(tile)
assert (
promoted.graft[promoted.graft["returns"]][0] == "wf.GeoContext.from_dltile_key"
)
def test_from_scenes_xyztile():
tile = scenes.XYZTile(3, 5, 4)
ctx = GeoContext.from_scenes(tile)
assert ctx.graft[ctx.graft["returns"]][0] == "wf.GeoContext.from_xyz_tile"
promoted = GeoContext._promote(tile)
assert promoted.graft[promoted.graft["returns"]][0] == "wf.GeoContext.from_xyz_tile"
def test_promote_dltile_from_key():
ctx = GeoContext.from_dltile_key("500:0:10.0:13:-17:790")
assert GeoContext._promote(ctx) is ctx
def test_promote_xyztile_from_xyz():
ctx = GeoContext.from_xyz_tile(3, 5, 4)
assert GeoContext._promote(ctx) is ctx
@pytest.mark.parametrize("attr", ["arr_shape", "gdal_geotrans", "projected_bounds"])
def test_readonly_attributes(attr):
type_params = GeoContext._type_params[0]
ctx = GeoContext.from_xyz_tile(3, 5, 4)
assert isinstance(getattr(ctx, attr), type_params[attr])
def test_index_to_coords():
aoi = scenes.AOI(
geometry=shapely.geometry.box(-60.0, 30.0, -50.0, 40.0),
resolution=1,
crs="EPSG:4326",
align_pixels=False,
)
ctx = GeoContext.from_scenes(aoi)
coords = ctx.index_to_coords(0, 0)
assert isinstance(coords, Tuple[Float, Float])
def test_coords_to_index():
aoi = scenes.AOI(
geometry=shapely.geometry.box(-60.0, 30.0, -50.0, 40.0),
resolution=1,
crs="EPSG:4326",
align_pixels=False,
)
ctx = GeoContext.from_scenes(aoi)
ctx = GeoContext._promote(ctx)
index = ctx.coords_to_index(0.0, 1.0)
assert isinstance(index, Tuple[Int, Int])
|
nmrglue/util/misc.py | genematx/nmrglue | 150 | 12737639 | <filename>nmrglue/util/misc.py
"""
Misc. functions
"""
from __future__ import print_function
import numpy as np
# default tolerences
RTOL = 1.001e-01
ATOL = 1.001e-01
DTOL = 5.001e-01
def pair_similar(dic1, data1, dic2, data2, verb=False, atol=ATOL, rtol=RTOL,
dtol=DTOL, ignore_pipe_display=False):
"""
Check a dic, data pair against a second dic, data pair for differences.
Parameters
----------
dic1 : dict
First dictionary of NMR parameters.
data1 : ndarray
First array of NMR data
dic2 : dict
Second dictionary of NMR parameters
data2 : ndarray
Second array of NMR data
verb : bool, optional
Set True for verbose reporting.
atol : float, optional
        The absolute tolerance parameter to pass to numpy.allclose.
rtol : float, optional
        The relative tolerance parameter to pass to numpy.allclose.
Returns
-------
r1 : bool
        True if data1 and data2 are similar, False if they differ.
r2 : bool
        True if dic1 and dic2 are similar, False if they differ.
"""
r1 = isdatasimilar(data1, data2, verb, atol, rtol)
r2 = isdicsimilar(dict(dic1), dict(dic2), verb, dtol,
ignore_pipe_display=ignore_pipe_display)
return r1, r2
def isdatasimilar(data1, data2, verb=False, atol=ATOL, rtol=RTOL):
"""
Check that two sets of NMR data are equal within a tolerance.
Parameters
----------
data1 : ndarray
First array of NMR data
data2 : ndarray
Second array of NMR data
verb : bool, optional
Set True for verbose reporting.
atol : float, optional
        The absolute tolerance parameter to pass to numpy.allclose.
rtol : float, optional
        The relative tolerance parameter to pass to numpy.allclose.
Returns
-------
r1 : bool
        True if data1 and data2 are similar, False if they differ.
"""
r = True
if data1.dtype != data2.dtype:
r = False
if verb:
print("Dtypes do not match:", data1.dtype, data2.dtype)
if data1.shape != data2.shape:
r = False
if verb:
print("Shapes do not match:", data1.shape, data2.shape)
if np.allclose(data1, data2, rtol=rtol, atol=atol) is False:
r = False
if verb:
print("Data does not match")
return r
def isitemsimilar(v1, v2, verb=False, dtol=DTOL):
"""
Compare two values for differences
See :py:func:`isdicsimilar` for Parameters.
"""
r = True
# type checking
if type(v1) != type(v2):
r = False
if verb:
print("Item has different type", type(v1), type(v2))
# iterable checking
elif isinstance(v1, dict):
r = r and isdicsimilar(v1, v2, verb=verb, dtol=dtol)
elif isinstance(v1, list):
r = r and islistsimilar(v1, v2, verb=verb, dtol=dtol)
# numeric type
elif isinstance(v1, (int, float)):
if abs(v1 - v2) > dtol:
r = False
if verb:
print("Key mismatch:", v1, v2)
# all other types: just check if equal
else:
if v1 != v2:
r = False
if verb:
print("Key mismatch:", v1, v2)
return r
def isdicsimilar(dic1, dic2, verb=False, dtol=DTOL, ignore_pipe_display=False):
"""
Compare two dictionaries for differences
    Float and int types are compared within dtol. Lists and dictionaries are
    checked recursively; all other types are checked by simple equivalence.
Parameters
----------
dic1 : dict
First dictionary of NMR parameters.
dic2 : dict
Second dictionary of NMR parameters
verb : bool, optional
Set True for verbose reporting.
dtol : float, optional
        Maximum allowable difference between int and float elements of dic1
and dic2.
Returns
-------
r1 : bool
        True if dic1 and dic2 are similar, False if they differ.
"""
# create copies of the two dictionaries
dic1 = dict(dic1)
dic2 = dict(dic2)
# set return value to True
r = True
# create sets
kset1 = set(dic1.keys())
kset2 = set(dic2.keys())
dset = set.difference(kset1, kset2)
iset = set.intersection(kset1, kset2)
if ignore_pipe_display is True:
iset.discard('FDMIN')
iset.discard('FDMAX')
iset.discard('FDDISPMIN')
iset.discard('FDDISPMAX')
iset.discard('FDSCALEFLAG')
# print out any keys not in both dictionaries
if len(dset) != 0:
r = False
if verb:
print("Keys not in both dictionaries:", dset)
# loop over keys in both sets
for k in iset:
v1, v2 = dic1[k], dic2[k]
if not isitemsimilar(v1, v2, verb=verb, dtol=dtol):
print("For key:", k)
r = False
return r
def islistsimilar(l1, l2, verb=False, dtol=DTOL):
"""
Compare two lists (or iterable) for differences
See :py:func:`isdicsimilar` for Parameters.
"""
# set return value to True
r = True
# print out any keys not in both dictionaries
if len(l1) != len(l2):
r = False
if verb:
print("Lists not of same length:", len(l1), len(l2))
# loop over keys in both sets
for v1, v2 in zip(l1, l2):
if not isitemsimilar(v1, v2, verb=verb, dtol=dtol):
r = False
return r
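# Hedged usage sketch added for illustration; the arrays and dictionaries below are
# made-up values, not data shipped with nmrglue.
if __name__ == "__main__":
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([1.0, 2.0, 3.05])
    print(isdatasimilar(a, b, verb=True))  # True: difference is within ATOL/RTOL
    print(isdicsimilar({"sw": 1000.0}, {"sw": 1000.4}, verb=True))  # True: within DTOL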
|
tests/flow/test_tdigest.py | simonprickett/RedisBloom | 883 | 12737643 | #!/usr/bin/env python3
import os
from random import randint
from RLTest import Env
from redis import ResponseError
import redis
import sys
import random
import math
is_valgrind = "VGD" in os.environ or "VALGRIND" in os.environ
def parse_tdigest_info(array_reply):
reply_dict = {}
for pos in range(0, len(array_reply), 2):
property_name = array_reply[pos]
property_value = array_reply[pos + 1]
reply_dict[property_name] = property_value
return reply_dict
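# Illustrative note (added, not part of the original test suite): parse_tdigest_info
# turns the flat [name1, value1, name2, value2, ...] reply of TDIGEST.INFO into a dict:
#   parse_tdigest_info(["Compression", 100, "Merged weight", 0])
#   -> {"Compression": 100, "Merged weight": 0}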
class testTDigest:
def __init__(self):
self.env = Env(decodeResponses=True)
self.assertOk = self.env.assertTrue
self.cmd = self.env.cmd
self.assertEqual = self.env.assertEqual
self.assertRaises = self.env.assertRaises
self.assertTrue = self.env.assertTrue
self.assertAlmostEqual = self.env.assertAlmostEqual
self.assertGreater = self.env.assertGreater
self.restart_and_reload = self.env.restartAndReload
def test_tdigest_create(self):
for compression in range(100, 1000, 100):
self.assertOk(self.cmd("tdigest.create", "tdigest", compression))
self.assertEqual(
compression,
parse_tdigest_info(self.cmd("tdigest.info", "tdigest"))["Compression"],
)
def test_negative_tdigest_create(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest", 100
)
self.cmd("DEL", "tdigest")
# arity lower
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest"
)
# arity upper
self.assertRaises(
redis.exceptions.ResponseError,
self.cmd,
"tdigest.create",
"tdigest",
100,
5,
)
# parsing
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest", "a"
)
# compression negative/zero value
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest", 0
)
# compression negative/zero value
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.create", "tdigest", -1
)
def test_tdigest_reset(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# reset on empty histogram
self.assertOk(self.cmd("tdigest.reset", "tdigest"))
# insert datapoints into sketch
for x in range(100):
self.assertOk(self.cmd("tdigest.add", "tdigest", random.random(), 1.0))
# assert we have 100 unmerged nodes
self.assertEqual(
100,
parse_tdigest_info(self.cmd("tdigest.info", "tdigest"))["Unmerged nodes"],
)
self.assertOk(self.cmd("tdigest.reset", "tdigest"))
        # assert we have 0 unmerged nodes after the reset
self.assertEqual(
0, parse_tdigest_info(self.cmd("tdigest.info", "tdigest"))["Unmerged nodes"]
)
def test_negative_tdigest_reset(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.reset", "tdigest"
)
self.cmd("DEL", "tdigest")
# empty key
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.reset", "tdigest"
)
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.reset")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.reset", "tdigest", 100
)
def test_tdigest_add(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# reset on empty histogram
self.assertOk(self.cmd("tdigest.reset", "tdigest"))
# insert datapoints into sketch
for x in range(10000):
self.assertOk(
self.cmd(
"tdigest.add",
"tdigest",
random.random() * 10000,
random.random() * 500 + 1.0,
)
)
def test_negative_tdigest_add(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.add", "tdigest", 100, 100
)
self.cmd("DEL", "tdigest")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.add", "tdigest"
)
# arity upper
self.assertRaises(
ResponseError, self.cmd, "tdigest.add", "tdigest", 100, 5, 100.0
)
# key does not exist
self.assertRaises(
ResponseError, self.cmd, "tdigest.add", "dont-exist", 100, 100
)
# parsing
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.add", "tdigest", "a", 5
)
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.add", "tdigest", 5.0, "a"
)
def test_tdigest_merge(self):
self.assertOk(self.cmd("tdigest.create", "to-tdigest", 100))
self.assertOk(self.cmd("tdigest.create", "from-tdigest", 100))
# insert datapoints into sketch
for _ in range(100):
self.assertOk(self.cmd("tdigest.add", "from-tdigest", 1.0, 1.0))
for _ in range(100):
self.assertOk(self.cmd("tdigest.add", "to-tdigest", 1.0, 10.0))
# merge from-tdigest into to-tdigest
self.assertOk(self.cmd("tdigest.merge", "to-tdigest", "from-tdigest"))
# we should now have 1100 weight on to-histogram
to_info = parse_tdigest_info(self.cmd("tdigest.info", "to-tdigest"))
total_weight_to = float(to_info["Merged weight"]) + float(
to_info["Unmerged weight"]
)
self.assertEqual(1100, total_weight_to)
def test_tdigest_merge_to_empty(self):
self.assertOk(self.cmd("tdigest.create", "to-tdigest", 100))
self.assertOk(self.cmd("tdigest.create", "from-tdigest", 100))
# insert datapoints into sketch
for _ in range(100):
self.assertOk(self.cmd("tdigest.add", "from-tdigest", 1.0, 1.0))
# merge from-tdigest into to-tdigest
self.assertOk(self.cmd("tdigest.merge", "to-tdigest", "from-tdigest"))
# assert we have same merged weight on both histograms ( given the to-histogram was empty )
from_info = parse_tdigest_info(self.cmd("tdigest.info", "from-tdigest"))
total_weight_from = float(from_info["Merged weight"]) + float(
from_info["Unmerged weight"]
)
to_info = parse_tdigest_info(self.cmd("tdigest.info", "to-tdigest"))
total_weight_to = float(to_info["Merged weight"]) + float(
to_info["Unmerged weight"]
)
self.assertEqual(total_weight_from, total_weight_to)
def test_negative_tdigest_merge(self):
self.cmd("SET", "to-tdigest", "B")
self.cmd("SET", "from-tdigest", "B")
# WRONGTYPE
self.assertRaises(
ResponseError, self.cmd, "tdigest.merge", "to-tdigest", "from-tdigest"
)
self.cmd("DEL", "to-tdigest")
self.assertOk(self.cmd("tdigest.create", "to-tdigest", 100))
self.assertRaises(
ResponseError, self.cmd, "tdigest.merge", "to-tdigest", "from-tdigest"
)
self.cmd("DEL", "from-tdigest")
self.assertOk(self.cmd("tdigest.create", "from-tdigest", 100))
# arity lower
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.merge", "to-tdigest"
)
# arity upper
self.assertRaises(
ResponseError,
self.cmd,
"tdigest.merge",
"to-tdigest",
"from-tdigest",
"from-tdigest",
)
# key does not exist
self.assertRaises(
ResponseError, self.cmd, "tdigest.merge", "dont-exist", "to-tdigest"
)
self.assertRaises(
ResponseError, self.cmd, "tdigest.merge", "to-tdigest", "dont-exist"
)
def test_tdigest_min_max(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# test for no datapoints first
self.assertEqual(sys.float_info.max, float(self.cmd("tdigest.min", "tdigest")))
self.assertEqual(sys.float_info.min, float(self.cmd("tdigest.max", "tdigest")))
# insert datapoints into sketch
for x in range(1, 101):
self.assertOk(self.cmd("tdigest.add", "tdigest", x, 1.0))
# min/max
self.assertEqual(100, float(self.cmd("tdigest.max", "tdigest")))
self.assertEqual(1, float(self.cmd("tdigest.min", "tdigest")))
def test_negative_tdigest_min_max(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.min", "tdigest"
)
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.max", "tdigest"
)
# key does not exist
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.min", "dont-exist"
)
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.max", "dont-exist"
)
self.cmd("DEL", "tdigest", "B")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.min")
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.max")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.min", "tdigest", 1
)
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.max", "tdigest", 1
)
def test_tdigest_quantile(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 500))
# insert datapoints into sketch
for x in range(1, 10000):
self.assertOk(self.cmd("tdigest.add", "tdigest", x * 0.01, 1.0))
# assert min min/max have same result as quantile 0 and 1
self.assertEqual(
float(self.cmd("tdigest.max", "tdigest")),
float(self.cmd("tdigest.quantile", "tdigest", 1.0)),
)
self.assertEqual(
float(self.cmd("tdigest.min", "tdigest")),
float(self.cmd("tdigest.quantile", "tdigest", 0.0)),
)
self.assertAlmostEqual(
1.0, float(self.cmd("tdigest.quantile", "tdigest", 0.01)), 0.01
)
self.assertAlmostEqual(
99.0, float(self.cmd("tdigest.quantile", "tdigest", 0.99)), 0.01
)
def test_negative_tdigest_quantile(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.quantile", "tdigest", 0.9
)
# key does not exist
self.assertRaises(
ResponseError, self.cmd, "tdigest.quantile", "dont-exist", 0.9
)
self.cmd("DEL", "tdigest", "B")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.quantile")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError,
self.cmd,
"tdigest.quantile",
"tdigest",
1,
1,
)
# parsing
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.quantile", "tdigest", "a"
)
def test_tdigest_cdf(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 500))
# insert datapoints into sketch
for x in range(1, 100):
self.assertOk(self.cmd("tdigest.add", "tdigest", x, 1.0))
self.assertAlmostEqual(
0.01, float(self.cmd("tdigest.cdf", "tdigest", 1.0)), 0.01
)
self.assertAlmostEqual(
0.99, float(self.cmd("tdigest.cdf", "tdigest", 99.0)), 0.01
)
def test_negative_tdigest_cdf(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.cdf", "tdigest", 0.9
)
# key does not exist
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.cdf", "dont-exist", 0.9
)
self.cmd("DEL", "tdigest", "B")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.cdf")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.cdf", "tdigest", 1, 1
)
# parsing
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.cdf", "tdigest", "a"
)
def test_negative_tdigest_info(self):
self.cmd("SET", "tdigest", "B")
# WRONGTYPE
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.info", "tdigest"
)
# dont exist
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.info", "dont-exist"
)
self.cmd("DEL", "tdigest", "B")
self.assertOk(self.cmd("tdigest.create", "tdigest", 100))
# arity lower
self.assertRaises(redis.exceptions.ResponseError, self.cmd, "tdigest.info")
# arity upper
self.assertRaises(
redis.exceptions.ResponseError, self.cmd, "tdigest.info", "tdigest", 1
)
def test_save_load(self):
self.assertOk(self.cmd("tdigest.create", "tdigest", 500))
# insert datapoints into sketch
for _ in range(1, 101):
self.assertOk(self.cmd("tdigest.add", "tdigest", 1.0, 1.0))
self.assertEqual(True, self.cmd("SAVE"))
mem_usage_prior_restart = self.cmd("MEMORY", "USAGE", "tdigest")
self.restart_and_reload()
        # assert the key still exists with a merged weight of 100 after the reload
self.assertEqual(1, self.cmd("EXISTS", "tdigest"))
self.assertEqual(
100,
float(
parse_tdigest_info(self.cmd("tdigest.info", "tdigest"))["Merged weight"]
),
)
mem_usage_after_restart = self.cmd("MEMORY", "USAGE", "tdigest")
self.assertEqual(mem_usage_prior_restart, mem_usage_after_restart)
|
stanza/utils/datasets/constituency/vtb_convert.py | asears/stanza | 3,633 | 12737644 | <gh_stars>1000+
"""
Script for processing the VTB files and turning their trees into the desired tree syntax
The VTB original trees are stored in the directory:
VietTreebank_VLSP_SP73/Kho ngu lieu 10000 cay cu phap
The script requires two arguments:
1. Original directory storing the original trees
2. New directory storing the converted trees
"""
import os
import argparse
def convert_file(org_dir, new_dir):
"""
    :param org_dir: path to the original tree file to read
    :param new_dir: path of the output file for the converted tree
    This function writes the converted tree to new_dir
"""
with open(org_dir, 'r') as reader, open(new_dir, 'w') as writer:
content = reader.readlines()
for line in content:
line = ' '.join(line.split())
if line == '':
continue
elif line == '<s>':
writer.write('(ROOT ')
elif line == '</s>':
writer.write(')\n')
else:
writer.write(line)
def main():
"""
Main function for the script
Process args, loop through each file in the directory and convert
to the desired tree format
"""
parser = argparse.ArgumentParser(
description="Script that converts a VTB Tree into the desired format",
)
parser.add_argument(
'org_dir',
help='The location of the original directory storing original trees '
)
parser.add_argument(
'new_dir',
help='The location of new directory storing the new formatted trees'
)
args = parser.parse_args()
org_dir = args.org_dir
new_dir = args.new_dir
for filename in os.listdir(org_dir):
file_name, file_extension = os.path.splitext(filename)
# Only convert .prd files, skip the .raw files
if file_extension == '.raw':
continue
file_path = os.path.join(org_dir, filename)
new_path = os.path.join(new_dir, file_name)
new_file_path = f'{new_path}.mrg'
# Convert the tree and write to new_file_path
convert_file(file_path, new_file_path)
if __name__ == '__main__':
main()
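# Illustrative example of the conversion this script performs (paths are hypothetical):
#   python vtb_convert.py VietTreebank_VLSP_SP73/trees/ converted_trees/
# A .prd file containing
#   <s>
#   (S (NP ...) (VP ...))
#   </s>
# becomes a one-line bracketed tree in converted_trees/<name>.mrg:
#   (ROOT (S (NP ...) (VP ...)))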
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.py | CiscoDevNet/ydk-py | 177 | 12737651 | """ Cisco_IOS_XR_asr9k_sc_envmon_admin_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR asr9k\-sc\-envmon package
admin\-plane operational data.
This module contains definitions
for the following management objects\:
environmental\-monitoring\: Admin Environmental Monitoring
Operational data space
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class EnvironmentalMonitoring(_Entity_):
"""
Admin Environmental Monitoring Operational data
space
.. attribute:: racks
Table of racks
**type**\: :py:class:`Racks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring, self).__init__()
self._top_entity = None
self.yang_name = "environmental-monitoring"
self.yang_parent_name = "Cisco-IOS-XR-asr9k-sc-envmon-admin-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("racks", ("racks", EnvironmentalMonitoring.Racks))])
self._leafs = OrderedDict()
self.racks = EnvironmentalMonitoring.Racks()
self.racks.parent = self
self._children_name_map["racks"] = "racks"
self._segment_path = lambda: "Cisco-IOS-XR-asr9k-sc-envmon-admin-oper:environmental-monitoring"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring, [], name, value)
class Racks(_Entity_):
"""
Table of racks
.. attribute:: rack
Number
**type**\: list of :py:class:`Rack <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks, self).__init__()
self.yang_name = "racks"
self.yang_parent_name = "environmental-monitoring"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("rack", ("rack", EnvironmentalMonitoring.Racks.Rack))])
self._leafs = OrderedDict()
self.rack = YList(self)
self._segment_path = lambda: "racks"
self._absolute_path = lambda: "Cisco-IOS-XR-asr9k-sc-envmon-admin-oper:environmental-monitoring/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks, [], name, value)
class Rack(_Entity_):
"""
Number
.. attribute:: rack (key)
Rack number
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: slots
Table of slots
**type**\: :py:class:`Slots <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack, self).__init__()
self.yang_name = "rack"
self.yang_parent_name = "racks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['rack']
self._child_classes = OrderedDict([("slots", ("slots", EnvironmentalMonitoring.Racks.Rack.Slots))])
self._leafs = OrderedDict([
('rack', (YLeaf(YType.uint32, 'rack'), ['int'])),
])
self.rack = None
self.slots = EnvironmentalMonitoring.Racks.Rack.Slots()
self.slots.parent = self
self._children_name_map["slots"] = "slots"
self._segment_path = lambda: "rack" + "[rack='" + str(self.rack) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-asr9k-sc-envmon-admin-oper:environmental-monitoring/racks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack, ['rack'], name, value)
class Slots(_Entity_):
"""
Table of slots
.. attribute:: slot
Name
**type**\: list of :py:class:`Slot <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots, self).__init__()
self.yang_name = "slots"
self.yang_parent_name = "rack"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("slot", ("slot", EnvironmentalMonitoring.Racks.Rack.Slots.Slot))])
self._leafs = OrderedDict()
self.slot = YList(self)
self._segment_path = lambda: "slots"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots, [], name, value)
class Slot(_Entity_):
"""
Name
.. attribute:: slot (key)
Slot name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: modules
Table of modules
**type**\: :py:class:`Modules <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot, self).__init__()
self.yang_name = "slot"
self.yang_parent_name = "slots"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['slot']
self._child_classes = OrderedDict([("modules", ("modules", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules))])
self._leafs = OrderedDict([
('slot', (YLeaf(YType.str, 'slot'), ['str'])),
])
self.slot = None
self.modules = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules()
self.modules.parent = self
self._children_name_map["modules"] = "modules"
self._segment_path = lambda: "slot" + "[slot='" + str(self.slot) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot, ['slot'], name, value)
class Modules(_Entity_):
"""
Table of modules
.. attribute:: module
Name
**type**\: list of :py:class:`Module <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules, self).__init__()
self.yang_name = "modules"
self.yang_parent_name = "slot"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("module", ("module", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module))])
self._leafs = OrderedDict()
self.module = YList(self)
self._segment_path = lambda: "modules"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules, [], name, value)
class Module(_Entity_):
"""
Name
.. attribute:: module (key)
Module name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: sensor_types
Table of sensor types
**type**\: :py:class:`SensorTypes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes>`
**config**\: False
.. attribute:: power
Module Power Draw
**type**\: :py:class:`Power <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module, self).__init__()
self.yang_name = "module"
self.yang_parent_name = "modules"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['module']
self._child_classes = OrderedDict([("sensor-types", ("sensor_types", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes)), ("power", ("power", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power))])
self._leafs = OrderedDict([
('module', (YLeaf(YType.str, 'module'), ['str'])),
])
self.module = None
self.sensor_types = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes()
self.sensor_types.parent = self
self._children_name_map["sensor_types"] = "sensor-types"
self.power = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power()
self.power.parent = self
self._children_name_map["power"] = "power"
self._segment_path = lambda: "module" + "[module='" + str(self.module) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module, ['module'], name, value)
class SensorTypes(_Entity_):
"""
Table of sensor types
.. attribute:: sensor_type
Type of sensor
**type**\: list of :py:class:`SensorType <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes, self).__init__()
self.yang_name = "sensor-types"
self.yang_parent_name = "module"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("sensor-type", ("sensor_type", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType))])
self._leafs = OrderedDict()
self.sensor_type = YList(self)
self._segment_path = lambda: "sensor-types"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes, [], name, value)
class SensorType(_Entity_):
"""
Type of sensor
.. attribute:: type (key)
Sensor type
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: sensor_names
Table of sensors
**type**\: :py:class:`SensorNames <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType, self).__init__()
self.yang_name = "sensor-type"
self.yang_parent_name = "sensor-types"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['type']
self._child_classes = OrderedDict([("sensor-names", ("sensor_names", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames))])
self._leafs = OrderedDict([
('type', (YLeaf(YType.str, 'type'), ['str'])),
])
self.type = None
self.sensor_names = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames()
self.sensor_names.parent = self
self._children_name_map["sensor_names"] = "sensor-names"
self._segment_path = lambda: "sensor-type" + "[type='" + str(self.type) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType, ['type'], name, value)
class SensorNames(_Entity_):
"""
Table of sensors
.. attribute:: sensor_name
Name of sensor
**type**\: list of :py:class:`SensorName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames, self).__init__()
self.yang_name = "sensor-names"
self.yang_parent_name = "sensor-type"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("sensor-name", ("sensor_name", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName))])
self._leafs = OrderedDict()
self.sensor_name = YList(self)
self._segment_path = lambda: "sensor-names"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames, [], name, value)
class SensorName(_Entity_):
"""
Name of sensor
.. attribute:: name (key)
Sensor name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: thresholds
The threshold information
**type**\: :py:class:`Thresholds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds>`
**config**\: False
.. attribute:: value_detailed
Detailed sensor information including the sensor value
**type**\: :py:class:`ValueDetailed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed>`
**config**\: False
.. attribute:: value_brief
The sensor value
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName, self).__init__()
self.yang_name = "sensor-name"
self.yang_parent_name = "sensor-names"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([("thresholds", ("thresholds", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds)), ("value-detailed", ("value_detailed", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed))])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
('value_brief', (YLeaf(YType.str, 'value-brief'), ['str'])),
])
self.name = None
self.value_brief = None
self.thresholds = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds()
self.thresholds.parent = self
self._children_name_map["thresholds"] = "thresholds"
self.value_detailed = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed()
self.value_detailed.parent = self
self._children_name_map["value_detailed"] = "value-detailed"
self._segment_path = lambda: "sensor-name" + "[name='" + str(self.name) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName, ['name', 'value_brief'], name, value)
class Thresholds(_Entity_):
"""
The threshold information
.. attribute:: threshold
Types of thresholds
**type**\: list of :py:class:`Threshold <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds, self).__init__()
self.yang_name = "thresholds"
self.yang_parent_name = "sensor-name"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("threshold", ("threshold", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold))])
self._leafs = OrderedDict()
self.threshold = YList(self)
self._segment_path = lambda: "thresholds"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds, [], name, value)
class Threshold(_Entity_):
"""
Types of thresholds
.. attribute:: type (key)
Threshold type
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
**config**\: False
.. attribute:: value_detailed
Detailed sensor threshold information
**type**\: :py:class:`ValueDetailed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed>`
**config**\: False
.. attribute:: trap
Threshold trap enable flag true\-ENABLE, false\-DISABLE
**type**\: bool
**config**\: False
.. attribute:: value_brief
Threshold value for the sensor
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold, self).__init__()
self.yang_name = "threshold"
self.yang_parent_name = "thresholds"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['type']
self._child_classes = OrderedDict([("value-detailed", ("value_detailed", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed))])
self._leafs = OrderedDict([
('type', (YLeaf(YType.str, 'type'), ['str'])),
('trap', (YLeaf(YType.boolean, 'trap'), ['bool'])),
('value_brief', (YLeaf(YType.str, 'value-brief'), ['str'])),
])
self.type = None
self.trap = None
self.value_brief = None
self.value_detailed = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed()
self.value_detailed.parent = self
self._children_name_map["value_detailed"] = "value-detailed"
self._segment_path = lambda: "threshold" + "[type='" + str(self.type) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold, ['type', 'trap', 'value_brief'], name, value)
class ValueDetailed(_Entity_):
"""
Detailed sensor threshold
information
.. attribute:: threshold_severity
Indicates minor, major, critical severities
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_relation
Indicates relation between sensor value and threshold
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_value
Value of the configured threshold
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_evaluation
Indicates the result of the most recent evaluation of the thresholD
**type**\: bool
**config**\: False
.. attribute:: threshold_notification_enabled
Indicates whether or not a notification should result, in case of threshold violation
**type**\: bool
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed, self).__init__()
self.yang_name = "value-detailed"
self.yang_parent_name = "threshold"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('threshold_severity', (YLeaf(YType.uint32, 'threshold-severity'), ['int'])),
('threshold_relation', (YLeaf(YType.uint32, 'threshold-relation'), ['int'])),
('threshold_value', (YLeaf(YType.uint32, 'threshold-value'), ['int'])),
('threshold_evaluation', (YLeaf(YType.boolean, 'threshold-evaluation'), ['bool'])),
('threshold_notification_enabled', (YLeaf(YType.boolean, 'threshold-notification-enabled'), ['bool'])),
])
self.threshold_severity = None
self.threshold_relation = None
self.threshold_value = None
self.threshold_evaluation = None
self.threshold_notification_enabled = None
self._segment_path = lambda: "value-detailed"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed, ['threshold_severity', 'threshold_relation', 'threshold_value', 'threshold_evaluation', 'threshold_notification_enabled'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold.ValueDetailed']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds.Threshold']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.Thresholds']['meta_info']
class ValueDetailed(_Entity_):
"""
Detailed sensor information including
the sensor value
.. attribute:: field_validity_bitmap
Sensor valid bitmap
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: device_description
Device Name
**type**\: str
**length:** 0..50
**config**\: False
.. attribute:: units
Units of variable being read
**type**\: str
**length:** 0..50
**config**\: False
.. attribute:: device_id
Identifier for this device
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: value
Current reading of sensor
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: alarm_type
Indicates threshold violation
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: data_type
Sensor data type enums
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: scale
Sensor scale enums
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: precision
Sensor precision range
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: status
Sensor operation state enums
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: age_time_stamp
Age of the sensor value; set to the current time if directly access the value from sensor
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: update_rate
Sensor value update rate;set to 0 if sensor value is updated and evaluated immediately
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: average
Average sensor value over time interval
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: minimum
Minimum Sensor value over time interval
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: maximum
Maximum Sensor value over time interval
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: interval
Time Interval over which sensor value is monitored
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed, self).__init__()
self.yang_name = "value-detailed"
self.yang_parent_name = "sensor-name"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('field_validity_bitmap', (YLeaf(YType.uint32, 'field-validity-bitmap'), ['int'])),
('device_description', (YLeaf(YType.str, 'device-description'), ['str'])),
('units', (YLeaf(YType.str, 'units'), ['str'])),
('device_id', (YLeaf(YType.uint32, 'device-id'), ['int'])),
('value', (YLeaf(YType.uint32, 'value'), ['int'])),
('alarm_type', (YLeaf(YType.uint32, 'alarm-type'), ['int'])),
('data_type', (YLeaf(YType.uint32, 'data-type'), ['int'])),
('scale', (YLeaf(YType.uint32, 'scale'), ['int'])),
('precision', (YLeaf(YType.uint32, 'precision'), ['int'])),
('status', (YLeaf(YType.uint32, 'status'), ['int'])),
('age_time_stamp', (YLeaf(YType.uint32, 'age-time-stamp'), ['int'])),
('update_rate', (YLeaf(YType.uint32, 'update-rate'), ['int'])),
('average', (YLeaf(YType.int32, 'average'), ['int'])),
('minimum', (YLeaf(YType.int32, 'minimum'), ['int'])),
('maximum', (YLeaf(YType.int32, 'maximum'), ['int'])),
('interval', (YLeaf(YType.int32, 'interval'), ['int'])),
])
self.field_validity_bitmap = None
self.device_description = None
self.units = None
self.device_id = None
self.value = None
self.alarm_type = None
self.data_type = None
self.scale = None
self.precision = None
self.status = None
self.age_time_stamp = None
self.update_rate = None
self.average = None
self.minimum = None
self.maximum = None
self.interval = None
self._segment_path = lambda: "value-detailed"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed, ['field_validity_bitmap', 'device_description', 'units', 'device_id', 'value', 'alarm_type', 'data_type', 'scale', 'precision', 'status', 'age_time_stamp', 'update_rate', 'average', 'minimum', 'maximum', 'interval'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName.ValueDetailed']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames.SensorName']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType.SensorNames']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes.SensorType']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.SensorTypes']['meta_info']
class Power(_Entity_):
"""
Module Power Draw
.. attribute:: power_bag
Detailed power bag information
**type**\: :py:class:`PowerBag <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_admin_oper.EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power, self).__init__()
self.yang_name = "power"
self.yang_parent_name = "module"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("power-bag", ("power_bag", EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag))])
self._leafs = OrderedDict()
self.power_bag = EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag()
self.power_bag.parent = self
self._children_name_map["power_bag"] = "power-bag"
self._segment_path = lambda: "power"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power, [], name, value)
class PowerBag(_Entity_):
"""
Detailed power bag information
.. attribute:: power_value
Current Power Value of the Unit
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: power_max_value
Max Power Value of the Unit
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: power_unit_multiplier
Unit Multiplier of Power
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_accuracy
Accuracy of the Power Value
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_measure_caliber
Measure Caliber
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_current_type
Current Type of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_origin
The Power Origin of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_admin_state
Admin Status of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_oper_state
Oper Status of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_state_enter_reason
Enter Reason for the State
**type**\: str
**length:** 0..50
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-admin-oper'
_revision = '2017-01-19'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag, self).__init__()
self.yang_name = "power-bag"
self.yang_parent_name = "power"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('power_value', (YLeaf(YType.int32, 'power-value'), ['int'])),
('power_max_value', (YLeaf(YType.int32, 'power-max-value'), ['int'])),
('power_unit_multiplier', (YLeaf(YType.uint32, 'power-unit-multiplier'), ['int'])),
('power_accuracy', (YLeaf(YType.uint32, 'power-accuracy'), ['int'])),
('power_measure_caliber', (YLeaf(YType.uint32, 'power-measure-caliber'), ['int'])),
('power_current_type', (YLeaf(YType.uint32, 'power-current-type'), ['int'])),
('power_origin', (YLeaf(YType.uint32, 'power-origin'), ['int'])),
('power_admin_state', (YLeaf(YType.uint32, 'power-admin-state'), ['int'])),
('power_oper_state', (YLeaf(YType.uint32, 'power-oper-state'), ['int'])),
('power_state_enter_reason', (YLeaf(YType.str, 'power-state-enter-reason'), ['str'])),
])
self.power_value = None
self.power_max_value = None
self.power_unit_multiplier = None
self.power_accuracy = None
self.power_measure_caliber = None
self.power_current_type = None
self.power_origin = None
self.power_admin_state = None
self.power_oper_state = None
self.power_state_enter_reason = None
self._segment_path = lambda: "power-bag"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag, ['power_value', 'power_max_value', 'power_unit_multiplier', 'power_accuracy', 'power_measure_caliber', 'power_current_type', 'power_origin', 'power_admin_state', 'power_oper_state', 'power_state_enter_reason'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power.PowerBag']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module.Power']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules.Module']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot.Modules']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots.Slot']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack.Slots']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks.Rack']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring.Racks']['meta_info']
def clone_ptr(self):
self._top_entity = EnvironmentalMonitoring()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_admin_oper as meta
return meta._meta_table['EnvironmentalMonitoring']['meta_info']
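# Hedged usage sketch (standard YDK boilerplate, not part of the generated
# bindings themselves): operational data under this model is normally fetched
# with a CRUD read on the top-level EnvironmentalMonitoring entity, e.g.
#
#   from ydk.services import CRUDService
#   from ydk.providers import NetconfServiceProvider
#   provider = NetconfServiceProvider(address="<router>", username="...", password="...")
#   crud = CRUDService()
#   env = crud.read(provider, EnvironmentalMonitoring())
#
# after which the nested Racks/Slots/Modules/Power/PowerBag containers expose
# the leafs declared above (power_value, power_oper_state, ...). The address
# and credentials are placeholders.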
|
services/web/manage.py | asomsiko/flask-on-docker | 207 | 12737665 | <reponame>asomsiko/flask-on-docker<filename>services/web/manage.py
from flask.cli import FlaskGroup
from project import app, db, User
cli = FlaskGroup(app)
@cli.command("create_db")
def create_db():
db.drop_all()
db.create_all()
db.session.commit()
@cli.command("seed_db")
def seed_db():
db.session.add(User(email="<EMAIL>"))
db.session.commit()
if __name__ == "__main__":
cli()
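# Hedged usage sketch: FlaskGroup exposes the two commands above through the
# regular Flask/Click CLI, so (assuming this file is the entrypoint) they are
# typically run as
#   python manage.py create_db
#   python manage.py seed_db
# In a docker-compose setup the same commands are usually wrapped, e.g.
# `docker-compose exec web python manage.py create_db` (service name assumed).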
|
astropy/wcs/wcsapi/__init__.py | jayvdb/astropy | 445 | 12737691 | from .low_level_api import *
from .high_level_api import *
from .high_level_wcs_wrapper import *
from .utils import *
from .sliced_low_level_wcs import *
|
moto/apigateway/integration_parsers/unknown_parser.py | gtourkas/moto | 5,460 | 12737697 | class TypeUnknownParser:
"""
    Parse invocations to an APIGateway resource with an unknown integration type
"""
def invoke(self, request, integration):
_type = integration["type"]
raise NotImplementedError("The {0} type has not been implemented".format(_type))
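# Hedged usage sketch (illustration only, not part of moto): the parser only
# inspects the integration's "type" key and always fails loudly. The guarded
# demo below uses a made-up integration dict and no real request object.
if __name__ == "__main__":
    parser = TypeUnknownParser()
    try:
        parser.invoke(request=None, integration={"type": "SOMETHING_NEW"})
    except NotImplementedError as exc:
        print(exc)  # The SOMETHING_NEW type has not been implemented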
|
src/dispatch/plugins/dispatch_opsgenie/plugin.py | axellaurelut/dispatch | 3,417 | 12737700 | """
.. module:: dispatch.plugins.dispatch_opsgenie.plugin
:platform: Unix
:copyright: (c) 2019 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
"""
import logging
from pydantic import Field, SecretStr
from dispatch.config import BaseConfigurationModel
from dispatch.decorators import apply, counter, timer
from dispatch.plugins.bases import OncallPlugin
from .service import get_oncall, page_oncall
__version__ = "0.1.0"
log = logging.getLogger(__name__)
class OpsgenieConfiguration(BaseConfigurationModel):
"""Opsgenie configuration description."""
api_key: SecretStr = Field(
        title="API Key", description="This is the key used to talk to the Opsgenie API."
)
@apply(counter, exclude=["__init__"])
@apply(timer, exclude=["__init__"])
class OpsGenieOncallPlugin(OncallPlugin):
title = "OpsGenie Plugin - Oncall Management"
slug = "opsgenie-oncall"
author = "stefanm8"
author_url = "https://github.com/Netflix/dispatch"
description = "Uses Opsgenie to resolve and page oncall teams."
version = __version__
def __init__(self):
self.configuration_schema = OpsgenieConfiguration
def get(self, service_id: str, **kwargs):
return get_oncall(self.configuration.api_key, service_id)
def page(
self,
service_id: str,
incident_name: str,
incident_title: str,
incident_description: str,
**kwargs,
):
return page_oncall(
self.configuration.api_key,
service_id,
incident_name,
incident_title,
incident_description,
)
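# Hedged configuration sketch: in a running Dispatch instance the framework
# populates `configuration` from the stored plugin settings; assigning it by
# hand as below is only for illustration, and the key/ID values are placeholders.
#
#   plugin = OpsGenieOncallPlugin()
#   plugin.configuration = OpsgenieConfiguration(api_key="<opsgenie-api-key>")
#   oncall = plugin.get(service_id="<schedule-id>")
#
# `get` and `page` are thin wrappers around get_oncall/page_oncall from the
# sibling service module, so their return shapes are defined there.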
|
tests/observes/test_dynamic_relationship.py | tteaka/sqlalchemy-utils | 879 | 12737709 | import pytest
import sqlalchemy as sa
from sqlalchemy.orm import dynamic_loader
from sqlalchemy_utils.observer import observes
@pytest.fixture
def Director(Base):
class Director(Base):
__tablename__ = 'director'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
movies = dynamic_loader('Movie', back_populates='director')
return Director
@pytest.fixture
def Movie(Base, Director):
class Movie(Base):
__tablename__ = 'movie'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
director_id = sa.Column(sa.Integer, sa.ForeignKey(Director.id))
director = sa.orm.relationship(Director, back_populates='movies')
director_name = sa.Column(sa.String)
@observes('director')
def director_observer(self, director):
self.director_name = director.name
return Movie
@pytest.fixture
def init_models(Director, Movie):
pass
@pytest.mark.usefixtures('postgresql_dsn')
class TestObservesForDynamicRelationship:
def test_add_observed_object(self, session, Director, Movie):
steven = Director(name='<NAME>')
session.add(steven)
jaws = Movie(name='Jaws', director=steven)
session.add(jaws)
session.commit()
assert jaws.director_name == '<NAME>'
def test_add_observed_object_from_backref(self, session, Director, Movie):
jaws = Movie(name='Jaws')
steven = Director(name='<NAME>', movies=[jaws])
session.add(steven)
session.add(jaws)
session.commit()
assert jaws.director_name == '<NAME>'
|
nlgeval/utils.py | h4ste/nlg-eval | 1,088 | 12737712 | import click
import json
import os
from xdg import XDG_CONFIG_HOME
class InvalidDataDirException(Exception):
pass
def get_data_dir():
if os.environ.get('NLGEVAL_DATA'):
if not os.path.exists(os.environ.get('NLGEVAL_DATA')):
click.secho("NLGEVAL_DATA variable is set but points to non-existent path.", fg='red', err=True)
raise InvalidDataDirException()
return os.environ.get('NLGEVAL_DATA')
else:
try:
cfg_file = os.path.join(XDG_CONFIG_HOME, 'nlgeval', 'rc.json')
with open(cfg_file, 'rt') as f:
rc = json.load(f)
if not os.path.exists(rc['data_path']):
                    click.secho("Data path found in {} does not exist: {}".format(cfg_file, rc['data_path']), fg='red', err=True)
click.secho("Run `nlg-eval --setup DATA_DIR' to download or set $NLGEVAL_DATA to an existing location",
fg='red', err=True)
raise InvalidDataDirException()
return rc['data_path']
except:
click.secho("Could not determine location of data.", fg='red', err=True)
click.secho("Run `nlg-eval --setup DATA_DIR' to download or set $NLGEVAL_DATA to an existing location", fg='red',
err=True)
raise InvalidDataDirException()
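# Hedged usage sketch: callers treat InvalidDataDirException as "setup has not
# been run yet". The guarded demo below only prints the resolved directory and
# needs either $NLGEVAL_DATA or an existing rc.json to succeed.
if __name__ == "__main__":
    try:
        print("nlg-eval data dir:", get_data_dir())
    except InvalidDataDirException:
        print("Data directory not configured; run `nlg-eval --setup DATA_DIR` first.")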
|
binding/python/util.py | ledoufre/porcupine | 1,034 | 12737719 | <filename>binding/python/util.py<gh_stars>1000+
#
# Copyright 2020-2021 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import logging
import os
import platform
import subprocess
log = logging.getLogger('PPN')
log.setLevel(logging.WARNING)
def _pv_linux_machine(machine):
if machine == 'x86_64':
return machine
elif machine == 'aarch64':
arch_info = '-' + machine
elif machine in ['armv7l', 'armv6l']:
arch_info = ''
else:
raise NotImplementedError("Unsupported CPU architecture: '%s'" % machine)
cpu_info = ''
try:
cpu_info = subprocess.check_output(['cat', '/proc/cpuinfo']).decode()
cpu_part_list = [x for x in cpu_info.split('\n') if 'CPU part' in x]
cpu_part = cpu_part_list[0].split(' ')[-1].lower()
except Exception as error:
raise RuntimeError("Failed to identify the CPU with '%s'\nCPU info: %s" % (error, cpu_info))
if '0xb76' == cpu_part:
return 'arm11' + arch_info
elif '0xc07' == cpu_part:
return 'cortex-a7' + arch_info
elif '0xd03' == cpu_part:
return 'cortex-a53' + arch_info
elif '0xd07' == cpu_part:
return 'cortex-a57' + arch_info
elif '0xd08' == cpu_part:
return 'cortex-a72' + arch_info
elif '0xc08' == cpu_part:
return 'beaglebone' + arch_info
elif machine == 'armv7l':
log.warning(
'WARNING: Please be advised that this device (CPU part = %s) is not officially supported by Picovoice. '
'Falling back to the armv6-based (Raspberry Pi Zero) library. This is not tested nor optimal.' % cpu_part)
return 'arm11'
else:
raise NotImplementedError("Unsupported CPU: '%s'." % cpu_part)
def _pv_platform():
pv_system = platform.system()
if pv_system not in {'Darwin', 'Linux', 'Windows'}:
raise ValueError("Unsupported system '%s'." % pv_system)
if pv_system == 'Linux':
pv_machine = _pv_linux_machine(platform.machine())
else:
pv_machine = platform.machine()
return pv_system, pv_machine
_PV_SYSTEM, _PV_MACHINE = _pv_platform()
_RASPBERRY_PI_MACHINES = {'arm11', 'cortex-a7', 'cortex-a53', 'cortex-a72', 'cortex-a53-aarch64', 'cortex-a72-aarch64'}
_JETSON_MACHINES = {'cortex-a57-aarch64'}
def pv_library_path(relative):
if _PV_SYSTEM == 'Darwin':
if _PV_MACHINE == 'x86_64':
return os.path.join(os.path.dirname(__file__), relative, 'lib/mac/x86_64/libpv_porcupine.dylib')
elif _PV_MACHINE == "arm64":
return os.path.join(os.path.dirname(__file__), relative, 'lib/mac/arm64/libpv_porcupine.dylib')
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return os.path.join(os.path.dirname(__file__), relative, 'lib/linux/x86_64/libpv_porcupine.so')
elif _PV_MACHINE in _JETSON_MACHINES:
return os.path.join(
os.path.dirname(__file__),
relative,
'lib/jetson/%s/libpv_porcupine.so' % _PV_MACHINE)
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return os.path.join(
os.path.dirname(__file__),
relative,
'lib/raspberry-pi/%s/libpv_porcupine.so' % _PV_MACHINE)
elif _PV_MACHINE == 'beaglebone':
return os.path.join(os.path.dirname(__file__), relative, 'lib/beaglebone/libpv_porcupine.so')
elif _PV_SYSTEM == 'Windows':
return os.path.join(os.path.dirname(__file__), relative, 'lib/windows/amd64/libpv_porcupine.dll')
raise NotImplementedError('Unsupported platform.')
def pv_model_path(relative):
return os.path.join(os.path.dirname(__file__), relative, 'lib/common/porcupine_params.pv')
def pv_keyword_files_subdir():
if _PV_SYSTEM == 'Darwin':
return 'mac'
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return 'linux'
elif _PV_MACHINE in _JETSON_MACHINES:
return 'jetson'
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return 'raspberry-pi'
elif _PV_MACHINE == 'beaglebone':
return 'beaglebone'
elif _PV_SYSTEM == 'Windows':
return 'windows'
raise NotImplementedError('Unsupported platform')
def pv_keyword_paths(relative):
keyword_files_dir = \
os.path.join(os.path.dirname(__file__), relative, 'resources/keyword_files', pv_keyword_files_subdir())
res = dict()
for x in os.listdir(keyword_files_dir):
res[x.rsplit('_')[0]] = os.path.join(keyword_files_dir, x)
return res
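# Hedged usage sketch: the helpers above all take the same *relative* prefix
# from this file to the repository root; '../..' below is an assumption that
# matches the binding/python/ layout implied by the paths in this module.
if __name__ == "__main__":
    rel = "../.."
    print("library :", pv_library_path(rel))
    print("model   :", pv_model_path(rel))
    print("keywords:", sorted(pv_keyword_paths(rel)))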
|
tests/importer/test_importer.py | pie-pie-kakaoent/uvicorn | 5,012 | 12737726 | import pytest
from uvicorn.importer import ImportFromStringError, import_from_string
def test_invalid_format() -> None:
with pytest.raises(ImportFromStringError) as exc_info:
import_from_string("example:")
expected = 'Import string "example:" must be in format "<module>:<attribute>".'
assert expected in str(exc_info.value)
def test_invalid_module() -> None:
with pytest.raises(ImportFromStringError) as exc_info:
import_from_string("module_does_not_exist:myattr")
expected = 'Could not import module "module_does_not_exist".'
assert expected in str(exc_info.value)
def test_invalid_attr() -> None:
with pytest.raises(ImportFromStringError) as exc_info:
import_from_string("tempfile:attr_does_not_exist")
expected = 'Attribute "attr_does_not_exist" not found in module "tempfile".'
assert expected in str(exc_info.value)
def test_internal_import_error() -> None:
with pytest.raises(ImportError):
import_from_string("tests.importer.raise_import_error:myattr")
def test_valid_import() -> None:
instance = import_from_string("tempfile:TemporaryFile")
from tempfile import TemporaryFile
assert instance == TemporaryFile
def test_no_import_needed() -> None:
from tempfile import TemporaryFile
instance = import_from_string(TemporaryFile)
assert instance == TemporaryFile
|
ecosystem_tools/mindconverter/mindconverter/graph_based_converter/mapper/aten/prim/broadcast_to_mapper.py | mindspore-ai/mindinsight | 216 | 12737738 | # Copyright 2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
from mindconverter.graph_based_converter.common.utils import reset_template_and_exchange_msg
from mindconverter.graph_based_converter.constant import WeightType
from mindconverter.graph_based_converter.mapper.base import AtenToMindSporeMapper
class BroadcastToMapper(AtenToMindSporeMapper):
"""BroadcastTo mapper."""
@staticmethod
def _operation_name_in_ms(*args, **kwargs):
return "broadcast_to"
@staticmethod
def _convert_trained_weights(**kwargs):
weights = kwargs.get("weights", list())
args_name = ["input", "shape", "implicit"]
args_name_list = BroadcastToMapper.get_args_name_list(**kwargs, args_name=args_name)
trainable_params = dict()
for weight in weights:
trainable_params[args_name_list[weight.location]] = {"data": weight.value, "location": weight.location,
"type": WeightType.PARAMETER.value,
"onnx_name": weight.name}
return trainable_params
@staticmethod
def _generate_snippet_template(**kwargs):
template, exchange_msg, outputs_list, outputs_mapping = AtenToMindSporeMapper._generate_snippet_template(
**kwargs)
raw_params = kwargs.get("raw_params")
if not raw_params:
return template, exchange_msg, outputs_list, outputs_mapping
op = kwargs.get("operation")
trainable_params = kwargs.get("trainable_params", dict())
output_shape = raw_params.get("output_shape", tuple())
variable_slot = "var_0"
args_name = ["input", "shape", "implicit"]
inputs, args, group_inputs = BroadcastToMapper._params_parser(raw_params=raw_params,
args_name=args_name,
trainable_params=trainable_params)
args = BroadcastToMapper._get_args(variable_slot=variable_slot, inputs=inputs, args=args,
output_shape=output_shape)
init_template_list = [f"self.{{{variable_slot}}}_{arg_name} = {{{arg_name}}}" for arg_name in args]
parameters_declared = dict()
for name, trainable_param in trainable_params.copy().items():
value = trainable_param["data"]
if BroadcastToMapper.is_tensor(value):
variable_slot_param_name = f"{variable_slot}/{name}"
init_template_list.append(f"self.{{{variable_slot}}}_{name} = {{{variable_slot_param_name}}}")
parameters_declared[name] = ""
else:
args[name] = value.tolist()
init_template_list.append(f"self.{{{variable_slot}}}_{name} = {{{name}}}")
trainable_params.pop(name)
construct_template = f"opt_{{{variable_slot}}} = ms_np.{op}({inputs[0]}, ({', '.join(inputs[1:-1])}))"
template, exchange_msg = reset_template_and_exchange_msg(template, exchange_msg, variable_slot,
init_template_list, [construct_template], args,
trainable_params, parameters_declared, group_inputs)
return template, exchange_msg, outputs_list, outputs_mapping
@staticmethod
def _get_args(**kwargs):
"""Get args from params_parser."""
variable_slot = kwargs.get("variable_slot")
inputs = kwargs.get("inputs", list())
args = kwargs.get("args", dict())
output_shape = kwargs.get("output_shape", tuple())
shape_name_list = [ipt.replace(f"self.{{{variable_slot}}}_", "") for ipt in inputs[1:-1]]
for idx, shape_name in enumerate(shape_name_list):
if isinstance(args.get(shape_name), int) and args.get(shape_name) == -1:
args[shape_name] = output_shape[idx]
args.pop("implicit")
return args
|
pyvista/utilities/geometric_objects.py | basnijholt/pyvista | 1,107 | 12737742 | """Provides an easy way of generating several geometric objects.
CONTAINS
--------
vtkArrowSource
vtkCylinderSource
vtkSphereSource
vtkPlaneSource
vtkLineSource
vtkCubeSource
vtkConeSource
vtkDiskSource
vtkRegularPolygonSource
vtkPyramid
vtkPlatonicSolidSource
vtkSuperquadricSource
as well as some pure-python helpers.
"""
import numpy as np
import pyvista
from pyvista import _vtk
from pyvista.utilities import check_valid_vector
NORMALS = {
'x': [1, 0, 0],
'y': [0, 1, 0],
'z': [0, 0, 1],
'-x': [-1, 0, 0],
'-y': [0, -1, 0],
'-z': [0, 0, -1],
}
def translate(surf, center=[0., 0., 0.], direction=[1., 0., 0.]):
"""Translate and orient a mesh to a new center and direction.
By default, the input mesh is considered centered at the origin
and facing in the x direction.
"""
normx = np.array(direction)/np.linalg.norm(direction)
normz = np.cross(normx, [0, 1.0, 0.0000001])
normz /= np.linalg.norm(normz)
normy = np.cross(normz, normx)
trans = np.zeros((4, 4))
trans[:3, 0] = normx
trans[:3, 1] = normy
trans[:3, 2] = normz
trans[3, 3] = 1
surf.transform(trans)
if not np.allclose(center, [0., 0., 0.]):
surf.points += np.array(center)
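# Hedged illustration of the helper above (module-internal, so shown only as a
# comment): translate() mutates the mesh in place, e.g.
#
#   surf = pyvista.Sphere()
#   translate(surf, center=[1.0, 2.0, 3.0], direction=[0.0, 0.0, 1.0])
#
# after which surf.center is approximately (1.0, 2.0, 3.0).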
def Cylinder(center=(0.0, 0.0, 0.0), direction=(1.0, 0.0, 0.0),
radius=0.5, height=1.0, resolution=100, capping=True):
"""Create the surface of a cylinder.
See also :func:`pyvista.CylinderStructured`.
Parameters
----------
center : sequence, optional
Location of the centroid in ``[x, y, z]``.
direction : sequence, optional
Direction cylinder points to in ``[x, y, z]``.
radius : float, optional
Radius of the cylinder.
height : float, optional
Height of the cylinder.
resolution : int, optional
Number of points on the circular face of the cylinder.
capping : bool, optional
Cap cylinder ends with polygons. Default ``True``.
Returns
-------
pyvista.PolyData
Cylinder surface.
Examples
--------
>>> import pyvista
>>> import numpy as np
>>> cylinder = pyvista.Cylinder(center=[1, 2, 3], direction=[1, 1, 1],
... radius=1, height=2)
>>> cylinder.plot(show_edges=True, line_width=5, cpos='xy')
"""
cylinderSource = _vtk.vtkCylinderSource()
cylinderSource.SetRadius(radius)
cylinderSource.SetHeight(height)
cylinderSource.SetCapping(capping)
cylinderSource.SetResolution(resolution)
cylinderSource.Update()
surf = pyvista.wrap(cylinderSource.GetOutput())
surf.rotate_z(-90, inplace=True)
translate(surf, center, direction)
return surf
def CylinderStructured(radius=0.5, height=1.0,
center=(0.,0.,0.), direction=(1.,0.,0.),
theta_resolution=32, z_resolution=10):
"""Create a cylinder mesh as a :class:`pyvista.StructuredGrid`.
The end caps are left open. This can create a surface mesh if a single
value for the ``radius`` is given or a 3D mesh if multiple radii are given
as a list/array in the ``radius`` argument.
Parameters
----------
radius : float, sequence, optional
Radius of the cylinder. If a sequence, then describes the
radial coordinates of the cells as a range of values as
specified by the ``radius``.
height : float, optional
Height of the cylinder along its Z-axis.
center : sequence
Location of the centroid in ``[x, y, z]``.
direction : sequence
Direction cylinder Z-axis in ``[x, y, z]``.
theta_resolution : int, optional
Number of points on the circular face of the cylinder.
Ignored if ``radius`` is an iterable.
z_resolution : int, optional
Number of points along the height (Z-axis) of the cylinder.
Returns
-------
pyvista.StructuredGrid
Structured cylinder.
Examples
--------
Default structured cylinder
>>> import pyvista
>>> mesh = pyvista.CylinderStructured()
>>> mesh.plot(show_edges=True)
Structured cylinder with an inner radius of 1, outer of 2, with 5
segments.
>>> import numpy as np
>>> mesh = pyvista.CylinderStructured(radius=np.linspace(1, 2, 5))
>>> mesh.plot(show_edges=True)
"""
# Define grid in polar coordinates
r = np.array([radius]).ravel()
nr = len(r)
theta = np.linspace(0, 2*np.pi, num=theta_resolution)
radius_matrix, theta_matrix = np.meshgrid(r, theta)
# Transform to cartesian space
X = radius_matrix * np.cos(theta_matrix)
Y = radius_matrix * np.sin(theta_matrix)
# Make all the nodes in the grid
xx = np.array([X] * z_resolution).ravel()
yy = np.array([Y] * z_resolution).ravel()
dz = height / (z_resolution - 1)
zz = np.empty(yy.size)
zz = np.full((X.size, z_resolution), dz)
zz *= np.arange(z_resolution)
zz = zz.ravel(order='f')
# Create the grid
grid = pyvista.StructuredGrid()
grid.points = np.c_[xx, yy, zz]
grid.dimensions = [nr, theta_resolution, z_resolution]
# Orient properly in user direction
vx = np.array([0., 0., 1.])
if not np.allclose(vx, direction):
direction /= np.linalg.norm(direction)
vx -= vx.dot(direction) * direction
vx /= np.linalg.norm(vx)
vy = np.cross(direction, vx)
rmtx = np.array([vx, vy, direction])
grid.points = grid.points.dot(rmtx)
# Translate to given center
grid.points -= np.array(grid.center)
grid.points += np.array(center)
return grid
def Arrow(start=(0., 0., 0.), direction=(1., 0., 0.), tip_length=0.25,
tip_radius=0.1, tip_resolution=20, shaft_radius=0.05,
shaft_resolution=20, scale=None):
"""Create an arrow.
Parameters
----------
start : iterable, optional
Start location in ``[x, y, z]``.
direction : iterable, optional
Direction the arrow points to in ``[x, y, z]``.
tip_length : float, optional
Length of the tip.
tip_radius : float, optional
Radius of the tip.
tip_resolution : int, optional
Number of faces around the tip.
shaft_radius : float, optional
Radius of the shaft.
shaft_resolution : int, optional
Number of faces around the shaft.
scale : float or str, optional
Scale factor of the entire object, default is ``None``
(i.e. scale of 1). ``'auto'`` scales to length of direction
array.
Returns
-------
pyvista.PolyData
Arrow mesh.
Examples
--------
Plot a default arrow.
>>> import pyvista
>>> mesh = pyvista.Arrow()
>>> mesh.plot(show_edges=True)
"""
# Create arrow object
arrow = _vtk.vtkArrowSource()
arrow.SetTipLength(tip_length)
arrow.SetTipRadius(tip_radius)
arrow.SetTipResolution(tip_resolution)
arrow.SetShaftRadius(shaft_radius)
arrow.SetShaftResolution(shaft_resolution)
arrow.Update()
surf = pyvista.wrap(arrow.GetOutput())
if scale == 'auto':
scale = float(np.linalg.norm(direction))
if isinstance(scale, float) or isinstance(scale, int):
surf.points *= scale
elif scale is not None:
raise TypeError("Scale must be either float, int or 'auto'.")
translate(surf, start, direction)
return surf
def Sphere(radius=0.5, center=(0, 0, 0), direction=(0, 0, 1), theta_resolution=30,
phi_resolution=30, start_theta=0, end_theta=360, start_phi=0, end_phi=180):
"""Create a vtk Sphere.
Parameters
----------
radius : float, optional
Sphere radius.
center : np.ndarray or list, optional
Center in ``[x, y, z]``.
direction : list or tuple or np.ndarray, optional
Direction the top of the sphere points to in ``[x, y, z]``.
theta_resolution : int , optional
Set the number of points in the longitude direction (ranging
from ``start_theta`` to ``end_theta``).
phi_resolution : int, optional
Set the number of points in the latitude direction (ranging from
``start_phi`` to ``end_phi``).
start_theta : float, optional
Starting longitude angle.
end_theta : float, optional
Ending longitude angle.
start_phi : float, optional
Starting latitude angle.
end_phi : float, optional
Ending latitude angle.
Returns
-------
pyvista.PolyData
Sphere mesh.
Examples
--------
Create a sphere using default parameters.
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.plot(show_edges=True)
Create a quarter sphere by setting ``end_theta``.
>>> sphere = pyvista.Sphere(end_theta=90)
>>> out = sphere.plot(show_edges=True)
"""
sphere = _vtk.vtkSphereSource()
sphere.SetRadius(radius)
sphere.SetThetaResolution(theta_resolution)
sphere.SetPhiResolution(phi_resolution)
sphere.SetStartTheta(start_theta)
sphere.SetEndTheta(end_theta)
sphere.SetStartPhi(start_phi)
sphere.SetEndPhi(end_phi)
sphere.Update()
surf = pyvista.wrap(sphere.GetOutput())
surf.rotate_y(-90, inplace=True)
translate(surf, center, direction)
return surf
def Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1,
i_resolution=10, j_resolution=10):
"""Create a plane.
Parameters
----------
center : list or tuple or np.ndarray
Location of the centroid in ``[x, y, z]``.
direction : list or tuple or np.ndarray
Direction of the plane's normal in ``[x, y, z]``.
i_size : float
Size of the plane in the i direction.
j_size : float
Size of the plane in the j direction.
i_resolution : int
Number of points on the plane in the i direction.
j_resolution : int
Number of points on the plane in the j direction.
Returns
-------
pyvista.PolyData
Plane mesh.
Examples
--------
Create a default plane.
>>> import pyvista
>>> mesh = pyvista.Plane()
>>> mesh.point_data.clear()
>>> mesh.plot(show_edges=True)
"""
planeSource = _vtk.vtkPlaneSource()
planeSource.SetXResolution(i_resolution)
planeSource.SetYResolution(j_resolution)
planeSource.Update()
surf = pyvista.wrap(planeSource.GetOutput())
surf.points[:, 0] *= i_size
surf.points[:, 1] *= j_size
surf.rotate_y(-90, inplace=True)
translate(surf, center, direction)
return surf
def Line(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1):
"""Create a line.
Parameters
----------
pointa : np.ndarray or list, optional
Location in ``[x, y, z]``.
pointb : np.ndarray or list, optional
Location in ``[x, y, z]``.
resolution : int, optional
Number of pieces to divide line into.
Returns
-------
pyvista.PolyData
Line mesh.
Examples
--------
Create a line between ``(0, 0, 0)`` and ``(0, 0, 1)``.
>>> import pyvista
>>> mesh = pyvista.Line((0, 0, 0), (0, 0, 1))
>>> mesh.plot(color='k', line_width=10)
"""
if resolution <= 0:
raise ValueError('Resolution must be positive')
if np.array(pointa).size != 3:
raise TypeError('Point A must be a length three tuple of floats.')
if np.array(pointb).size != 3:
raise TypeError('Point B must be a length three tuple of floats.')
src = _vtk.vtkLineSource()
src.SetPoint1(*pointa)
src.SetPoint2(*pointb)
src.SetResolution(resolution)
src.Update()
line = pyvista.wrap(src.GetOutput())
# Compute distance of every point along line
compute = lambda p0, p1: np.sqrt(np.sum((p1 - p0)**2, axis=1))
distance = compute(np.array(pointa), line.points)
line['Distance'] = distance
return line
def Tube(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1, radius=1.0, n_sides=15):
"""Create a tube.
Parameters
----------
pointa : np.ndarray or list, optional
Location in ``[x, y, z]``.
pointb : np.ndarray or list, optional
Location in ``[x, y, z]``.
resolution : int, optional
Number of pieces to divide tube into.
radius : float, optional
Minimum tube radius (minimum because the tube radius may vary).
n_sides : int, optional
Number of sides for the tube.
Returns
-------
pyvista.PolyData
Tube mesh.
Examples
--------
Create a tube between ``(0, 0, 0)`` and ``(0, 0, 1)``.
>>> import pyvista
>>> mesh = pyvista.Tube((0, 0, 0), (0, 0, 1))
>>> mesh.plot()
"""
if resolution <= 0:
raise ValueError('Resolution must be positive.')
if np.array(pointa).size != 3:
raise TypeError('Point A must be a length three tuple of floats.')
if np.array(pointb).size != 3:
raise TypeError('Point B must be a length three tuple of floats.')
line_src = _vtk.vtkLineSource()
line_src.SetPoint1(*pointa)
line_src.SetPoint2(*pointb)
line_src.SetResolution(resolution)
line_src.Update()
if n_sides < 3:
raise ValueError('Number of sides `n_sides` must be >= 3')
tube_filter = _vtk.vtkTubeFilter()
tube_filter.SetInputConnection(line_src.GetOutputPort())
tube_filter.SetRadius(radius)
tube_filter.SetNumberOfSides(n_sides)
tube_filter.Update()
return pyvista.wrap(tube_filter.GetOutput())
def Cube(center=(0.0, 0.0, 0.0), x_length=1.0, y_length=1.0,
z_length=1.0, bounds=None, clean=True):
"""Create a cube.
It's possible to specify either the center and side lengths or
just the bounds of the cube. If ``bounds`` are given, all other
arguments are ignored.
.. versionchanged:: 0.33.0
The cube is created using ``vtk.vtkCubeSource``. For
compatibility with :func:`pyvista.PlatonicSolid`, face indices
are also added as cell data. For full compatibility with
:func:`PlatonicSolid() <pyvista.PlatonicSolid>`, one has to
use ``x_length = y_length = z_length = 2 * radius / 3**0.5``.
The cube points are also cleaned by default now, leaving only
the 8 corners and a watertight (manifold) mesh.
Parameters
----------
center : sequence, optional
Center in ``[x, y, z]``.
x_length : float, optional
Length of the cube in the x-direction.
y_length : float, optional
Length of the cube in the y-direction.
z_length : float, optional
Length of the cube in the z-direction.
bounds : sequence, optional
Specify the bounding box of the cube. If given, all other size
arguments are ignored. ``(xMin, xMax, yMin, yMax, zMin, zMax)``.
clean : bool, optional
Whether to clean the raw points of the mesh, making the cube
manifold. Note that this will degrade the texture coordinates
that come with the mesh, so if you plan to map a texture on
the cube, consider setting this to ``False``.
.. versionadded:: 0.33.0
Returns
-------
pyvista.PolyData
Mesh of the cube.
Examples
--------
Create a default cube.
>>> import pyvista
>>> mesh = pyvista.Cube()
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkCubeSource()
if bounds is not None:
if np.array(bounds).size != 6:
raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')
src.SetBounds(bounds)
else:
src.SetCenter(center)
src.SetXLength(x_length)
src.SetYLength(y_length)
src.SetZLength(z_length)
src.Update()
cube = pyvista.wrap(src.GetOutput())
# add face index data for compatibility with PlatonicSolid
# but make it inactive for backwards compatibility
    cube.cell_data.set_array([1, 4, 0, 3, 5, 2], 'FaceIndex')
# clean duplicate points
if clean:
cube.clean(inplace=True)
return cube
def Box(bounds=(-1., 1., -1., 1., -1., 1.), level=0, quads=True):
"""Create a box with solid faces for the given bounds.
Parameters
----------
bounds : iterable, optional
Specify the bounding box of the cube.
``(xMin, xMax, yMin, yMax, zMin, zMax)``.
level : int, optional
Level of subdivision of the faces.
quads : bool, optional
Flag to tell the source to generate either a quad or two
triangle for a set of four points. Default ``True``.
Returns
-------
pyvista.PolyData
Mesh of the box.
Examples
--------
Create a box with subdivision ``level=2``.
>>> import pyvista
>>> mesh = pyvista.Box(level=2)
>>> mesh.plot(show_edges=True)
"""
if np.array(bounds).size != 6:
raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')
src = _vtk.vtkTessellatedBoxSource()
src.SetLevel(level)
if quads:
src.QuadsOn()
else:
src.QuadsOff()
src.SetBounds(bounds)
src.Update()
return pyvista.wrap(src.GetOutput())
def Cone(center=(0., 0., 0.), direction=(1., 0., 0.), height=1.0, radius=None,
capping=True, angle=None, resolution=6):
"""Create a cone.
Parameters
----------
center : iterable, optional
Center in ``[x, y, z]``. Axis of the cone passes through this
point.
direction : iterable, optional
Direction vector in ``[x, y, z]``. Orientation vector of the
cone.
height : float, optional
Height along the cone in its specified direction.
radius : float, optional
Base radius of the cone.
capping : bool, optional
Enable or disable the capping the base of the cone with a
polygon.
angle : float, optional
The angle in degrees between the axis of the cone and a
generatrix.
resolution : int, optional
Number of facets used to represent the cone.
Returns
-------
pyvista.PolyData
Cone mesh.
Examples
--------
Create a default Cone.
>>> import pyvista
>>> mesh = pyvista.Cone()
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkConeSource()
src.SetCapping(capping)
src.SetDirection(direction)
src.SetCenter(center)
src.SetHeight(height)
if angle and radius:
raise ValueError("Both radius and angle specified. They are mutually exclusive.")
elif angle and not radius:
src.SetAngle(angle)
elif not angle and radius:
src.SetRadius(radius)
elif not angle and not radius:
src.SetRadius(0.5)
src.SetResolution(resolution)
src.Update()
return pyvista.wrap(src.GetOutput())
def Polygon(center=(0., 0., 0.), radius=1, normal=(0, 0, 1), n_sides=6):
"""Create a polygon.
Parameters
----------
center : iterable, optional
Center in ``[x, y, z]``. Central axis of the polygon passes
through this point.
radius : float, optional
The radius of the polygon.
normal : iterable, optional
Direction vector in ``[x, y, z]``. Orientation vector of the polygon.
n_sides : int, optional
Number of sides of the polygon.
Returns
-------
pyvista.PolyData
Mesh of the polygon.
Examples
--------
Create an 8 sided polygon.
>>> import pyvista
>>> mesh = pyvista.Polygon(n_sides=8)
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkRegularPolygonSource()
src.SetCenter(center)
src.SetNumberOfSides(n_sides)
src.SetRadius(radius)
src.SetNormal(normal)
src.Update()
return pyvista.wrap(src.GetOutput())
def Disc(center=(0., 0., 0.), inner=0.25, outer=0.5, normal=(0, 0, 1), r_res=1,
c_res=6):
"""Create a polygonal disk with a hole in the center.
The disk has zero height. The user can specify the inner and outer
radius of the disk, and the radial and circumferential resolution
of the polygonal representation.
Parameters
----------
center : iterable
Center in ``[x, y, z]``. Middle of the axis of the disc.
inner : float, optional
The inner radius.
outer : float, optional
The outer radius.
normal : iterable
Direction vector in ``[x, y, z]``. Orientation vector of the disc.
r_res : int, optional
Number of points in radial direction.
c_res : int, optional
Number of points in circumferential direction.
Returns
-------
pyvista.PolyData
Disk mesh.
Examples
--------
Create a disc with 50 points in the circumferential direction.
>>> import pyvista
>>> mesh = pyvista.Disc(c_res=50)
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkDiskSource()
src.SetInnerRadius(inner)
src.SetOuterRadius(outer)
src.SetRadialResolution(r_res)
src.SetCircumferentialResolution(c_res)
src.Update()
normal = np.array(normal)
center = np.array(center)
surf = pyvista.wrap(src.GetOutput())
surf.rotate_y(90, inplace=True)
translate(surf, center, normal)
return surf
def Text3D(string, depth=0.5):
"""Create 3D text from a string.
Parameters
----------
string : str
String to generate 3D text from.
depth : float, optional
Depth of the text. Defaults to ``0.5``.
Returns
-------
pyvista.PolyData
3D text mesh.
Examples
--------
>>> import pyvista
>>> text_mesh = pyvista.Text3D('PyVista')
>>> text_mesh.plot(cpos='xy')
"""
vec_text = _vtk.vtkVectorText()
vec_text.SetText(string)
extrude = _vtk.vtkLinearExtrusionFilter()
extrude.SetInputConnection(vec_text.GetOutputPort())
extrude.SetExtrusionTypeToNormalExtrusion()
extrude.SetVector(0, 0, 1)
extrude.SetScaleFactor(depth)
tri_filter = _vtk.vtkTriangleFilter()
tri_filter.SetInputConnection(extrude.GetOutputPort())
tri_filter.Update()
return pyvista.wrap(tri_filter.GetOutput())
def Wavelet(extent=(-10, 10, -10, 10, -10, 10), center=(0, 0, 0), maximum=255,
x_freq=60, y_freq=30, z_freq=40, x_mag=10, y_mag=18, z_mag=5,
std=0.5, subsample_rate=1):
"""Create a wavelet.
Produces images with pixel values determined by
``Maximum*Gaussian*x_mag*sin(x_freq*x)*sin(y_freq*y)*cos(z_freq*z)``
Values are float scalars on point data with name ``"RTData"``.
Parameters
----------
extent : sequence, optional
Set/Get the extent of the whole output image. Default
``(-10, 10, -10, 10, -10, 10)``.
center : list, optional
Center of the wavelet.
maximum : float, optional
Maximum of the wavelet function.
x_freq : float, optional
Natural frequency in the x direction.
y_freq : float, optional
Natural frequency in the y direction.
z_freq : float, optional
Natural frequency in the z direction.
x_mag : float, optional
Magnitude in the x direction.
y_mag : float, optional
Magnitude in the y direction.
z_mag : float, optional
Magnitude in the z direction.
std : float, optional
Standard deviation.
subsample_rate : int, optional
The sub-sample rate.
Returns
-------
pyvista.PolyData
Wavelet mesh.
Examples
--------
>>> import pyvista
>>> wavelet = pyvista.Wavelet(extent=(0, 50, 0, 50, 0, 10), x_freq=20,
... y_freq=10, z_freq=1, x_mag=100, y_mag=100,
... z_mag=1000)
>>> wavelet.plot(show_scalar_bar=False)
Extract lower valued cells of the wavelet and create a surface from it.
>>> thresh = wavelet.threshold(800).extract_surface()
>>> thresh.plot(show_scalar_bar=False)
Smooth it to create "waves"
>>> waves = thresh.smooth(n_iter=100, relaxation_factor=0.1)
>>> waves.plot(color='white', smooth_shading=True, show_edges=True)
"""
wavelet_source = _vtk.vtkRTAnalyticSource()
wavelet_source.SetWholeExtent(*extent)
wavelet_source.SetCenter(center)
wavelet_source.SetMaximum(maximum)
wavelet_source.SetXFreq(x_freq)
wavelet_source.SetYFreq(y_freq)
wavelet_source.SetZFreq(z_freq)
wavelet_source.SetXMag(x_mag)
wavelet_source.SetYMag(y_mag)
wavelet_source.SetZMag(z_mag)
wavelet_source.SetStandardDeviation(std)
wavelet_source.SetSubsampleRate(subsample_rate)
wavelet_source.Update()
return pyvista.wrap(wavelet_source.GetOutput())
def CircularArc(pointa, pointb, center, resolution=100, negative=False):
"""Create a circular arc defined by two endpoints and a center.
The number of segments composing the polyline is controlled by
setting the object resolution.
Parameters
----------
pointa : sequence
Position of the first end point.
pointb : sequence
Position of the other end point.
center : sequence
Center of the circle that defines the arc.
resolution : int, optional
The number of segments of the polyline that draws the arc.
Resolution of 1 will just create a line.
negative : bool, optional
By default the arc spans the shortest angular sector between
``pointa`` and ``pointb``.
By setting this to ``True``, the longest angular sector is
used instead (i.e. the negative coterminal angle to the
shortest one).
Returns
-------
pyvista.PolyData
Circular arc mesh.
Examples
--------
Create a quarter arc centered at the origin in the xy plane.
>>> import pyvista
>>> arc = pyvista.CircularArc([-1, 0, 0], [0, 1, 0], [0, 0, 0])
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(arc, color='k', line_width=10)
>>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True)
>>> _ = pl.view_xy()
>>> pl.show()
"""
check_valid_vector(pointa, 'pointa')
check_valid_vector(pointb, 'pointb')
check_valid_vector(center, 'center')
if not np.isclose(
np.linalg.norm(np.array(pointa) - np.array(center)),
np.linalg.norm(np.array(pointb) - np.array(center)),
):
raise ValueError("pointa and pointb are not equidistant from center")
# fix half-arc bug: if a half arc travels directly through the
# center point, it becomes a line
pointb = list(pointb)
pointb[0] -= 1E-10
pointb[1] -= 1E-10
arc = _vtk.vtkArcSource()
arc.SetPoint1(*pointa)
arc.SetPoint2(*pointb)
arc.SetCenter(*center)
arc.SetResolution(resolution)
arc.SetNegative(negative)
arc.Update()
angle = np.deg2rad(arc.GetAngle())
arc = pyvista.wrap(arc.GetOutput())
# Compute distance of every point along circular arc
center = np.array(center).ravel()
radius = np.sqrt(np.sum((arc.points[0]-center)**2, axis=0))
angles = np.arange(0.0, 1.0 + 1.0/resolution, 1.0/resolution) * angle
arc['Distance'] = radius * angles
return arc
def CircularArcFromNormal(center, resolution=100, normal=None,
polar=None, angle=None):
"""Create a circular arc defined by normal to the plane of the arc, and an angle.
The number of segments composing the polyline is controlled by
setting the object resolution.
Parameters
----------
center : sequence
Center of the circle that defines the arc.
resolution : int, optional
The number of segments of the polyline that draws the arc.
Resolution of 1 will just create a line.
normal : sequence, optional
The normal vector to the plane of the arc. By default it
points in the positive Z direction.
polar : sequence, optional
Starting point of the arc in polar coordinates. By default it
is the unit vector in the positive x direction.
angle : float, optional
Arc length (in degrees) beginning at the polar vector. The
direction is counterclockwise. By default it is 90.
Returns
-------
pyvista.PolyData
Circular arc mesh.
Examples
--------
Quarter arc centered at the origin in the xy plane.
>>> import pyvista
>>> normal = [0, 0, 1]
>>> polar = [-1, 0, 0]
>>> arc = pyvista.CircularArcFromNormal([0, 0, 0], normal=normal, polar=polar)
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(arc, color='k', line_width=10)
>>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True)
>>> _ = pl.view_xy()
>>> pl.show()
"""
check_valid_vector(center, 'center')
if normal is None:
normal = [0, 0, 1]
if polar is None:
polar = [1, 0, 0]
if angle is None:
angle = 90.0
arc = _vtk.vtkArcSource()
arc.SetCenter(*center)
arc.SetResolution(resolution)
arc.UseNormalAndAngleOn()
check_valid_vector(normal, 'normal')
arc.SetNormal(*normal)
check_valid_vector(polar, 'polar')
arc.SetPolarVector(*polar)
arc.SetAngle(angle)
arc.Update()
angle = np.deg2rad(arc.GetAngle())
arc = pyvista.wrap(arc.GetOutput())
# Compute distance of every point along circular arc
center = np.array(center)
radius = np.sqrt(np.sum((arc.points[0] - center)**2, axis=0))
angles = np.linspace(0.0, angle, resolution+1)
arc['Distance'] = radius * angles
return arc
def Pyramid(points=None):
"""Create a pyramid defined by 5 points.
Parameters
----------
points : sequence, optional
Points of the pyramid. Points are ordered such that the first
four points are the four counterclockwise points on the
quadrilateral face, and the last point is the apex.
Defaults to pyramid in example.
Returns
-------
pyvista.UnstructuredGrid
Unstructured grid containing a single pyramid cell.
Examples
--------
>>> import pyvista
>>> pointa = [1.0, 1.0, 0.0]
>>> pointb = [-1.0, 1.0, 0.0]
>>> pointc = [-1.0, -1.0, 0.0]
>>> pointd = [1.0, -1.0, 0.0]
>>> pointe = [0.0, 0.0, 1.608]
>>> pyramid = pyvista.Pyramid([pointa, pointb, pointc, pointd, pointe])
>>> pyramid.plot(show_edges=True, line_width=5)
"""
if points is None:
points = [[1.0, 1.0, 0.0],
[-1.0, 1.0, 0.0],
[-1.0, -1.0, 0.0],
[1.0, -1.0, 0.0],
[0.0, 0.0, (4 - 2**0.5)**0.5]]
if len(points) != 5:
raise TypeError('Points must be given as length 5 np.ndarray or list')
check_valid_vector(points[0], 'points[0]')
check_valid_vector(points[1], 'points[1]')
check_valid_vector(points[2], 'points[2]')
check_valid_vector(points[3], 'points[3]')
check_valid_vector(points[4], 'points[4]')
pyramid = _vtk.vtkPyramid()
pyramid.GetPointIds().SetId(0, 0)
pyramid.GetPointIds().SetId(1, 1)
pyramid.GetPointIds().SetId(2, 2)
pyramid.GetPointIds().SetId(3, 3)
pyramid.GetPointIds().SetId(4, 4)
ug = _vtk.vtkUnstructuredGrid()
ug.SetPoints(pyvista.vtk_points(np.array(points), False))
ug.InsertNextCell(pyramid.GetCellType(), pyramid.GetPointIds())
return pyvista.wrap(ug)
def Triangle(points=None):
"""Create a triangle defined by 3 points.
Parameters
----------
points : sequence, optional
Points of the triangle. Defaults to a right isosceles
triangle (see example).
Returns
-------
pyvista.PolyData
Triangle mesh.
Examples
--------
>>> import pyvista
>>> pointa = [0, 0, 0]
>>> pointb = [1, 0, 0]
>>> pointc = [0.5, 0.707, 0]
>>> triangle = pyvista.Triangle([pointa, pointb, pointc])
>>> triangle.plot(show_edges=True, line_width=5)
"""
if points is None:
points = [[0, 0, 0], [1, 0, 0], [0.5, 0.5**0.5, 0]]
if len(points) != 3:
raise TypeError('Points must be given as length 3 np.ndarray or list')
check_valid_vector(points[0], 'points[0]')
check_valid_vector(points[1], 'points[1]')
check_valid_vector(points[2], 'points[2]')
cells = np.array([[3, 0, 1, 2]])
return pyvista.wrap(pyvista.PolyData(points, cells))
def Rectangle(points=None):
"""Create a rectangle defined by 4 points.
Parameters
----------
points : sequence, optional
Points of the rectangle. Defaults to a simple example.
Returns
-------
pyvista.PolyData
Rectangle mesh.
Examples
--------
>>> import pyvista
>>> pointa = [1.0, 0.0, 0.0]
>>> pointb = [1.0, 1.0, 0.0]
>>> pointc = [0.0, 1.0, 0.0]
>>> pointd = [0.0, 0.0, 0.0]
>>> rectangle = pyvista.Rectangle([pointa, pointb, pointc, pointd])
>>> rectangle.plot(show_edges=True, line_width=5)
"""
if points is None:
points = [[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]
if len(points) != 4:
raise TypeError('Points must be given as length 4 np.ndarray or list')
check_valid_vector(points[0], 'points[0]')
check_valid_vector(points[1], 'points[1]')
check_valid_vector(points[2], 'points[2]')
check_valid_vector(points[3], 'points[3]')
cells = np.array([[4, 0, 1, 2, 3]])
return pyvista.wrap(pyvista.PolyData(points, cells))
def Circle(radius=0.5, resolution=100):
"""Create a single PolyData circle defined by radius in the XY plane.
Parameters
----------
radius : float, optional
Radius of circle.
resolution : int, optional
Number of points on the circle.
Returns
-------
pyvista.PolyData
Circle mesh.
Examples
--------
>>> import pyvista
>>> radius = 0.5
>>> circle = pyvista.Circle(radius)
>>> circle.plot(show_edges=True, line_width=5)
"""
points = np.zeros((resolution, 3))
theta = np.linspace(0.0, 2.0*np.pi, resolution)
points[:, 0] = radius * np.cos(theta)
points[:, 1] = radius * np.sin(theta)
cells = np.array([np.append(np.array([resolution]), np.arange(resolution))])
return pyvista.wrap(pyvista.PolyData(points, cells))
def Superquadric(center=(0., 0., 0.), scale=(1., 1., 1.), size=0.5,
theta_roundness=1., phi_roundness=1.,
theta_resolution=16, phi_resolution=16,
toroidal=False, thickness=1/3):
"""Create a superquadric.
Parameters
----------
center : iterable, optional
Center of the superquadric in ``[x, y, z]``.
scale : iterable, optional
Scale factors of the superquadric in ``[x, y, z]``.
size : float, optional
Superquadric isotropic size.
theta_roundness : float, optional
Superquadric east/west roundness.
Values range from 0 (rectangular) to 1 (circular) to higher orders.
phi_roundness : float, optional
Superquadric north/south roundness.
Values range from 0 (rectangular) to 1 (circular) to higher orders.
theta_resolution : int, optional
Number of points in the longitude direction.
Values are rounded to nearest multiple of 4.
phi_resolution : int, optional
Number of points in the latitude direction.
Values are rounded to nearest multiple of 8.
toroidal : bool, optional
Whether or not the superquadric is toroidal (``True``)
or ellipsoidal (``False``).
thickness : float, optional
Superquadric ring thickness.
Only applies if toroidal is set to ``True``.
Returns
-------
pyvista.PolyData
Superquadric mesh.
See Also
--------
pyvista.ParametricSuperEllipsoid :
Parametric superquadric if toroidal is ``False``.
pyvista.ParametricSuperToroid :
Parametric superquadric if toroidal is ``True``.
Examples
--------
>>> import pyvista
>>> superquadric = pyvista.Superquadric(scale=(3., 1., 0.5),
... phi_roundness=0.1,
... theta_roundness=0.5)
>>> superquadric.plot(show_edges=True)
"""
superquadricSource = _vtk.vtkSuperquadricSource()
superquadricSource.SetCenter(center)
superquadricSource.SetScale(scale)
superquadricSource.SetSize(size)
superquadricSource.SetThetaRoundness(theta_roundness)
superquadricSource.SetPhiRoundness(phi_roundness)
superquadricSource.SetThetaResolution(round(theta_resolution/4)*4)
superquadricSource.SetPhiResolution(round(phi_resolution/8)*8)
superquadricSource.SetToroidal(toroidal)
superquadricSource.SetThickness(thickness)
superquadricSource.Update()
return pyvista.wrap(superquadricSource.GetOutput())
def PlatonicSolid(kind='tetrahedron', radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create a Platonic solid of a given size.
Parameters
----------
kind : str or int, optional
The kind of Platonic solid to create. Either the name of the
polyhedron or an integer index:
* ``'tetrahedron'`` or ``0``
* ``'cube'`` or ``1``
* ``'octahedron'`` or ``2``
* ``'icosahedron'`` or ``3``
* ``'dodecahedron'`` or ``4``
radius : float, optional
The radius of the circumscribed sphere for the solid to create.
center : sequence, optional
Three-length sequence defining the center of the solid to create.
Returns
-------
pyvista.PolyData
One of the five Platonic solids. Cell scalars are defined that
assign integer labels to each face (with array name
``"FaceIndex"``).
Examples
--------
Create and plot a dodecahedron.
>>> import pyvista
>>> dodeca = pyvista.PlatonicSolid('dodecahedron')
>>> dodeca.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
kinds = {
'tetrahedron': 0,
'cube': 1,
'octahedron': 2,
'icosahedron': 3,
'dodecahedron': 4,
}
if isinstance(kind, str):
if kind not in kinds:
raise ValueError(f'Invalid Platonic solid kind "{kind}".')
kind = kinds[kind]
elif isinstance(kind, int) and kind not in range(5):
raise ValueError(f'Invalid Platonic solid index "{kind}".')
elif not isinstance(kind, int):
raise ValueError('Invalid Platonic solid index type '
f'"{type(kind).__name__}".')
check_valid_vector(center, 'center')
solid = _vtk.vtkPlatonicSolidSource()
solid.SetSolidType(kind)
solid.Update()
solid = pyvista.wrap(solid.GetOutput())
solid.scale(radius, inplace=True)
solid.points += np.asanyarray(center) - solid.center
# rename and activate cell scalars
cell_data = solid.get_array(0)
solid.clear_data()
solid.cell_data['FaceIndex'] = cell_data
return solid
def Tetrahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create a tetrahedron of a given size.
A tetrahedron is composed of four congruent equilateral triangles.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the tetrahedron.
center : sequence, optional
Three-length sequence defining the center of the tetrahedron.
Returns
-------
pyvista.PolyData
Mesh for the tetrahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot a tetrahedron.
>>> import pyvista
>>> tetra = pyvista.Tetrahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='tetrahedron', radius=radius, center=center)
def Octahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create an octahedron of a given size.
An octahedron is composed of eight congruent equilateral
triangles.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the octahedron.
center : sequence, optional
Three-length sequence defining the center of the octahedron.
Returns
-------
pyvista.PolyData
Mesh for the octahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot an octahedron.
>>> import pyvista
>>> tetra = pyvista.Octahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='octahedron', radius=radius, center=center)
def Dodecahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create a dodecahedron of a given size.
A dodecahedron is composed of twelve congruent regular pentagons.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the dodecahedron.
center : sequence, optional
Three-length sequence defining the center of the dodecahedron.
Returns
-------
pyvista.PolyData
Mesh for the dodecahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot a dodecahedron.
>>> import pyvista
>>> tetra = pyvista.Dodecahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='dodecahedron', radius=radius, center=center)
def Icosahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create an icosahedron of a given size.
An icosahedron is composed of twenty congruent equilateral
triangles.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the icosahedron.
center : sequence, optional
Three-length sequence defining the center of the icosahedron.
Returns
-------
pyvista.PolyData
Mesh for the icosahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot an icosahedron.
>>> import pyvista
>>> tetra = pyvista.Icosahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='icosahedron', radius=radius, center=center)
|
src/titiler/application/titiler/application/settings.py | NLTGit/titiler | 288 | 12737757 | <filename>src/titiler/application/titiler/application/settings.py<gh_stars>100-1000
"""Titiler API settings."""
import pydantic
class ApiSettings(pydantic.BaseSettings):
"""FASTAPI application settings."""
name: str = "titiler"
cors_origins: str = "*"
cachecontrol: str = "public, max-age=3600"
root_path: str = ""
debug: bool = False
disable_cog: bool = False
disable_stac: bool = False
disable_mosaic: bool = False
lower_case_query_parameters: bool = False
@pydantic.validator("cors_origins")
def parse_cors_origin(cls, v):
"""Parse CORS origins."""
return [origin.strip() for origin in v.split(",")]
class Config:
"""model config"""
env_file = ".env"
env_prefix = "TITILER_API_"
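# Hedged usage sketch: because of the TITILER_API_ env prefix and the validator
# above, a comma-separated environment variable becomes a list of origins.
if __name__ == "__main__":
    import os
    os.environ["TITILER_API_CORS_ORIGINS"] = "https://a.example,https://b.example"
    print(ApiSettings().cors_origins)  # ['https://a.example', 'https://b.example']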
|
tools/telemetry/telemetry/core/backends/webdriver/webdriver_desktop_browser_finder.py | iplo/Chain | 231 | 12737770 | <filename>tools/telemetry/telemetry/core/backends/webdriver/webdriver_desktop_browser_finder.py<gh_stars>100-1000
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds desktop browsers that can be controlled by telemetry."""
import logging
import os
import sys
from telemetry.core import browser
from telemetry.core import possible_browser
from telemetry.core import util
from telemetry.core.backends.webdriver import webdriver_ie_backend
from telemetry.core.platform import factory
from telemetry.page import cloud_storage
# Try to import the selenium python lib which may be not available.
util.AddDirToPythonPath(
util.GetChromiumSrcDir(), 'third_party', 'webdriver', 'pylib')
try:
from selenium import webdriver # pylint: disable=F0401
except ImportError:
webdriver = None
ALL_BROWSER_TYPES = []
if webdriver:
ALL_BROWSER_TYPES = [
'internet-explorer',
'internet-explorer-x64']
else:
logging.warning('Webdriver backend is unsupported without selenium pylib. '
'For installation of selenium pylib, please refer to '
'https://code.google.com/p/selenium/wiki/PythonBindings.')
class PossibleWebDriverBrowser(possible_browser.PossibleBrowser):
"""A browser that can be controlled through webdriver API."""
def __init__(self, browser_type, finder_options):
target_os = sys.platform.lower()
super(PossibleWebDriverBrowser, self).__init__(browser_type, target_os,
finder_options)
assert browser_type in ALL_BROWSER_TYPES, \
'Please add %s to ALL_BROWSER_TYPES' % browser_type
@property
def _platform_backend(self):
return factory.GetPlatformBackendForCurrentOS()
def CreateWebDriverBackend(self, platform_backend):
raise NotImplementedError()
def Create(self):
backend = self.CreateWebDriverBackend(self._platform_backend)
return browser.Browser(backend, self._platform_backend)
def SupportsOptions(self, finder_options):
if len(finder_options.extensions_to_load) != 0:
return False
return True
def UpdateExecutableIfNeeded(self):
pass
@property
def last_modification_time(self):
return -1
class PossibleDesktopIE(PossibleWebDriverBrowser):
def __init__(self, browser_type, finder_options, architecture):
super(PossibleDesktopIE, self).__init__(browser_type, finder_options)
self._architecture = architecture
def CreateWebDriverBackend(self, platform_backend):
assert webdriver
def DriverCreator():
ie_driver_exe = os.path.join(util.GetTelemetryDir(), 'bin',
'IEDriverServer_%s.exe' % self._architecture)
cloud_storage.GetIfChanged(ie_driver_exe, cloud_storage.PUBLIC_BUCKET)
return webdriver.Ie(executable_path=ie_driver_exe)
return webdriver_ie_backend.WebDriverIEBackend(
platform_backend, DriverCreator, self.finder_options.browser_options)
def SelectDefaultBrowser(_):
return None
def FindAllAvailableBrowsers(finder_options):
"""Finds all the desktop browsers available on this machine."""
browsers = []
if not webdriver:
return browsers
# Look for the IE browser in the standard location.
if sys.platform.startswith('win'):
ie_path = os.path.join('Internet Explorer', 'iexplore.exe')
win_search_paths = {
'32' : { 'path' : os.getenv('PROGRAMFILES(X86)'),
'type' : 'internet-explorer'},
'64' : { 'path' : os.getenv('PROGRAMFILES'),
'type' : 'internet-explorer-x64'}}
for architecture, ie_info in win_search_paths.iteritems():
if not ie_info['path']:
continue
if os.path.exists(os.path.join(ie_info['path'], ie_path)):
browsers.append(
PossibleDesktopIE(ie_info['type'], finder_options, architecture))
return browsers
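# --- Editor's hedged usage sketch (not part of the original module) ---
# Shows how the finder above could be exercised; `finder_options` stands in for
# the options object that telemetry builds elsewhere, so this is a sketch only.
def _example_log_available_ie_browsers(finder_options):
  for possible in FindAllAvailableBrowsers(finder_options):
    logging.info('Found webdriver-controlled browser: %s', possible.browser_type)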
|
src/zc/buildout/tests/test_increment.py | digitalsatori/buildout | 426 | 12737779 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2020 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import doctest
import re
from zope.testing import renormalizing
import zc.buildout.testing
from zc.buildout.tests import easy_install_SetUp
from zc.buildout.tests import normalize_bang
def default_cfg():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [debug]
... dec = 1
... 2
... inc = 1
... ''')
>>> write('buildout.cfg', '''
... [buildout]
...
... [debug]
... dec -= 2
... inc += 2
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate debug', env=env), end='')
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[debug]
dec= 1
/home/.buildout/default.cfg
-= buildout.cfg
inc= 1
2
/home/.buildout/default.cfg
+= buildout.cfg
"""
def default_cfg_extensions():
r"""
Add two extensions as develop eggs
>>> mkdir('demo')
>>> write('demo', 'demo.py', '''
... import sys
... def ext(buildout):
... sys.stdout.write('demo %s %s\\n' % ('ext', sorted(buildout)))
... def unload(buildout):
... sys.stdout.write('demo %s %s\\n' % ('unload', sorted(buildout)))
... ''')
>>> write('demo', 'setup.py', '''
... from setuptools import setup
...
... setup(
... name = "demo",
... entry_points = {
... 'zc.buildout.extension': ['ext = demo:ext'],
... 'zc.buildout.unloadextension': ['ext = demo:unload'],
... },
... )
... ''')
>>> mkdir('demo2')
>>> write('demo2', 'demo2.py', '''
... import sys
... def ext(buildout):
... sys.stdout.write('demo2 %s %s\\n' % ('ext', sorted(buildout)))
... def unload(buildout):
... sys.stdout.write('demo2 %s %s\\n' % ('unload', sorted(buildout)))
... ''')
>>> write('demo2', 'setup.py', '''
... from setuptools import setup
...
... setup(
... name = "demo2",
... entry_points = {
... 'zc.buildout.extension': ['ext = demo2:ext'],
... 'zc.buildout.unloadextension': ['ext = demo2:unload'],
... },
... )
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... develop = demo demo2
... parts =
... ''')
Run buildout once without extensions to actually develop the eggs.
(Develop happens after loading extensions.)
>>> print_(system(buildout), end='')
Develop: '/sample-buildout/demo'
Develop: '/sample-buildout/demo2'
>>> ls("develop-eggs")
- demo.egg-link
- demo2.egg-link
- zc.recipe.egg.egg-link
extensions in .buildout/default.cfg
incremented in buildout.cfg
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... develop = demo demo2
... extensions += demo2
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= buildout.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_base():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('base.cfg', '''
... [buildout]
... extensions += demo2
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= base.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_base2():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('base.cfg', '''
... [buildout]
... ''')
>>> write('base2.cfg', '''
... [buildout]
... extensions += demo2
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... base2.cfg
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= base2.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_base2_and_base3():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('base.cfg', '''
... [buildout]
... ''')
>>> write('base2.cfg', '''
... [buildout]
... extensions += demo2
... ''')
>>> write('base3.cfg', '''
... [buildout]
... extensions += demo3
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... base2.cfg
... base3.cfg
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
demo3
/home/.buildout/default.cfg
+= base2.cfg
+= base3.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_buildout():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('base.cfg', '''
... [buildout]
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... extensions += demo2
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= buildout.cfg
...
versions= versions
DEFAULT_VALUE
"""
def with_extends_increment_in_buildout_with_base_and_root():
r"""
>>> home = tmpdir('home')
>>> mkdir(home, '.buildout')
>>> default_cfg = join(home, '.buildout', 'default.cfg')
>>> write(default_cfg, '''
... [buildout]
... extensions = demo
... ''')
>>> write('root.cfg', '''
... [buildout]
... ''')
>>> write('base.cfg', '''
... [buildout]
... extends = root.cfg
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... extensions += demo2
... parts =
... ''')
>>> env = dict(HOME=home, USERPROFILE=home)
>>> print_(system(buildout+' annotate buildout', env=env), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions= demo
demo2
/home/.buildout/default.cfg
+= buildout.cfg
...
versions= versions
DEFAULT_VALUE
"""
def no_default_with_extends_increment_in_base2_and_base3():
r"""
>>> write('base.cfg', '''
... [buildout]
... ''')
>>> write('base2.cfg', '''
... [buildout]
... extensions += demo2
... ''')
>>> write('base3.cfg', '''
... [buildout]
... extensions += demo3
... ''')
>>> write('buildout.cfg', '''
... [buildout]
... extends = base.cfg
... base2.cfg
... base3.cfg
... parts =
... ''')
>>> print_(system(buildout+' annotate buildout'), end='')
... # doctest: +ELLIPSIS
<BLANKLINE>
Annotated sections
==================
<BLANKLINE>
[buildout]
...
extensions=
demo2
demo3
IMPLICIT_VALUE
+= base2.cfg
+= base3.cfg
...
versions= versions
DEFAULT_VALUE
"""
def test_suite():
return doctest.DocTestSuite(
setUp=easy_install_SetUp,
tearDown=zc.buildout.testing.buildoutTearDown,
checker=renormalizing.RENormalizing([
zc.buildout.testing.normalize_path,
zc.buildout.testing.normalize_endings,
zc.buildout.testing.normalize_script,
zc.buildout.testing.normalize_egg_py,
zc.buildout.testing.normalize___pycache__,
zc.buildout.testing.not_found,
zc.buildout.testing.normalize_exception_type_for_python_2_and_3,
zc.buildout.testing.adding_find_link,
zc.buildout.testing.python27_warning,
zc.buildout.testing.python27_warning_2,
zc.buildout.testing.easyinstall_deprecated,
zc.buildout.testing.setuptools_deprecated,
zc.buildout.testing.pkg_resources_deprecated,
zc.buildout.testing.warnings_warn,
normalize_bang,
(re.compile(r'^(\w+\.)*(Missing\w+: )'), '\2'),
(re.compile(r"buildout: Running \S*setup.py"),
'buildout: Running setup.py'),
(re.compile(r'pip-\S+-'),
'pip.egg'),
(re.compile(r'setuptools-\S+-'),
'setuptools.egg'),
(re.compile(r'zc.buildout-\S+-'),
'zc.buildout.egg'),
(re.compile(r'pip = \S+'), 'pip = 20.0.0'),
(re.compile(r'setuptools = \S+'), 'setuptools = 0.7.99'),
(re.compile(r'File "\S+one.py"'),
'File "one.py"'),
(re.compile(r'We have a develop egg: (\S+) (\S+)'),
r'We have a develop egg: \1 V'),
(re.compile(r'Picked: setuptools = \S+'),
'Picked: setuptools = V'),
(re.compile('[-d] pip'), '- pip'),
(re.compile('[-d] setuptools'), '- setuptools'),
(re.compile(r'\\[\\]?'), '/'),
(re.compile(
'-q develop -mxN -d "/sample-buildout/develop-eggs'),
'-q develop -mxN -d /sample-buildout/develop-eggs'
),
(re.compile(r'^[*]...'), '...'),
# for
# bug_92891
# bootstrap_crashes_with_egg_recipe_in_buildout_section
(re.compile(r"Unused options for buildout: 'eggs' 'scripts'\."),
"Unused options for buildout: 'scripts' 'eggs'."),
# Python 3.4 changed the wording of NameErrors
(re.compile('NameError: global name'), 'NameError: name'),
# fix for test_distutils_scripts_using_import_are_properly_parsed
# and test_distutils_scripts_using_from_are_properly_parsed
# win32 apparently adds a " around sys.executable
(re.compile('#!"python"'), '#!python'),
]),
)
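# --- Editor's hedged usage sketch (not part of the original module) ---
# The suite above is normally collected by zc.buildout's own test runner; a
# minimal manual invocation (assuming the doctest fixtures are importable) is:
if __name__ == '__main__':
    import unittest
    unittest.TextTestRunner(verbosity=2).run(test_suite())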
|
env/lib/python3.6/site-packages/simple_salesforce/bulk.py | anthowen/duplify | 5,079 | 12737781 | """ Classes for interacting with Salesforce Bulk API """
try:
from collections import OrderedDict
except ImportError:
# Python < 2.7
from ordereddict import OrderedDict
import json
import requests
from time import sleep
from simple_salesforce.util import call_salesforce
class SFBulkHandler(object):
""" Bulk API request handler
Intermediate class which allows us to use commands such as
'sf.bulk.Contacts.insert(...)'.
This is really just a middle layer whose sole purpose is
to allow the above syntax.
"""
def __init__(self, session_id, bulk_url, proxies=None, session=None):
"""Initialize the instance with the given parameters.
Arguments:
* session_id -- the session ID for authenticating to Salesforce
* bulk_url -- API endpoint set in Salesforce instance
* proxies -- the optional map of scheme to proxy server
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
"""
self.session_id = session_id
self.session = session or requests.Session()
self.bulk_url = bulk_url
# don't wipe out original proxies with None
if not session and proxies is not None:
self.session.proxies = proxies
# Define these headers separate from Salesforce class,
# as bulk uses a slightly different format
self.headers = {
'Content-Type': 'application/json',
'X-SFDC-Session': self.session_id,
'X-PrettyPrint': '1'
}
def __getattr__(self, name):
return SFBulkType(object_name=name, bulk_url=self.bulk_url,
headers=self.headers, session=self.session)
class SFBulkType(object):
""" Interface to Bulk/Async API functions"""
def __init__(self, object_name, bulk_url, headers, session):
"""Initialize the instance with the given parameters.
Arguments:
* object_name -- the name of the type of SObject this represents,
e.g. `Lead` or `Contact`
* bulk_url -- API endpoint set in Salesforce instance
* headers -- bulk API headers
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
"""
self.object_name = object_name
self.bulk_url = bulk_url
self.session = session
self.headers = headers
def _create_job(self, operation, object_name, external_id_field=None):
""" Create a bulk job
Arguments:
* operation -- Bulk operation to be performed by job
* object_name -- SF object
* external_id_field -- unique identifier field for upsert operations
"""
payload = {
'operation': operation,
'object': object_name,
'contentType': 'JSON'
}
if operation == 'upsert':
payload['externalIdFieldName'] = external_id_field
url = "{}{}".format(self.bulk_url, 'job')
result = call_salesforce(url=url, method='POST', session=self.session,
headers=self.headers,
data=json.dumps(payload))
return result.json(object_pairs_hook=OrderedDict)
def _close_job(self, job_id):
""" Close a bulk job """
payload = {
'state': 'Closed'
}
url = "{}{}{}".format(self.bulk_url, 'job/', job_id)
result = call_salesforce(url=url, method='POST', session=self.session,
headers=self.headers,
data=json.dumps(payload))
return result.json(object_pairs_hook=OrderedDict)
def _get_job(self, job_id):
""" Get an existing job to check the status """
url = "{}{}{}".format(self.bulk_url, 'job/', job_id)
result = call_salesforce(url=url, method='GET', session=self.session,
headers=self.headers)
return result.json(object_pairs_hook=OrderedDict)
def _add_batch(self, job_id, data, operation):
""" Add a set of data as a batch to an existing job
Separating this out in case of later
implementations involving multiple batches
"""
url = "{}{}{}{}".format(self.bulk_url, 'job/', job_id, '/batch')
if operation != 'query':
data = json.dumps(data)
result = call_salesforce(url=url, method='POST', session=self.session,
headers=self.headers, data=data)
return result.json(object_pairs_hook=OrderedDict)
def _get_batch(self, job_id, batch_id):
""" Get an existing batch to check the status """
url = "{}{}{}{}{}".format(self.bulk_url, 'job/',
job_id, '/batch/', batch_id)
result = call_salesforce(url=url, method='GET', session=self.session,
headers=self.headers)
return result.json(object_pairs_hook=OrderedDict)
def _get_batch_results(self, job_id, batch_id, operation):
""" retrieve a set of results from a completed job """
url = "{}{}{}{}{}{}".format(self.bulk_url, 'job/', job_id, '/batch/',
batch_id, '/result')
result = call_salesforce(url=url, method='GET', session=self.session,
headers=self.headers)
if operation == 'query':
url_query_results = "{}{}{}".format(url, '/', result.json()[0])
query_result = call_salesforce(url=url_query_results, method='GET',
session=self.session,
headers=self.headers)
return query_result.json()
return result.json()
#pylint: disable=R0913
def _bulk_operation(self, object_name, operation, data,
external_id_field=None, wait=5):
""" String together helper functions to create a complete
end-to-end bulk API request
Arguments:
* object_name -- SF object
* operation -- Bulk operation to be performed by job
* data -- list of dict to be passed as a batch
* external_id_field -- unique identifier field for upsert operations
* wait -- seconds to sleep between checking batch status
"""
job = self._create_job(object_name=object_name, operation=operation,
external_id_field=external_id_field)
batch = self._add_batch(job_id=job['id'], data=data,
operation=operation)
self._close_job(job_id=job['id'])
batch_status = self._get_batch(job_id=batch['jobId'],
batch_id=batch['id'])['state']
while batch_status not in ['Completed', 'Failed', 'Not Processed']:
sleep(wait)
batch_status = self._get_batch(job_id=batch['jobId'],
batch_id=batch['id'])['state']
results = self._get_batch_results(job_id=batch['jobId'],
batch_id=batch['id'],
operation=operation)
return results
# _bulk_operation wrappers to expose supported Salesforce bulk operations
def delete(self, data):
""" soft delete records """
results = self._bulk_operation(object_name=self.object_name,
operation='delete', data=data)
return results
def insert(self, data):
""" insert records """
results = self._bulk_operation(object_name=self.object_name,
operation='insert', data=data)
return results
def upsert(self, data, external_id_field):
""" upsert records based on a unique identifier """
results = self._bulk_operation(object_name=self.object_name,
operation='upsert',
external_id_field=external_id_field,
data=data)
return results
def update(self, data):
""" update records """
results = self._bulk_operation(object_name=self.object_name,
operation='update', data=data)
return results
def hard_delete(self, data):
""" hard delete records """
results = self._bulk_operation(object_name=self.object_name,
operation='hardDelete', data=data)
return results
def query(self, data):
""" bulk query """
results = self._bulk_operation(object_name=self.object_name,
operation='query', data=data)
return results
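# --- Editor's hedged usage sketch (not part of the original module) ---
# Assumes an authenticated simple_salesforce.Salesforce instance named `sf`;
# `sf.bulk` returns an SFBulkHandler and attribute access picks the SObject.
def _example_bulk_insert(sf):
    records = [
        {'LastName': 'Smith', 'Email': 'smith@example.com'},
        {'LastName': 'Jones', 'Email': 'jones@example.com'},
    ]
    # Internally this runs create job -> add batch -> close job -> poll -> results.
    return sf.bulk.Contact.insert(records)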
|
airflow/ti_deps/dep_context.py | npodewitz/airflow | 8,092 | 12737792 | <reponame>npodewitz/airflow<gh_stars>1000+
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, List, Optional
import attr
from sqlalchemy.orm.session import Session
from airflow.utils.state import State
if TYPE_CHECKING:
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
@attr.define
class DepContext:
"""
A base class for contexts that specifies which dependencies should be evaluated in
the context for a task instance to satisfy the requirements of the context. Also
stores state related to the context that can be used by dependency classes.
For example there could be a SomeRunContext that subclasses this class which has
dependencies for:
- Making sure there are slots available on the infrastructure to run the task instance
- A task-instance's task-specific dependencies are met (e.g. the previous task
instance completed successfully)
- ...
:param deps: The context-specific dependencies that need to be evaluated for a
task instance to run in this execution context.
:param flag_upstream_failed: This is a hack to trigger creation of the
upstream_failed state while checking whether the task instance is runnable.
It was the shortest path to add the feature. This is bad since this class
should be pure (no side effects).
:param ignore_all_deps: Whether or not the context should ignore all ignorable
dependencies. Overrides the other ignore_* parameters
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for
Backfills)
:param ignore_in_retry_period: Ignore the retry period for task instances
:param ignore_in_reschedule_period: Ignore the reschedule period for task instances
:param ignore_unmapped_tasks: Ignore errors about mapped tasks not yet being expanded
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and
trigger rule
:param ignore_ti_state: Ignore the task instance's previous failure/success
:param finished_tis: A list of all the finished task instances of this run
"""
deps: set = attr.ib(factory=set)
flag_upstream_failed: bool = False
ignore_all_deps: bool = False
ignore_depends_on_past: bool = False
ignore_in_retry_period: bool = False
ignore_in_reschedule_period: bool = False
ignore_task_deps: bool = False
ignore_ti_state: bool = False
ignore_unmapped_tasks: bool = False
finished_tis: Optional[List["TaskInstance"]] = None
def ensure_finished_tis(self, dag_run: "DagRun", session: Session) -> List["TaskInstance"]:
"""
This method makes sure finished_tis is populated if it's currently None.
This is for the strange feature of running tasks without dag_run.
:param dag_run: The DagRun for which to find finished tasks
:return: A list of all the finished tasks of this DAG and execution_date
:rtype: list[airflow.models.TaskInstance]
"""
if self.finished_tis is None:
finished_tis = dag_run.get_task_instances(state=State.finished, session=session)
self.finished_tis = finished_tis
else:
finished_tis = self.finished_tis
return finished_tis
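# --- Editor's hedged usage sketch (not part of the original module) ---
# A permissive context roughly like what a forced manual re-run might build:
# task-level deps and prior TI state are ignored, everything else is default.
def _example_permissive_dep_context() -> DepContext:
    return DepContext(
        ignore_task_deps=True,
        ignore_ti_state=True,
    )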
|
test/manual/trash/test_get_trash_document.py | membranepotential/mendeley-python-sdk | 103 | 12737817 | <gh_stars>100-1000
from test import get_user_session, cassette
from test.resources.documents import delete_all_documents, create_document, assert_core_document, assert_bib_document, \
assert_client_document, assert_tags_document, assert_all_document
def test_should_get_document_core_view():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/trash/get_document/get_document_core_view.yaml'):
created_doc = create_document(session)
created_doc.move_to_trash()
doc = session.trash.get(created_doc.id)
assert_core_document(doc)
def test_should_get_document_bib_view():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/trash/get_document/get_document_bib_view.yaml'):
created_doc = create_document(session)
created_doc.move_to_trash()
doc = session.trash.get(created_doc.id, view='bib')
assert_core_document(doc)
assert_bib_document(doc)
def test_should_get_document_client_view():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/trash/get_document/get_document_client_view.yaml'):
created_doc = create_document(session)
created_doc.move_to_trash()
doc = session.trash.get(created_doc.id, view='client')
assert_core_document(doc)
assert_client_document(doc)
def test_should_get_document_tags_view():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/trash/get_document/get_document_tags_view.yaml'):
created_doc = create_document(session)
created_doc.move_to_trash()
doc = session.trash.get(created_doc.id, view='tags')
assert_core_document(doc)
assert_tags_document(doc)
def test_should_get_document_all_view():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/trash/get_document/get_document_all_view.yaml'):
created_doc = create_document(session)
created_doc.move_to_trash()
doc = session.trash.get(created_doc.id, view='all')
assert_all_document(doc)
def test_should_be_able_to_get_profile_for_document():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/trash/get_document/get_profile_for_document.yaml'):
created_doc = create_document(session)
created_doc.move_to_trash()
doc = session.trash.get(created_doc.id)
profile = session.profiles.me
assert doc.profile.display_name == profile.display_name
|
homeassistant/components/tasmota/fan.py | mtarjoianu/core | 30,023 | 12737821 | <gh_stars>1000+
"""Support for Tasmota fans."""
from __future__ import annotations
from typing import Any
from hatasmota import const as tasmota_const, fan as tasmota_fan
from hatasmota.entity import TasmotaEntity as HATasmotaEntity
from hatasmota.models import DiscoveryHashType
from homeassistant.components.fan import (
DOMAIN as FAN_DOMAIN,
FanEntity,
FanEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
)
from .const import DATA_REMOVE_DISCOVER_COMPONENT
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
ORDERED_NAMED_FAN_SPEEDS = [
tasmota_const.FAN_SPEED_LOW,
tasmota_const.FAN_SPEED_MEDIUM,
tasmota_const.FAN_SPEED_HIGH,
] # off is not included
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Tasmota fan dynamically through discovery."""
@callback
def async_discover(
tasmota_entity: HATasmotaEntity, discovery_hash: DiscoveryHashType
) -> None:
"""Discover and add a Tasmota fan."""
async_add_entities(
[TasmotaFan(tasmota_entity=tasmota_entity, discovery_hash=discovery_hash)]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(FAN_DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(FAN_DOMAIN),
async_discover,
)
class TasmotaFan(
TasmotaAvailability,
TasmotaDiscoveryUpdate,
FanEntity,
):
"""Representation of a Tasmota fan."""
_attr_supported_features = FanEntityFeature.SET_SPEED
_tasmota_entity: tasmota_fan.TasmotaFan
def __init__(self, **kwds: Any) -> None:
"""Initialize the Tasmota fan."""
self._state: int | None = None
super().__init__(
**kwds,
)
async def async_added_to_hass(self) -> None:
"""Subscribe to MQTT events."""
self._tasmota_entity.set_on_state_callback(self.fan_state_updated)
await super().async_added_to_hass()
@callback
def fan_state_updated(self, state: int, **kwargs: Any) -> None:
"""Handle state updates."""
self._state = state
self.async_write_ha_state()
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
return len(ORDERED_NAMED_FAN_SPEEDS)
@property
def percentage(self) -> int | None:
"""Return the current speed percentage."""
if self._state is None:
return None
if self._state == 0:
return 0
return ordered_list_item_to_percentage(ORDERED_NAMED_FAN_SPEEDS, self._state)
async def async_set_percentage(self, percentage: int) -> None:
"""Set the speed of the fan."""
if percentage == 0:
await self.async_turn_off()
else:
tasmota_speed = percentage_to_ordered_list_item(
ORDERED_NAMED_FAN_SPEEDS, percentage
)
await self._tasmota_entity.set_speed(tasmota_speed)
async def async_turn_on(
self,
percentage: int | None = None,
preset_mode: str | None = None,
**kwargs: Any,
) -> None:
"""Turn the fan on."""
# Tasmota does not support turning a fan on with implicit speed
await self.async_set_percentage(
percentage
or ordered_list_item_to_percentage(
ORDERED_NAMED_FAN_SPEEDS, tasmota_const.FAN_SPEED_MEDIUM
)
)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the fan off."""
await self._tasmota_entity.set_speed(tasmota_const.FAN_SPEED_OFF)
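# --- Editor's hedged illustration (not part of the original module) ---
# With the three named speeds in ORDERED_NAMED_FAN_SPEEDS, Home Assistant
# percentages map roughly as low -> 33 %, medium -> 66 %, high -> 100 %,
# and 0 % means off, e.g.:
#   ordered_list_item_to_percentage(
#       ORDERED_NAMED_FAN_SPEEDS, tasmota_const.FAN_SPEED_MEDIUM)  # ~66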
|
azure-devops/azext_devops/devops_sdk/v6_0/search/search_client.py | dhilmathy/azure-devops-cli-extension | 248 | 12737833 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class SearchClient(Client):
"""Search
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(SearchClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'ea48a0a1-269c-42d8-b8ad-ddc8fcdcf578'
def fetch_scroll_code_search_results(self, request, project=None):
"""FetchScrollCodeSearchResults.
[Preview API] Provides a set of results for the search text.
:param :class:`<ScrollSearchRequest> <azure.devops.v6_0.search.models.ScrollSearchRequest>` request: The Code Search Request.
:param str project: Project ID or project name
:rtype: :class:`<CodeSearchResponse> <azure.devops.v6_0.search.models.CodeSearchResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(request, 'ScrollSearchRequest')
response = self._send(http_method='POST',
location_id='852dac94-e8f7-45a2-9910-927ae35766a2',
version='6.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('CodeSearchResponse', response)
def fetch_code_search_results(self, request, project=None):
"""FetchCodeSearchResults.
[Preview API] Provides a set of results for the search text.
:param :class:`<CodeSearchRequest> <azure.devops.v6_0.search.models.CodeSearchRequest>` request: The Code Search Request.
:param str project: Project ID or project name
:rtype: :class:`<CodeSearchResponse> <azure.devops.v6_0.search.models.CodeSearchResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(request, 'CodeSearchRequest')
response = self._send(http_method='POST',
location_id='e7f29993-5b82-4fca-9386-f5cfe683d524',
version='6.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('CodeSearchResponse', response)
def fetch_package_search_results(self, request):
"""FetchPackageSearchResults.
[Preview API] Provides a set of results for the search text.
:param :class:`<PackageSearchRequest> <azure.devops.v6_0.search.models.PackageSearchRequest>` request: The Package Search Request.
:rtype: :class:`<PackageSearchResponse> <azure.devops.v6_0.search.models.PackageSearchResponse>`
"""
content = self._serialize.body(request, 'PackageSearchRequest')
response = self._send(http_method='POST',
location_id='f62ada48-eedc-4c8e-93f0-de870e4ecce0',
version='6.0-preview.1',
content=content)
response_object = models.PackageSearchResponse()
response_object.content = self._deserialize('PackageSearchResponseContent', response)
response_object.activity_id = response.headers.get('ActivityId')
return response_object
def get_repository_status(self, project, repository):
"""GetRepositoryStatus.
[Preview API] Provides status of Repository.
:param str project: Project ID or project name
:param str repository: Repository ID or repository name.
:rtype: :class:`<RepositoryStatusResponse> <azure.devops.v6_0.search.models.RepositoryStatusResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository is not None:
route_values['repository'] = self._serialize.url('repository', repository, 'str')
response = self._send(http_method='GET',
location_id='1f60303c-7261-4387-80f1-742a2ecf2964',
version='6.0-preview.1',
route_values=route_values)
return self._deserialize('RepositoryStatusResponse', response)
def get_tfvc_repository_status(self, project):
"""GetTfvcRepositoryStatus.
[Preview API] Provides status of TFVC Repository.
:param str project: Project ID or project name
:rtype: :class:`<TfvcRepositoryStatusResponse> <azure.devops.v6_0.search.models.TfvcRepositoryStatusResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='d5bf4e52-e0af-4626-8c50-8a80b18fa69f',
version='6.0-preview.1',
route_values=route_values)
return self._deserialize('TfvcRepositoryStatusResponse', response)
def fetch_wiki_search_results(self, request, project=None):
"""FetchWikiSearchResults.
[Preview API] Provides a set of results for the search request.
:param :class:`<WikiSearchRequest> <azure.devops.v6_0.search.models.WikiSearchRequest>` request: The Wiki Search Request.
:param str project: Project ID or project name
:rtype: :class:`<WikiSearchResponse> <azure.devops.v6_0.search.models.WikiSearchResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(request, 'WikiSearchRequest')
response = self._send(http_method='POST',
location_id='e90e7664-7049-4100-9a86-66b161d81080',
version='6.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WikiSearchResponse', response)
def fetch_work_item_search_results(self, request, project=None):
"""FetchWorkItemSearchResults.
[Preview API] Provides a set of results for the search text.
:param :class:`<WorkItemSearchRequest> <azure.devops.v6_0.search.models.WorkItemSearchRequest>` request: The Work Item Search Request.
:param str project: Project ID or project name
:rtype: :class:`<WorkItemSearchResponse> <azure.devops.v6_0.search.models.WorkItemSearchResponse>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(request, 'WorkItemSearchRequest')
response = self._send(http_method='POST',
location_id='73b2c9e2-ff9e-4447-8cda-5f5b21ff7cae',
version='6.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemSearchResponse', response)
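# --- Editor's hedged usage sketch (not generated code) ---
# Assumes a SearchClient obtained from an authenticated azure-devops Connection;
# the `search_text` / `top` keyword names on CodeSearchRequest are assumptions
# based on the serialized model and may differ between API versions.
def _example_code_search(search_client, project):
    request = models.CodeSearchRequest(search_text='TODO', top=10)
    # Matching CodeResult entries are expected on the response's `results` field.
    return search_client.fetch_code_search_results(request, project=project)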
|
examples/hid_keyboard_shortcuts.py | jersu11/Adafruit_CircuitPython_HID | 174 | 12737842 | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
import digitalio
import usb_hid
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
kbd = Keyboard(usb_hid.devices)
# define buttons. these can be any physical switches/buttons, but the values
# here work out-of-the-box with a CircuitPlayground Express' A and B buttons.
swap = digitalio.DigitalInOut(board.D4)
swap.direction = digitalio.Direction.INPUT
swap.pull = digitalio.Pull.DOWN
search = digitalio.DigitalInOut(board.D5)
search.direction = digitalio.Direction.INPUT
search.pull = digitalio.Pull.DOWN
while True:
# press ALT+TAB to swap windows
if swap.value:
kbd.send(Keycode.ALT, Keycode.TAB)
# press CTRL+K, which in a web browser will open the search dialog
elif search.value:
kbd.send(Keycode.CONTROL, Keycode.K)
time.sleep(0.1)
|
pyecharts/render/engine.py | swuecho/pyecharts | 11,032 | 12737843 | <gh_stars>1000+
import os
from collections.abc import Iterable
from jinja2 import Environment
from ..commons import utils
from ..datasets import EXTRA, FILENAMES
from ..globals import CurrentConfig, NotebookType
from ..types import Any, Optional
from .display import HTML, Javascript
def write_utf8_html_file(file_name: str, html_content: str):
with open(file_name, "w+", encoding="utf-8") as html_file:
html_file.write(html_content)
class RenderEngine:
def __init__(self, env: Optional[Environment] = None):
self.env = env or CurrentConfig.GLOBAL_ENV
@staticmethod
def generate_js_link(chart: Any) -> Any:
if not chart.js_host:
chart.js_host = CurrentConfig.ONLINE_HOST
links = []
for dep in chart.js_dependencies.items:
# TODO: if?
if dep.startswith("https://api.map.baidu.com"):
links.append(dep)
if dep in FILENAMES:
f, ext = FILENAMES[dep]
links.append("{}{}.{}".format(chart.js_host, f, ext))
else:
for url, files in EXTRA.items():
if dep in files:
f, ext = files[dep]
links.append("{}{}.{}".format(url, f, ext))
break
chart.dependencies = links
return chart
def render_chart_to_file(self, template_name: str, chart: Any, path: str, **kwargs):
"""
Render a chart or page to local html files.
:param chart: A Chart or Page object
:param path: The destination file that the html code is written to
:param template_name: The name of the template file.
"""
tpl = self.env.get_template(template_name)
html = utils.replace_placeholder(
tpl.render(chart=self.generate_js_link(chart), **kwargs)
)
write_utf8_html_file(path, html)
def render_chart_to_template(self, template_name: str, chart: Any, **kwargs) -> str:
tpl = self.env.get_template(template_name)
return utils.replace_placeholder(
tpl.render(chart=self.generate_js_link(chart), **kwargs)
)
def render_chart_to_notebook(self, template_name: str, **kwargs) -> str:
tpl = self.env.get_template(template_name)
return utils.replace_placeholder(tpl.render(**kwargs))
def render(
chart, path: str, template_name: str, env: Optional[Environment], **kwargs
) -> str:
RenderEngine(env).render_chart_to_file(
template_name=template_name, chart=chart, path=path, **kwargs
)
return os.path.abspath(path)
def render_embed(
chart, template_name: str, env: Optional[Environment], **kwargs
) -> str:
return RenderEngine(env).render_chart_to_template(
template_name=template_name, chart=chart, **kwargs
)
def render_notebook(self, notebook_template, lab_template):
instance = self if isinstance(self, Iterable) else (self,)
if CurrentConfig.NOTEBOOK_TYPE == NotebookType.JUPYTER_NOTEBOOK:
require_config = utils.produce_require_dict(self.js_dependencies, self.js_host)
return HTML(
RenderEngine().render_chart_to_notebook(
template_name=notebook_template,
charts=instance,
config_items=require_config["config_items"],
libraries=require_config["libraries"],
)
)
if CurrentConfig.NOTEBOOK_TYPE == NotebookType.JUPYTER_LAB:
return HTML(
RenderEngine().render_chart_to_notebook(
template_name=lab_template, charts=instance
)
)
if CurrentConfig.NOTEBOOK_TYPE == NotebookType.NTERACT:
return HTML(self.render_embed())
if CurrentConfig.NOTEBOOK_TYPE == NotebookType.ZEPPELIN:
print("%html " + self.render_embed())
def load_javascript(chart):
scripts = []
for dep in chart.js_dependencies.items:
f, ext = FILENAMES[dep]
scripts.append("{}{}.{}".format(CurrentConfig.ONLINE_HOST, f, ext))
return Javascript(lib=scripts)
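# --- Editor's hedged usage sketch (not part of the original module) ---
# These module-level helpers are bound onto chart classes elsewhere in
# pyecharts; typical end-user code calls the bound methods, e.g.:
#   from pyecharts.charts import Bar
#   bar = Bar().add_xaxis(["a", "b"]).add_yaxis("series", [1, 2])
#   bar.render("render.html")   # render(): writes a standalone HTML file
#   html = bar.render_embed()   # render_embed(): returns the HTML as a string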
|
models/__init__.py | ModelZoo/BostonHousing | 191 | 12737845 | from .house import HousePricePredictionModel |
tests/test_act_quantized_ops.py | yachuan/actnn | 162 | 12737897 | <filename>tests/test_act_quantized_ops.py
"""Test the activation quantized ops"""
import math
import numpy as np
import torch
from torch.nn import functional as F
from timeit_v2 import py_benchmark
from actnn import QScheme, QBNScheme, config, get_memory_usage, compute_tensor_bytes
from actnn.ops import ext_backward_func, ext_quantization
from actnn.ops import conv2d as quantized_conv2d, batch_norm as quantized_batch_norm, \
adaptive_avg_pool2d as quantized_adaptive_avg_pool2d
def test_relu_correctness():
print("========== ReLU Correctness Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(128, 56, 56, 31).astype(dtype)
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
output = func(data)
output.backward(torch.ones_like(output))
return [x.detach().cpu().numpy() for x in [output, data.grad]]
output_ref, grad_data_ref = test_implementation(F.relu)
output_us, grad_data_us = test_implementation(ext_quantization.act_quantized_relu)
np.testing.assert_allclose(output_ref, output_us)
np.testing.assert_allclose(grad_data_ref, grad_data_us)
def test_relu_memory():
print("========== ReLU Memory Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(128, 56, 56, 32).astype(dtype)
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
before = get_memory_usage()
for i in range(10):
data = func(data)
after = get_memory_usage()
return after - before
usage_ref = test_implementation(F.relu)
usage_us = test_implementation(ext_quantization.act_quantized_relu)
print("Exact. Usage: %.2f MB" % (usage_ref / 2 ** 20))
print("Quantized. Usage: %.2f MB" % (usage_us / 2 ** 20))
def test_relu_speed():
print("========== ReLU Speed Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(256, 56, 56, 32).astype(dtype)
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
stmt = "func(data)"
t_forward = py_benchmark(stmt, {**globals(), **locals()},
setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
output = func(data)
head = torch.ones_like(output)
stmt = "output.backward(head, retain_graph=True)"
t_backward = py_benchmark(stmt, {**globals(), **locals()},
setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
return t_forward, t_backward
forward_ref, backward_ref = test_implementation(F.relu)
forward_us, backward_us = test_implementation(ext_quantization.act_quantized_relu)
print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
(forward_ref * 1e3, backward_ref * 1e3, (forward_ref + backward_ref) * 1e3))
print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
(forward_us * 1e3, backward_us * 1e3, (forward_us + backward_us) * 1e3))
def test_dropout_memory():
print("========== Dropout Memory Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(128, 56, 56, 32).astype(dtype)
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
before = get_memory_usage()
for i in range(10):
data = func(data, 0.2)
after = get_memory_usage()
return after - before
usage_ref = test_implementation(F.dropout)
usage_us = test_implementation(ext_quantization.act_quantized_dropout)
print("Exact. Usage: %.2f MB" % (usage_ref / 2 ** 20))
print("Quantized. Usage: %.2f MB" % (usage_us / 2 ** 20))
def test_dropout_speed():
print("========== Dropout Speed Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(256, 56, 56, 32).astype(dtype)
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
stmt = "func(data, 0.2)"
t_forward = py_benchmark(stmt, {**globals(), **locals()},
setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
output = func(data, 0.2)
head = torch.ones_like(output)
stmt = "output.backward(head, retain_graph=True)"
t_backward = py_benchmark(stmt, {**globals(), **locals()},
setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
return t_forward, t_backward
forward_ref, backward_ref = test_implementation(F.dropout)
forward_us, backward_us = test_implementation(ext_quantization.act_quantized_dropout)
print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
(forward_ref * 1e3, backward_ref * 1e3, (forward_ref + backward_ref) * 1e3))
print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
(forward_us * 1e3, backward_us * 1e3, (forward_us + backward_us) * 1e3))
def test_adaptive_avg_pool2d_correctness():
"""Test the correctness of computation results"""
# arguments and test data
N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = 4, 28, 28, 256, 256, 3, 1, 1, 1, 1
data_np = np.random.randn(N, CI, H, W).astype('float32')
head_np = np.random.randn(N, CI, 1, 1).astype('float32')
output_size = 1, 1
def test_implementation(func):
torch.manual_seed(0)
data = torch.tensor(data_np).to("cuda").requires_grad_()
head = torch.tensor(head_np).to("cuda")
output = func(data, output_size)
output.backward(head)
return [x.detach().cpu().numpy() for x in [output, data.grad]]
output_ref, grad_data_ref = test_implementation(F.adaptive_avg_pool2d)
output_us, grad_data_us = test_implementation(quantized_adaptive_avg_pool2d.apply)
atol = 1e-4
rtol = 1e-4
print("========== AdaptiveAvgPool2d Correctness Test ==========")
np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
def test_adaptive_avg_pool2d_memory():
"""Test the memory usage"""
# arguments and test data
N, H, W, CI = 1024, 4, 4, 1024
data_np = np.random.randn(N, CI, H, W).astype('float32')
output_size = (1, 1)
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
output = func(data, output_size)
for i in range(10):
output = func(output, output_size)
return get_memory_usage() - compute_tensor_bytes([data, output])
usage_ref = test_implementation(F.adaptive_avg_pool2d)
usage_us = test_implementation(quantized_adaptive_avg_pool2d.apply)
print("========== AdaptiveAvgPool2d Memory Test ==========")
print("Exact. Usage: %.3f MB" % (usage_ref / 2 ** 20))
print("Quantized. Usage: %.2f MB" % (usage_us / 2 ** 20))
def test_max_pool2d_correctness():
"""Test the correctness of computation results"""
# arguments and test data
N, H, W, CI, kernel_size, stride, padding, dilation = 4, 28, 28, 8, 3, 2, 1, 1
ceil_mode, return_indices = False, False
print("========== MaxPool2d Correctness Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(N, CI, H, W).astype(dtype)
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
output = func(data, (kernel_size, kernel_size), (stride, stride), (padding, padding),
(dilation, dilation), ceil_mode, return_indices)
output.backward(torch.ones_like(output))
return [x.detach().cpu().numpy() for x in [output, data.grad]]
output_ref, grad_data_ref = test_implementation(F.max_pool2d)
output_us, grad_data_us = test_implementation(ext_quantization.act_quantized_max_pool2d)
atol = 1e-4
rtol = 1e-4
np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
def test_max_pool2d_memory():
"""Test the memory usage"""
# arguments and test data
N, H, W, CI, kernel_size, stride, padding, dilation = 128, 28, 28, 8, 3, 2, 1, 1
ceil_mode, return_indices = False, False
print("========== MaxPool2d Memory Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(N, CI, H, W).astype(dtype)
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
output = func(data, (kernel_size, kernel_size), (stride, stride), (padding, padding),
(dilation, dilation), ceil_mode, return_indices)
return get_memory_usage() - compute_tensor_bytes([output, data])
usage_ref = test_implementation(F.max_pool2d)
usage_us = test_implementation(ext_quantization.act_quantized_max_pool2d)
print("Exact. Usage: %.3f MB" % (usage_ref / 2 ** 20))
print("Quantized. Usage: %.3f MB" % (usage_us / 2 ** 20))
def test_max_pool2d_speed():
"""Test the correctness of computation results"""
# arguments and test data
N, H, W, CI, kernel_size, stride, padding, dilation = 128, 28, 28, 128, 3, 2, 1, 1
ceil_mode, return_indices = False, False
print("========== MaxPool2d Speed Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(N, CI, H, W).astype(dtype)
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
stmt = "func(data, (kernel_size, kernel_size), (stride, stride), (padding, padding),"\
"(dilation, dilation), ceil_mode, return_indices)"
t_forward = py_benchmark(stmt, {**globals(), **locals()},
setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
output = func(data, (kernel_size, kernel_size), (stride, stride), (padding, padding),
(dilation, dilation), ceil_mode, return_indices)
head = torch.ones_like(output)
stmt = "output.backward(head, retain_graph=True)"
t_backward = py_benchmark(stmt, {**globals(), **locals()},
setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
return t_forward, t_backward
forward_ref, backward_ref = test_implementation(F.max_pool2d)
forward_us, backward_us = test_implementation(ext_quantization.act_quantized_max_pool2d)
print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
(forward_ref * 1e3, backward_ref * 1e3, (forward_ref + backward_ref) * 1e3))
print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
(forward_us * 1e3, backward_us * 1e3, (forward_us + backward_us) * 1e3))
def test_upsample_memory():
"""Test the memory usage"""
# arguments and test data
N, H, W, CI = 128, 28, 28, 8
size, scale_factor, mode, align_corners = None, 2, 'bilinear', False
data_np = np.random.randn(N, CI, H, W).astype('float32')
def test_implementation(func):
data = torch.tensor(data_np).to("cuda").requires_grad_()
output = func(data, size, scale_factor, mode, align_corners)
output = func(output, size, scale_factor, mode, align_corners)
output = func(output, size, scale_factor, mode, align_corners)
return get_memory_usage() - compute_tensor_bytes([output, data])
usage_ref = test_implementation(F.interpolate)
print("========== Upsample Memory Test ==========")
print("Exact. Usage: %.3f MB" % (usage_ref / 2 ** 20))
def test_bn_correctness():
# arguments and test data
N, H, W, CI = 16, 28, 28, 256
data_np = np.random.randn(N, CI, H, W).astype('float32') * 0.01
running_mean_np = np.random.randn(CI).astype('float32')
running_var_np = np.random.randn(CI).astype('float32')
bn_weight_np = np.random.randn(CI).astype('float32')
bn_bias_np = np.random.randn(CI).astype('float32')
training = False
bn_scheme = QBNScheme()
config.compress_activation = False
def test_implementation(func):
torch.manual_seed(0)
data = torch.tensor(data_np).to("cuda").requires_grad_()
running_mean = torch.tensor(running_mean_np).to("cuda")
running_var = torch.tensor(running_var_np).to("cuda")
bn_weight = torch.tensor(bn_weight_np).to("cuda").requires_grad_()
bn_bias = torch.tensor(bn_bias_np).to("cuda").requires_grad_()
if func == F.batch_norm:
output = func(data, running_mean, running_var, bn_weight, bn_bias, training, 0.1, 1e-5)
else:
output = func(data, running_mean, running_var, bn_weight, bn_bias, training, 0.1, 1e-5, bn_scheme)
output.backward(torch.ones_like(output))
return [x.detach().cpu().numpy() for x in [output, data.grad, bn_weight.grad, bn_bias.grad]]
output_ref, grad_data_ref, grad_weight_ref, grad_bias_ref = test_implementation(F.batch_norm)
output_us, grad_data_us, grad_weight_us, grad_bias_us = test_implementation(quantized_batch_norm.apply)
atol = 1e-3
rtol = 1e-3
print("========== BN Correctness Test ==========")
np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_weight_ref, grad_weight_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_bias_ref, grad_bias_us, atol=atol, rtol=rtol)
def test_conv2d_correctness():
"""Test the correctness of computation results"""
# arguments and test data
N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = 4, 28, 28, 256, 256, 3, 1, 1, 1, 1
print("========== Conv2d Correctness Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(N, CI, H, W).astype(dtype)
weight_np = np.random.randn(CO, CI // groups, kernel_size, kernel_size).astype(dtype)
bias_np = np.random.randn(CO).astype(dtype)
def test_implementation(func, scheme):
torch.manual_seed(0)
data = torch.tensor(data_np).to("cuda").requires_grad_()
weight = torch.tensor(weight_np).to("cuda").requires_grad_()
bias = torch.tensor(bias_np).to("cuda").requires_grad_()
output = func(data, weight, bias, stride, padding, dilation, groups, scheme)
output.backward(torch.ones_like(output))
return [x.detach().cpu().numpy() for x in [output, data.grad, weight.grad, bias.grad]]
config.activation_compression_bits = [16]
config.initial_bits = 16
config.perlayer = False
config.use_gradient = False
scheme = QScheme(None)
config.simulate = True
output_ref, grad_data_ref, grad_weight_ref, grad_bias_ref = test_implementation(quantized_conv2d.apply, scheme)
config.simulate = False
output_us, grad_data_us, grad_weight_us, grad_bias_us = test_implementation(quantized_conv2d.apply, scheme)
atol = 1e-2
rtol = 1e-2
assert output_ref.dtype == output_us.dtype
np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_weight_ref, grad_weight_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_bias_ref, grad_bias_us, atol=atol, rtol=rtol)
def test_conv2d_correctness_per_group_only():
"""Test the correctness of computation results
NOTE: This test will fail on large shapes or low bits.
To make this test pass, we should disable stochastic noise.
"""
# arguments and test data
N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = 2, 16, 16, 4, 4, 1, 1, 1, 1, 1
print("========== Conv2d Correctness Test (per group only) ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(N, CI, H, W).astype(dtype)
weight_np = np.random.randn(CO, CI // groups, kernel_size, kernel_size).astype(dtype)
bias_np = np.random.randn(CO).astype(dtype)
def test_implementation(func, scheme):
torch.manual_seed(0)
data = torch.tensor(data_np).to("cuda").requires_grad_()
weight = torch.tensor(weight_np).to("cuda").requires_grad_()
bias = torch.tensor(bias_np).to("cuda").requires_grad_()
output = func(data, weight, bias, stride, padding, dilation, groups, scheme)
output.backward(torch.ones_like(output))
return [x.detach().cpu().numpy() for x in [output, data.grad, weight.grad, bias.grad]]
config.activation_compression_bits = [8]
config.perlayer = False
config.use_gradient = False
config.simulate = True
output_ref, grad_data_ref, grad_weight_ref, grad_bias_ref = test_implementation(quantized_conv2d.apply, None)
config.simulate = False
output_us, grad_data_us, grad_weight_us, grad_bias_us = test_implementation(quantized_conv2d.apply, None)
atol = 1e-1
rtol = 1e-1
assert output_ref.dtype == output_us.dtype
np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_weight_ref, grad_weight_us, atol=atol, rtol=rtol)
np.testing.assert_allclose(grad_bias_ref, grad_bias_us, atol=atol, rtol=rtol)
def test_conv2d_speed():
"""Test the speed of convolution layer"""
# arguments and test data
N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = 128, 28, 28, 256, 256, 3, 1, 1, 1, 1
print("========== Conv2d Speed Test ==========")
for dtype in ['float32', 'float16']:
print(f"test {dtype}...")
data_np = np.random.randn(N, CI, H, W).astype(dtype)
weight_np = np.random.randn(CO, CI // groups, kernel_size, kernel_size).astype(dtype)
bias_np = np.random.randn(CO).astype(dtype)
scheme = QScheme(None)
def test_implementation(func, scheme):
data = torch.tensor(data_np).to("cuda").requires_grad_()
weight = torch.tensor(weight_np).to("cuda").requires_grad_()
bias = torch.tensor(bias_np).to("cuda").requires_grad_()
if func == quantized_conv2d.apply:
output = func(data, weight, bias, stride, padding, dilation, groups, scheme)
stmt = "func(data, weight, bias, stride, padding, dilation, groups, scheme)"
else:
output = func(data, weight, bias, stride, padding, dilation, groups)
stmt = "func(data, weight, bias, stride, padding, dilation, groups)"
t_forward = py_benchmark(stmt, {**globals(), **locals()},
setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
head = torch.ones_like(output)
stmt = "output.backward(head, retain_graph=True)"
t_backward = py_benchmark(stmt, {**globals(), **locals()},
setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
return t_forward, t_backward
config.activation_compression_bits = [16]
config.initial_bits = 16
config.perlayer = False
config.use_gradient = False
config.simulate = False
scheme = QScheme(None)
forward_ref, backward_ref = test_implementation(F.conv2d, None)
forward_us, backward_us = test_implementation(quantized_conv2d.apply, scheme)
print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
(forward_ref * 1e3, backward_ref * 1e3, (forward_ref + backward_ref) * 1e3))
print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
(forward_us * 1e3, backward_us * 1e3, (forward_us + backward_us) * 1e3))
def test_conv2d_memory_analytical():
"""Compute the memory of activation analytically"""
# arguments and test data
N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = 256, 28, 28, 256, 256, 3, 1, 1, 1, 1
data_np = np.random.randn(N, CI, H, W).astype('float32')
weight_np = np.random.randn(CO, CI // groups, kernel_size, kernel_size).astype('float32')
bias_np = np.random.randn(CO).astype('float32')
running_mean = np.zeros((CO,), dtype='float32')
running_var = np.ones((CO,), dtype='float32')
bn_weight = np.random.randn(CO).astype('float32')
bn_bias = np.random.randn(CO).astype('float32')
scheme = QScheme(num_locations=kernel_size**2)
bn_scheme = QBNScheme()
def test_implementation(conv_func, relu_func, bn_func, n_layers=10):
        # allocate input and weights
        data = torch.tensor(data_np).to("cuda").requires_grad_(False)
weights = []
running_means = []
running_vars = []
bn_weights = []
bn_biass = []
for i in range(n_layers):
weights.append(torch.tensor(weight_np).to("cuda").requires_grad_())
running_means.append(torch.tensor(running_mean).to("cuda"))
running_vars.append(torch.tensor(running_var).to("cuda"))
bn_weights.append(torch.tensor(bn_weight).to("cuda").requires_grad_())
bn_biass.append(torch.tensor(bn_bias).to("cuda").requires_grad_())
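        # snapshot allocator usage before the forward pass; activation memory is then
        # estimated as (usage after forward) - (this snapshot) - (size of the scalar output)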
before_size = get_memory_usage(False)
# forward n convolution layers
output = data
for i in range(n_layers):
if conv_func == quantized_conv2d.apply:
output = conv_func(output, weights[i], None, stride, padding, dilation, groups, scheme)
output = bn_func(output, running_means[i], running_vars[i], bn_weights[i], bn_biass[i], True, 0.1, 1e-5, bn_scheme)
else:
output = conv_func(output, weights[i], None, stride, padding, dilation, groups)
output = bn_func(output, running_means[i], running_vars[i], bn_weights[i], bn_biass[i], True, 0.1, 1e-5)
output = relu_func(output)
output = output.sum()
after_size = get_memory_usage(False)
output_size = compute_tensor_bytes(output)
return after_size / 1024**2, (after_size - before_size - output_size) / 1024**2
total_size_ref, act_size_ref = test_implementation(F.conv2d, lambda x: F.relu(x, inplace=True), F.batch_norm)
config.simulate = True
total_size_sim, act_size_sim = test_implementation(quantized_conv2d.apply,
ext_quantization.act_quantized_relu, quantized_batch_norm.apply)
config.simulate = False
total_size_us, act_size_us = test_implementation(quantized_conv2d.apply,
ext_quantization.act_quantized_relu, quantized_batch_norm.apply)
print("========== Conv2d Activation Memory Test (bits = %d) ==========" % (config.activation_compression_bits))
print("Exact. Total: %7.2f MB\tAct: %7.2f MB" % (total_size_ref, act_size_ref))
print("Simulation. Total: %7.2f MB\tAct: %7.2f MB" % (total_size_sim, act_size_sim))
print("Quantized. Total: %7.2f MB\tAct: %7.2f MB" % (total_size_us, act_size_us))
def test_conv2d_memory_max_batch_size():
"""Find the maximum batch size by gradually increasing the batch size until hitting Out-of-memory error"""
for device in ["cuda"]:
def test_implementation(func, n_layers, batch_sizes):
def run_batch_size(batch_size):
N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = batch_size, 28, 28, 256, 256, 3, 1, 1, 1, 1
data_np = np.random.uniform(size=(N, CI, H, W)).astype('float32')
weight_np = np.random.uniform(size=(CO, CI // groups, kernel_size, kernel_size)).astype('float32')
bias_np = np.random.uniform(size=(CO,)).astype('float32')
# allocate input and weights
data = torch.tensor(data_np).to("cuda").requires_grad_(False)
weights = []
for i in range(n_layers):
weight = torch.tensor(weight_np).to("cuda").requires_grad_()
weights.append(weight)
before_size = get_memory_usage(False)
# forward n convolution layers
output = data
for i in range(n_layers):
output = func(output, weights[i], None, stride, padding, dilation, groups)
output = output.sum()
after_size = get_memory_usage(False)
output_size = compute_tensor_bytes(output)
return after_size / 1024**2, (after_size - before_size - output_size) / 1024**2
# try gradually increased batch sizes
try:
for i, batch_size in enumerate(batch_sizes):
total_size_ref, act_size_ref = run_batch_size(batch_size)
print("batch_size: %4d\t" % batch_size, end="")
print("total_memory: %7.2f MB\tact_memory: %7.2f MB" % (total_size_ref, act_size_ref))
except RuntimeError:
pass
finally:
print("Maximum batch size: %d" % (batch_sizes[i-1]))
print("========== Conv2d Batch Size Test ==========")
print("---> Exact")
test_implementation(F.conv2d, n_layers=50, batch_sizes=[100, 200, 250, 300, 350, 400, 450, 500, 1000])
print("---> Quantized")
test_implementation(act_quantized_conv2d.apply, n_layers=50, batch_sizes=[100, 200, 250, 500, 1000, 2200, 2300, 2400, 3000, 4000])
if __name__ == "__main__":
test_relu_correctness()
test_relu_memory()
test_relu_speed()
#test_dropout_memory()
#test_dropout_speed()
#test_adaptive_avg_pool2d_correctness()
#test_adaptive_avg_pool2d_memory()
#test_max_pool2d_correctness()
#test_max_pool2d_memory()
#test_max_pool2d_speed()
#test_upsample_memory()
#test_bn_correctness()
test_conv2d_correctness()
#test_conv2d_correctness_per_group_only()
#test_conv2d_speed()
#config.activation_compression_bits = 2
#test_conv2d_memory_analytical()
#config.activation_compression_bits = 2
#test_conv2d_memory_max_batch_size()
|
core/providers/torrent_modules/torrentdownloads.py | 0x20Man/Watcher3 | 320 | 12737905 | <gh_stars>100-1000
import logging
from xml.etree.cElementTree import fromstring
from xmljson import yahoo
import core
from core.helpers import Url
logging = logging.getLogger(__name__)
def search(imdbid, term):
proxy_enabled = core.CONFIG['Server']['Proxy']['enabled']
logging.info('Performing backlog search on TorrentDownloads for {}.'.format(imdbid))
url = 'http://www.torrentdownloads.me/rss.xml?type=search&search={}'.format(term)
try:
if proxy_enabled and core.proxy.whitelist('http://www.torrentdownloads.me') is True:
response = Url.open(url, proxy_bypass=True).text
else:
response = Url.open(url).text
if response:
return _parse(response, imdbid)
else:
return []
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('TorrentDownloads search failed.', exc_info=True)
return []
def get_rss():
proxy_enabled = core.CONFIG['Server']['Proxy']['enabled']
logging.info('Fetching latest RSS from TorrentDownloads.')
url = 'http://www.torrentdownloads.me/rss2/last/4'
try:
if proxy_enabled and core.proxy.whitelist('http://www.torrentdownloads.me') is True:
response = Url.open(url, proxy_bypass=True).text
else:
response = Url.open(url).text
if response:
return _parse(response, None)
else:
return []
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('TorrentDownloads RSS fetch failed.', exc_info=True)
return []
def _parse(xml, imdbid):
logging.info('Parsing TorrentDownloads results.')
try:
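        # xmljson's yahoo convention turns the RSS XML tree into nested dicts;
        # the torrent entries live under rss -> channel -> item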
items = yahoo.data(fromstring(xml))['rss']['channel']['item']
except Exception as e:
logging.error('Unexpected XML format from TorrentDownloads.', exc_info=True)
return []
results = []
for i in items:
result = {}
try:
result['score'] = 0
result['size'] = int(i['size'])
result['status'] = 'Available'
result['pubdate'] = None
result['title'] = i['title']
result['imdbid'] = imdbid
result['indexer'] = 'TorrentDownloads'
result['info_link'] = 'http://www.torrentdownloads.me{}'.format(i['link'])
result['torrentfile'] = core.providers.torrent.magnet(i['info_hash'])
result['guid'] = i['info_hash']
result['type'] = 'magnet'
result['downloadid'] = None
result['freeleech'] = 0
result['download_client'] = None
result['seeders'] = int(i['seeders'])
results.append(result)
except Exception as e:
logging.error('Error parsing TorrentDownloads XML.', exc_info=True)
continue
logging.info('Found {} results from TorrentDownloads.'.format(len(results)))
return results
|
aliyun-python-sdk-core/tests/auth/signers/test_sts_token_signer.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12737917 | <filename>aliyun-python-sdk-core/tests/auth/signers/test_sts_token_signer.py
# coding=utf-8
from tests import unittest
from aliyunsdkcore.auth.credentials import StsTokenCredential
from aliyunsdkcore.auth.signers.sts_token_signer import StsTokenSigner
from aliyunsdkcore.request import RpcRequest, RoaRequest
class TestStsTokenSigner(unittest.TestCase):
def test_sts_token_signer(self):
credential = StsTokenCredential(
'sts_access_key_id', 'sts_access_key_secret', 'sts_token')
signer = StsTokenSigner(credential)
# for rpc
request = RpcRequest("product", "version", "action_name")
self.assertIsNone(request.get_query_params().get("SecurityToken"))
headers, url = signer.sign('cn-hangzhou', request)
self.assertDictEqual(request.get_headers(), {'x-acs-action': 'action_name',
'x-acs-version': 'version',
'x-sdk-invoke-type': 'normal'})
self.assertEqual(request.get_query_params().get("SecurityToken"), 'sts_token')
# self.assertEqual(url, "/?SignatureVersion=1.0&Format=None"
# "&Timestamp=2018-12-02T11%3A03%3A01Z&RegionId=cn-hangzhou"
# "&AccessKeyId=access_key_id&SignatureMethod=HMAC-SHA1&Version=version"
# "&Signature=AmdeJh1ZOW6PgwM3%2BROhEnbKII4%3D&Action=action_name"
# "&SignatureNonce=d5e6e832-7f95-4f26-9e28-017f735721f8&SignatureType=')
request = RoaRequest("product", "version", "action_name", uri_pattern="/")
request.set_method('get')
self.assertIsNone(request.get_headers().get("x-acs-security-token"))
headers, url = signer.sign('cn-hangzhou', request)
self.assertEqual(request.get_headers().get("x-acs-security-token"), 'sts_token')
|
setup.py | theomega/django-ordered-model | 421 | 12737930 | <reponame>theomega/django-ordered-model
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open("requirements.txt") as f:
requires = f.read().splitlines()
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name="django-ordered-model",
long_description=long_description,
long_description_content_type="text/markdown",
version="3.4.3",
description="Allows Django models to be ordered and provides a simple admin interface for reordering them.",
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/django-ordered-model/django-ordered-model",
packages=[
"ordered_model",
"ordered_model.management",
"ordered_model.management.commands",
],
    install_requires=requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
],
zip_safe=False,
package_data={
"ordered_model": [
"static/ordered_model/arrow-up.gif",
"static/ordered_model/arrow-down.gif",
"static/ordered_model/arrow-top.gif",
"static/ordered_model/arrow-bottom.gif",
"locale/de/LC_MESSAGES/django.po",
"locale/de/LC_MESSAGES/django.mo",
"locale/pl/LC_MESSAGES/django.po",
"locale/pl/LC_MESSAGES/django.mo",
"templates/ordered_model/admin/order_controls.html",
]
},
)
|
models/commands/GetFileInformationFromDeviceCommand.py | pwnfoo/SirepRAT | 357 | 12737952 | <reponame>pwnfoo/SirepRAT
#!/usr/bin/env python3
"""
BSD 3-Clause License
Copyright (c) 2017, SafeBreach Labs
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Concrete implementation of the GetFileInformationFromDevice command
Author: <NAME> <<EMAIL>>
Date: 2018-02-04 08:03:08
"""
import common.utils as utils
from .SirepCommand import SirepCommand
from common.constants import INT_SIZE
from common.enums.CommandType import CommandType
class GetFileInformationFromDeviceCommand(SirepCommand):
"""Concrete implementation of the GetFileInformationFromDevice command"""
def __init__(self, remote_path):
"""Described in parent class"""
super(GetFileInformationFromDeviceCommand, self).__init__(CommandType.GetFileInformationFromDevice)
self.remote_path = remote_path
self.payload_length = self._calculate_payload_length()
def _calculate_payload_length(self):
"""
Returns the payload length of the command.
The payload length for this command type is the unicode length of the remote path.
"""
return 2*len(self.remote_path)
def serialize_sirep(self):
"""Described in parent class"""
return b''.join((
utils.pack_uint(self.command_type.value),
utils.pack_string(self.remote_path),
))
@staticmethod
    def deserialize_sirep(command_buffer):
"""Described in parent class"""
remote_path = utils.unpack_string(command_buffer[2*INT_SIZE:])
return GetFileInformationFromDeviceCommand(remote_path)
|
examples/pytorch/geniepath/ppi.py | ketyi/dgl | 9,516 | 12737962 | <gh_stars>1000+
import argparse
import numpy as np
import torch as th
import torch.optim as optim
from dgl.data import PPIDataset
from dgl.dataloading import GraphDataLoader
from sklearn.metrics import f1_score
from model import GeniePath, GeniePathLazy
def evaluate(model, loss_fn, dataloader, device='cpu'):
loss = 0
f1 = 0
num_blocks = 0
for subgraph in dataloader:
subgraph = subgraph.to(device)
label = subgraph.ndata['label'].to(device)
feat = subgraph.ndata['feat']
logits = model(subgraph, feat)
# compute loss
loss += loss_fn(logits, label).item()
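            # a logit >= 0 corresponds to sigmoid(logit) >= 0.5, i.e. a positive multi-label prediction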
predict = np.where(logits.data.cpu().numpy() >= 0., 1, 0)
f1 += f1_score(label.cpu(), predict, average='micro')
num_blocks += 1
return f1 / num_blocks, loss / num_blocks
def main(args):
# Step 1: Prepare graph data and retrieve train/validation/test index ============================= #
# Load dataset
train_dataset = PPIDataset(mode='train')
valid_dataset = PPIDataset(mode='valid')
test_dataset = PPIDataset(mode='test')
train_dataloader = GraphDataLoader(train_dataset, batch_size=args.batch_size)
valid_dataloader = GraphDataLoader(valid_dataset, batch_size=args.batch_size)
test_dataloader = GraphDataLoader(test_dataset, batch_size=args.batch_size)
# check cuda
if args.gpu >= 0 and th.cuda.is_available():
device = 'cuda:{}'.format(args.gpu)
else:
device = 'cpu'
num_classes = train_dataset.num_labels
# Extract node features
graph = train_dataset[0]
feat = graph.ndata['feat']
# Step 2: Create model =================================================================== #
if args.lazy:
model = GeniePathLazy(in_dim=feat.shape[-1],
out_dim=num_classes,
hid_dim=args.hid_dim,
num_layers=args.num_layers,
num_heads=args.num_heads,
residual=args.residual)
else:
model = GeniePath(in_dim=feat.shape[-1],
out_dim=num_classes,
hid_dim=args.hid_dim,
num_layers=args.num_layers,
num_heads=args.num_heads,
residual=args.residual)
model = model.to(device)
# Step 3: Create training components ===================================================== #
loss_fn = th.nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Step 4: training epochs =============================================================== #
for epoch in range(args.max_epoch):
model.train()
tr_loss = 0
tr_f1 = 0
num_blocks = 0
for subgraph in train_dataloader:
subgraph = subgraph.to(device)
label = subgraph.ndata['label']
feat = subgraph.ndata['feat']
logits = model(subgraph, feat)
# compute loss
batch_loss = loss_fn(logits, label)
tr_loss += batch_loss.item()
tr_predict = np.where(logits.data.cpu().numpy() >= 0., 1, 0)
tr_f1 += f1_score(label.cpu(), tr_predict, average='micro')
num_blocks += 1
# backward
optimizer.zero_grad()
batch_loss.backward()
optimizer.step()
# validation
model.eval()
val_f1, val_loss = evaluate(model, loss_fn, valid_dataloader, device)
print("In epoch {}, Train F1: {:.4f} | Train Loss: {:.4f}; Valid F1: {:.4f} | Valid loss: {:.4f}".
format(epoch, tr_f1 / num_blocks, tr_loss / num_blocks, val_f1, val_loss))
# Test after all epoch
model.eval()
test_f1, test_loss = evaluate(model, loss_fn, test_dataloader, device)
print("Test F1: {:.4f} | Test loss: {:.4f}".
format(test_f1, test_loss))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GeniePath')
parser.add_argument("--gpu", type=int, default=-1, help="GPU Index. Default: -1, using CPU.")
parser.add_argument("--hid_dim", type=int, default=256, help="Hidden layer dimension")
parser.add_argument("--num_layers", type=int, default=3, help="Number of GeniePath layers")
parser.add_argument("--max_epoch", type=int, default=1000, help="The max number of epochs. Default: 1000")
parser.add_argument("--lr", type=float, default=0.0004, help="Learning rate. Default: 0.0004")
parser.add_argument("--num_heads", type=int, default=1, help="Number of head in breadth function. Default: 1")
parser.add_argument("--residual", type=bool, default=False, help="Residual in GAT or not")
parser.add_argument("--batch_size", type=int, default=2, help="Batch size of graph dataloader")
parser.add_argument("--lazy", type=bool, default=False, help="Variant GeniePath-Lazy")
args = parser.parse_args()
print(args)
th.manual_seed(16)
main(args)
|
tkinter/command-access-button/main.py | whitmans-max/python-examples | 140 | 12737967 | <gh_stars>100-1000
# date: 2019.04.09
#
import tkinter as tk
# --- functions ---
def get_text(text):
print(text)
def get_widget(widget):
print(widget["text"])
widget["text"] = "DONE"
widget["bg"] = "green"
def get_event(event):
print(event.widget["text"])
event.widget["text"] = "DONE"
event.widget["bg"] = "green"
# --- main ---
list_words = ("One", "Two", "Three")
root = tk.Tk()
# access button's text in function assigned to button
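# the default argument (txt=word) captures the current value of word; a bare
# "lambda: get_text(word)" would late-bind and every button would print the last word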
for word in list_words:
btn = tk.Button(root, text=word, command=lambda txt=word:get_text(txt))
btn.pack()
# access button in function assigned to button
for word in list_words:
btn = tk.Button(root, text=word)
btn["command"] = lambda widget=btn:get_widget(widget)
btn.pack()
# access button in function assigned to button
for word in list_words:
btn = tk.Button(root, text=word)
btn.bind('<Button-1>', get_event)
btn.pack()
root.mainloop()
|
test/persistence/mzworkflows.py | MaterializeInc/materialize | 3,840 | 12737971 | # Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
import os
from materialize.mzcompose import (
Kafka,
Materialized,
SchemaRegistry,
Testdrive,
Workflow,
Zookeeper,
)
materialized = Materialized(
options="--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test"
)
mz_disable_user_indexes = Materialized(
name="mz_disable_user_indexes",
hostname="materialized",
options="--persistent-user-tables --persistent-kafka-upsert-source --disable-persistent-system-tables-test --disable-user-indexes",
)
# This instance of Mz is used for failpoint testing. By using --disable-persistent-system-tables-test
# we ensure that only testdrive-initiated actions cause I/O. The --workers 1 is used due to #8739
mz_without_system_tables = Materialized(
name="mz_without_system_tables",
hostname="materialized",
options="--persistent-user-tables --disable-persistent-system-tables-test --workers 1",
)
prerequisites = [Zookeeper(), Kafka(), SchemaRegistry()]
services = [
*prerequisites,
materialized,
mz_disable_user_indexes,
mz_without_system_tables,
Testdrive(no_reset=True, seed=1),
]
td_test = os.environ.pop("TD_TEST", "*")
def workflow_persistence(w: Workflow):
workflow_kafka_sources(w)
workflow_user_tables(w)
workflow_failpoints(w)
workflow_disable_user_indexes(w)
def workflow_kafka_sources(w: Workflow):
w.start_and_wait_for_tcp(services=prerequisites, timeout_secs=240)
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.run_service(
service="testdrive-svc",
command=f"kafka-sources/*{td_test}*-before.td",
)
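    # SIGKILL simulates an unclean shutdown; the restarted instance must recover the
    # persisted Kafka source state before the *-after.td checks run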
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
# And restart again, for extra stress
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.run_service(
service="testdrive-svc",
command=f"kafka-sources/*{td_test}*-after.td",
)
# Do one more restart, just in case and just confirm that Mz is able to come up
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.kill_services(services=["materialized"], signal="SIGKILL")
w.remove_services(services=["materialized", "testdrive-svc"], destroy_volumes=True)
w.remove_volumes(volumes=["mzdata"])
def workflow_user_tables(w: Workflow):
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.run_service(
service="testdrive-svc",
command=f"user-tables/table-persistence-before-{td_test}.td",
)
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["materialized"])
w.run_service(
service="testdrive-svc",
command=f"user-tables/table-persistence-after-{td_test}.td",
)
w.kill_services(services=["materialized"], signal="SIGKILL")
w.remove_services(services=["materialized", "testdrive-svc"], destroy_volumes=True)
w.remove_volumes(volumes=["mzdata"])
def workflow_failpoints(w: Workflow):
w.start_services(services=["mz_without_system_tables"])
w.wait_for_mz(service="mz_without_system_tables")
w.run_service(service="testdrive-svc", command=f"failpoints/{td_test}.td")
w.kill_services(services=["mz_without_system_tables"], signal="SIGKILL")
w.remove_services(
services=["mz_without_system_tables", "testdrive-svc"], destroy_volumes=True
)
w.remove_volumes(volumes=["mzdata"])
def workflow_disable_user_indexes(w: Workflow):
w.start_and_wait_for_tcp(services=prerequisites)
w.start_services(services=["materialized"])
w.wait_for_mz(service="materialized")
w.run_service(
service="testdrive-svc",
command="disable-user-indexes/before.td",
)
w.kill_services(services=["materialized"], signal="SIGKILL")
w.start_services(services=["mz_disable_user_indexes"])
w.wait_for_mz(service="mz_disable_user_indexes")
w.run_service(
service="testdrive-svc",
command="disable-user-indexes/after.td",
)
w.kill_services(services=["mz_disable_user_indexes"], signal="SIGKILL")
w.remove_services(
services=["materialized", "mz_disable_user_indexes", "testdrive-svc"],
destroy_volumes=True,
)
w.remove_volumes(volumes=["mzdata"])
|
tools/build/v2/test/indirect_conditional.py | jmuskaan72/Boost | 198 | 12737975 | #!/usr/bin/python
# Copyright (C) <NAME> 2006.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester()
t.write("jamroot.jam", """
exe a1 : a1.cpp : <conditional>@a1-rule ;
rule a1-rule ( properties * )
{
if <variant>debug in $(properties)
{
return <define>OK ;
}
}
exe a2 : a2.cpp : <conditional>@$(__name__).a2-rule
<variant>debug:<optimization>speed ;
rule a2-rule ( properties * )
{
if <optimization>speed in $(properties)
{
return <define>OK ;
}
}
exe a3 : a3.cpp : <conditional>@$(__name__).a3-rule-1
<conditional>@$(__name__).a3-rule-2 ;
rule a3-rule-1 ( properties * )
{
if <optimization>speed in $(properties)
{
return <define>OK ;
}
}
rule a3-rule-2 ( properties * )
{
if <variant>debug in $(properties)
{
return <optimization>speed ;
}
}
""")
t.write("a1.cpp", """
#ifdef OK
int main() {}
#endif
""")
t.write("a2.cpp", """
#ifdef OK
int main() {}
#endif
""")
t.write("a3.cpp", """
#ifdef OK
int main() {}
#endif
""")
t.run_build_system()
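# a3 compiles only because a3-rule-2 first injects <optimization>speed for debug builds,
# which a3-rule-1 then sees and turns into <define>OK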
t.expect_addition("bin/$toolset/debug/a1.exe")
t.expect_addition("bin/$toolset/debug/optimization-speed/a2.exe")
t.expect_addition("bin/$toolset/debug/optimization-speed/a3.exe")
t.cleanup()
|
api/tacticalrmm/winupdate/migrations/0003_auto_20200828_0134.py | infinite8co/tacticalrmm | 903 | 12737978 | # Generated by Django 3.1 on 2020-08-28 01:34
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("automation", "0004_auto_20200617_0332"),
("agents", "0012_auto_20200810_0544"),
("winupdate", "0002_auto_20200715_0445"),
]
operations = [
migrations.AddField(
model_name="winupdatepolicy",
name="policy",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="winupdatepolicy",
to="automation.policy",
),
),
migrations.AlterField(
model_name="winupdatepolicy",
name="agent",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="winupdatepolicy",
to="agents.agent",
),
),
]
|
sdk/keyvault/azure-keyvault-secrets/tests/test_samples_secrets_async.py | rsdoherty/azure-sdk-for-python | 2,728 | 12737987 | <gh_stars>1000+
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# -------------------------------------
import asyncio
import pytest
from _shared.test_case_async import KeyVaultTestCase
from _test_case import client_setup, get_decorator, SecretsTestCase
all_api_versions = get_decorator(is_async=True)
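# the sample snippets below call print(); overriding it keeps the test output quiet
# while still asserting that every printed value is populated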
def print(*args):
assert all(arg is not None for arg in args)
@pytest.mark.asyncio
async def test_create_secret_client():
vault_url = "vault_url"
# pylint:disable=unused-variable
# [START create_secret_client]
from azure.identity.aio import DefaultAzureCredential
from azure.keyvault.secrets.aio import SecretClient
# Create a SecretClient using default Azure credentials
credential = DefaultAzureCredential()
secret_client = SecretClient(vault_url, credential)
# the client and credential should be closed when no longer needed
# (both are also async context managers)
await secret_client.close()
await credential.close()
# [END create_secret_client]
class TestExamplesKeyVault(SecretsTestCase, KeyVaultTestCase):
@all_api_versions()
@client_setup
async def test_example_secret_crud_operations(self, client, **kwargs):
secret_client = client
secret_name = self.get_resource_name("secret-name")
# [START set_secret]
from dateutil import parser as date_parse
expires_on = date_parse.parse("2050-02-02T08:00:00.000Z")
# create a secret, setting optional arguments
secret = await secret_client.set_secret(secret_name, "secret-value", enabled=True, expires_on=expires_on)
print(secret.id)
print(secret.name)
print(secret.properties.enabled)
print(secret.properties.expires_on)
# [END set_secret]
secret_version = secret.properties.version
# [START get_secret]
# get the latest version of a secret
secret = await secret_client.get_secret(secret_name)
# alternatively, specify a version
secret = await secret_client.get_secret(secret_name, secret_version)
print(secret.id)
print(secret.name)
print(secret.properties.version)
print(secret.properties.vault_url)
# [END get_secret]
# [START update_secret]
# update attributes of an existing secret
content_type = "text/plain"
tags = {"foo": "updated tag"}
updated_secret_properties = await secret_client.update_secret_properties(
secret_name, content_type=content_type, tags=tags
)
print(updated_secret_properties.version)
print(updated_secret_properties.updated_on)
print(updated_secret_properties.content_type)
print(updated_secret_properties.tags)
# [END update_secret]
# [START delete_secret]
# delete a secret
deleted_secret = await secret_client.delete_secret(secret_name)
print(deleted_secret.name)
# if the vault has soft-delete enabled, the secret's deleted_date,
# scheduled purge date and recovery id are set
print(deleted_secret.deleted_date)
print(deleted_secret.scheduled_purge_date)
print(deleted_secret.recovery_id)
# [END delete_secret]
@all_api_versions()
@client_setup
async def test_example_secret_list_operations(self, client, **kwargs):
secret_client = client
for i in range(7):
secret_name = self.get_resource_name("secret{}".format(i))
await secret_client.set_secret(secret_name, "value{}".format(i))
# [START list_secrets]
# gets a list of secrets in the vault
secrets = secret_client.list_properties_of_secrets()
async for secret in secrets:
# the list doesn't include values or versions of the secrets
print(secret.id)
print(secret.name)
print(secret.enabled)
# [END list_secrets]
# [START list_properties_of_secret_versions]
# gets a list of all versions of a secret
secret_versions = secret_client.list_properties_of_secret_versions("secret-name")
async for secret in secret_versions:
# the list doesn't include the versions' values
print(secret.id)
print(secret.enabled)
print(secret.updated_on)
# [END list_properties_of_secret_versions]
# [START list_deleted_secrets]
# gets a list of deleted secrets (requires soft-delete enabled for the vault)
deleted_secrets = secret_client.list_deleted_secrets()
async for secret in deleted_secrets:
# the list doesn't include values or versions of the deleted secrets
print(secret.id)
print(secret.name)
print(secret.scheduled_purge_date)
print(secret.recovery_id)
print(secret.deleted_date)
# [END list_deleted_secrets]
@all_api_versions()
@client_setup
async def test_example_secrets_backup_restore(self, client, **kwargs):
secret_client = client
secret_name = self.get_resource_name("secret-name")
await secret_client.set_secret(secret_name, "secret-value")
# [START backup_secret]
# backup secret
secret_backup = await secret_client.backup_secret(secret_name)
# returns the raw bytes of the backed up secret
print(secret_backup)
# [END backup_secret]
await secret_client.delete_secret(secret_name)
await secret_client.purge_deleted_secret(secret_name)
if self.is_live:
await asyncio.sleep(60)
# [START restore_secret_backup]
# restores a backed up secret
restored_secret = await secret_client.restore_secret_backup(secret_backup)
print(restored_secret.id)
print(restored_secret.version)
# [END restore_secret_backup]
@all_api_versions()
@client_setup
async def test_example_secrets_recover(self, client, **kwargs):
secret_client = client
secret_name = self.get_resource_name("secret-name")
await secret_client.set_secret(secret_name, "secret-value")
await secret_client.delete_secret(secret_name)
# [START get_deleted_secret]
# gets a deleted secret (requires soft-delete enabled for the vault)
deleted_secret = await secret_client.get_deleted_secret(secret_name)
print(deleted_secret.name)
# [END get_deleted_secret]
# [START recover_deleted_secret]
# recover deleted secret to the latest version
recovered_secret = await secret_client.recover_deleted_secret(secret_name)
print(recovered_secret.id)
print(recovered_secret.name)
# [END recover_deleted_secret]
|
esphome/components/pn532_spi/__init__.py | OttoWinter/esphomeyaml | 249 | 12737991 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import spi, pn532
from esphome.const import CONF_ID
AUTO_LOAD = ["pn532"]
CODEOWNERS = ["@OttoWinter", "@jesserockz"]
DEPENDENCIES = ["spi"]
MULTI_CONF = True
pn532_spi_ns = cg.esphome_ns.namespace("pn532_spi")
PN532Spi = pn532_spi_ns.class_("PN532Spi", pn532.PN532, spi.SPIDevice)
CONFIG_SCHEMA = cv.All(
pn532.PN532_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(PN532Spi),
}
).extend(spi.spi_device_schema(cs_pin_required=True))
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await pn532.setup_pn532(var, config)
await spi.register_spi_device(var, config)
|
backend/cloud_inquisitor/plugins/views/templates.py | MrSecure/cloud-inquisitor | 462 | 12738016 | <reponame>MrSecure/cloud-inquisitor
from flask import session
from cloud_inquisitor.app import _import_templates
from cloud_inquisitor.constants import ROLE_ADMIN, HTTP
from cloud_inquisitor.database import db
from cloud_inquisitor.log import auditlog
from cloud_inquisitor.plugins import BaseView
from cloud_inquisitor.schema import Role, Template
from cloud_inquisitor.utils import MenuItem, diff
from cloud_inquisitor.wrappers import check_auth, rollback
class TemplateList(BaseView):
URLS = ['/api/v1/templates']
MENU_ITEMS = [
MenuItem(
'admin',
'Templates',
'template.list',
'template',
order=4
)
]
@rollback
@check_auth(ROLE_ADMIN)
def get(self):
templates = db.Template.all()
return self.make_response({
'templates': templates,
'templateCount': len(templates)
})
@rollback
@check_auth(ROLE_ADMIN)
def post(self):
"""Create a new template"""
self.reqparse.add_argument('templateName', type=str, required=True)
self.reqparse.add_argument('template', type=str, required=True)
args = self.reqparse.parse_args()
template = db.Template.find_one(template_name=args['templateName'])
if template:
return self.make_response('Template already exists, update the existing template instead', HTTP.CONFLICT)
template = Template()
template.template_name = args['templateName']
template.template = args['template']
db.session.add(template)
db.session.commit()
auditlog(event='template.create', actor=session['user'].username, data=args)
return self.make_response('Template {} has been created'.format(template.template_name), HTTP.CREATED)
@rollback
@check_auth(ROLE_ADMIN)
def put(self):
"""Re-import all templates, overwriting any local changes made"""
try:
_import_templates(force=True)
return self.make_response('Imported templates')
except:
self.log.exception('Failed importing templates')
return self.make_response('Failed importing templates', HTTP.SERVER_ERROR)
class TemplateGet(BaseView):
URLS = ['/api/v1/template/<string:template_name>']
@rollback
@check_auth(ROLE_ADMIN)
def get(self, template_name):
"""Get a specific template"""
template = db.Template.find_one(template_name=template_name)
if not template:
return self.make_response('No such template found', HTTP.NOT_FOUND)
return self.make_response({'template': template})
@rollback
@check_auth(ROLE_ADMIN)
def put(self, template_name):
"""Update a template"""
self.reqparse.add_argument('template', type=str, required=True)
args = self.reqparse.parse_args()
template = db.Template.find_one(template_name=template_name)
if not template:
return self.make_response('No such template found', HTTP.NOT_FOUND)
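        # keep a line diff of the old vs. new template body for the audit log entry below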
changes = diff(template.template, args['template'])
template.template = args['template']
template.is_modified = True
db.session.add(template)
db.session.commit()
auditlog(
event='template.update',
actor=session['user'].username,
data={
'template_name': template_name,
'template_changes': changes
}
)
return self.make_response('Template {} has been updated'.format(template_name))
@rollback
@check_auth(ROLE_ADMIN)
def delete(self, template_name):
"""Delete a template"""
template = db.Template.find_one(template_name=template_name)
if not template:
return self.make_response('No such template found', HTTP.NOT_FOUND)
db.session.delete(template)
db.session.commit()
auditlog(event='template.delete', actor=session['user'].username, data={'template_name': template_name})
return self.make_response({
'message': 'Template has been deleted',
'templateName': template_name
})
|
data/DataPre.py | byamao1/MMSA | 198 | 12738037 | # coding: utf-8
import os
import shutil
import pickle
import librosa
import argparse
import pandas as pd
import numpy as np
from glob import glob
from tqdm import tqdm
from PIL import Image
from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
from transformers import BertTokenizer, BertModel
from torch.utils.data import Dataset, DataLoader
class MDataPreLoader(Dataset):
def __init__(self, args):
self.working_dir = args.working_dir
self.df = args.df
self.annotation_dict = {
"Negative": 0,
"Neutral": 1,
"Positive": 2
}
# toolkits path
self.openface2Path = args.openface2Path
# bert
tokenizer_class = BertTokenizer
        if args.language == 'cn':
            self.pretrainedBertPath = 'pretrained_model/bert_cn'
            self.tokenizer = tokenizer_class.from_pretrained(self.pretrainedBertPath)
        else:
            self.pretrainedBertPath = 'pretrained_model/bert_en'
            self.tokenizer = tokenizer_class.from_pretrained(self.pretrainedBertPath, do_lower_case=True)
def __len__(self):
return len(self.df)
def __getVideoEmbedding(self, video_path, tmp_dir, pool_size=3):
faces_feature_dir = os.path.join(tmp_dir, 'Faces')
os.mkdir(faces_feature_dir)
cmd = self.openface2Path + ' -f ' + video_path + ' -out_dir ' + faces_feature_dir
os.system(cmd)
# read features
features, local_features = [], []
df_path = glob(os.path.join(faces_feature_dir, '*.csv'))
if len(df_path) > 0:
df_path = df_path[0]
df = pd.read_csv(df_path)
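            # OpenFace writes one row of features per video frame; average every
            # pool_size consecutive frames to shorten the visual sequence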
for i in range(len(df)):
local_features.append(np.array(df.loc[i][df.columns[5:]]))
if (i + 1) % pool_size == 0:
features.append(np.array(local_features).mean(axis=0))
local_features = []
if len(local_features) != 0:
features.append(np.array(local_features).mean(axis=0))
return np.array(features)
def __getAudioEmbedding(self, video_path, audio_path):
# use ffmpeg to extract audio
cmd = 'ffmpeg -i ' + video_path + ' -f wav -vn ' + \
audio_path + ' -loglevel quiet'
os.system(cmd)
# get features
y, sr = librosa.load(audio_path)
# using librosa to get audio features (f0, mfcc, cqt)
hop_length = 512 # hop_length smaller, seq_len larger
f0 = librosa.feature.zero_crossing_rate(y, hop_length=hop_length).T # (seq_len, 1)
mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, htk=True).T # (seq_len, 20)
cqt = librosa.feature.chroma_cqt(y=y, sr=sr, hop_length=hop_length).T # (seq_len, 12)
return np.concatenate([f0, mfcc, cqt], axis=-1)
def __getTextEmbedding(self, text):
# directory is fine
tokenizer = BertTokenizer.from_pretrained(self.pretrainedBertPath)
model = BertModel.from_pretrained(self.pretrainedBertPath)
# add_special_tokens will add start and end token
input_ids = torch.tensor([tokenizer.encode(text, add_special_tokens=True)])
with torch.no_grad():
last_hidden_states = model(input_ids)[0] # Models outputs are now tuples
return last_hidden_states.squeeze().numpy()
def __preTextforBert(self, text):
tokens_a = self.tokenizer.tokenize(text,invertable=True)
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
input_ids = np.expand_dims(input_ids, 1)
input_mask = np.expand_dims(input_mask, 1)
segment_ids = np.expand_dims(segment_ids, 1)
text_bert = np.concatenate([input_ids, input_mask, segment_ids], axis=1)
return text_bert
def __getitem__(self, index):
tmp_dir = os.path.join(self.working_dir, f'Processed/tmp-{index}')
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
video_id, clip_id, text, label, annotation, mode, _ = self.df.loc[index]
cur_id = video_id + '$_$' + clip_id
# video
video_path = os.path.join(self.working_dir, 'Raw', video_id, clip_id + '.mp4')
embedding_V = self.__getVideoEmbedding(video_path, tmp_dir)
seq_V = embedding_V.shape[0]
# audio
audio_path = os.path.join(tmp_dir, 'tmp.wav')
embedding_A = self.__getAudioEmbedding(video_path, audio_path)
seq_A = embedding_A.shape[0]
# text
embedding_T = self.__getTextEmbedding(text)
text_bert = self.__preTextforBert(text)
seq_T = embedding_T.shape[0]
ret = {
'id': cur_id,
'audio': embedding_A,
'vision': embedding_V,
'raw_text': text,
'text': embedding_T,
'text_bert': text_bert,
'audio_lengths': seq_A,
'vision_lengths': seq_V,
'annotations': annotation,
'classification_labels': self.annotation_dict[annotation],
'regression_labels': label,
'mode': mode
}
# clear tmp dir to save space
shutil.rmtree(tmp_dir)
return ret
class MDataPre():
def __init__(self, args):
self.working_dir = args.working_dir
# padding
self.padding_mode = 'zeros'
self.padding_location = 'back'
def __padding(self, feature, MAX_LEN):
"""
mode:
zero: padding with 0
normal: padding with normal distribution
location: front / back
"""
assert self.padding_mode in ['zeros', 'normal']
assert self.padding_location in ['front', 'back']
length = feature.shape[0]
if length >= MAX_LEN:
return feature[:MAX_LEN, :]
if self.padding_mode == "zeros":
pad = np.zeros([MAX_LEN - length, feature.shape[-1]])
elif self.padding_mode == "normal":
mean, std = feature.mean(), feature.std()
pad = np.random.normal(mean, std, (MAX_LEN-length, feature.shape[1]))
feature = np.concatenate([pad, feature], axis=0) if(self.padding_location == "front") else \
np.concatenate((feature, pad), axis=0)
return feature
def __paddingSequence(self, sequences):
if len(sequences) == 0:
return sequences
feature_dim = sequences[0].shape[-1]
lens = [s.shape[0] for s in sequences]
# confirm length using (mean + std)
final_length = int(np.mean(lens) + 3 * np.std(lens))
# padding sequences to final_length
final_sequence = np.zeros([len(sequences), final_length, feature_dim])
for i, s in enumerate(sequences):
if len(s) != 0:
final_sequence[i] = self.__padding(s, final_length)
return final_sequence
def __collate_fn(self, batch):
ret = {k: [] for k in batch[0].keys()}
for b in batch:
for k,v in b.items():
ret[k].append(v)
return ret
def run(self):
output_path = os.path.join(self.working_dir, 'Processed/features.pkl')
# load last point
if os.path.exists(output_path):
with open(output_path, 'rb') as f:
data = pickle.load(f)
last_row_idx = len(data['id'])
else:
data = {"id": [],
"raw_text": [],
"audio": [],
"vision": [],
"text": [],
"text_bert": [],
"audio_lengths": [],
"vision_lengths": [],
"annotations": [],
"classification_labels": [],
"regression_labels": [],
"mode": []}
last_row_idx = 0
args.df = pd.read_csv(os.path.join(self.working_dir, 'label.csv'), dtype={'clip_id': str, 'video_id': str, 'text': str})
args.df = args.df[last_row_idx:]
dataloader = DataLoader(MDataPreLoader(args),
batch_size=64,
num_workers=8,
shuffle=False,
collate_fn=self.__collate_fn)
isEnd = False
try:
with tqdm(dataloader) as td:
for batch_data in td:
for k, v in batch_data.items():
data[k].extend(v)
isEnd = True
except Exception as e:
print(e)
finally:
try:
if isEnd:
# padding
for item in ['audio', 'vision', 'text', 'text_bert']:
data[item] = self.__paddingSequence(data[item])
# data['mode'] = list(args.df['mode'])
# split train, valid, test
inx_dict = {
mode + '_index': [i for i, v in enumerate(data['mode']) if v == mode]
for mode in ['train', 'valid', 'test']
}
data.pop('mode')
final_data = {k: {} for k in ['train', 'valid', 'test']}
for mode in ['train', 'valid', 'test']:
indexes = inx_dict[mode + '_index']
for item in data.keys():
if isinstance(data[item], list):
final_data[mode][item] = [data[item][v] for v in indexes]
else:
final_data[mode][item] = data[item][indexes]
data = final_data
except Exception as e:
print(e)
finally:
with open(output_path, 'wb') as wf:
pickle.dump(data, wf, protocol = 4)
print('Features are saved in %s!' %output_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--working_dir', type=str, default='/home/sharing/disk3/dataset/multimodal-sentiment-dataset/StandardDatasets/MOSEI',
help='path to datasets')
parser.add_argument('--language', type=str, default="en",
help='en / cn')
parser.add_argument('--openface2Path', type=str, default="/home/iyuge2/ToolKits/OpenFace/build/bin/FeatureExtraction",
help='path to FeatureExtraction tool in openface2')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
dp = MDataPre(args)
dp.run() |
aioinflux/__init__.py | AndyBryson/aioinflux | 120 | 12738099 | # flake8: noqa
from . import serialization
from .client import InfluxDBClient, InfluxDBError, InfluxDBWriteError
from .iterutils import iterpoints
from .serialization.usertype import *
__version__ = '0.9.0'
|
tests/test_serialization.py | Multihuntr/albumentations | 3,893 | 12738127 | import json
import os
import random
from unittest.mock import patch
import cv2
import numpy as np
import pytest
import albumentations as A
import albumentations.augmentations.functional as F
from albumentations.core.serialization import SERIALIZABLE_REGISTRY, shorten_class_name
from albumentations.core.transforms_interface import ImageOnlyTransform
from .conftest import skipif_no_torch
from .utils import (
OpenMock,
check_all_augs_exists,
get_dual_transforms,
get_image_only_transforms,
get_transforms,
set_seed,
)
TEST_SEEDS = (0, 1, 42, 111, 9999)
@pytest.mark.parametrize(
["augmentation_cls", "params"],
get_transforms(
custom_arguments={
A.Crop: {"y_min": 0, "y_max": 10, "x_min": 0, "x_max": 10},
A.CenterCrop: {"height": 10, "width": 10},
A.CropNonEmptyMaskIfExists: {"height": 10, "width": 10},
A.RandomCrop: {"height": 10, "width": 10},
A.RandomResizedCrop: {"height": 10, "width": 10},
A.RandomSizedCrop: {"min_max_height": (4, 8), "height": 10, "width": 10},
A.CropAndPad: {"px": 10},
A.Resize: {"height": 10, "width": 10},
},
except_augmentations={
A.RandomCropNearBBox,
A.RandomSizedBBoxSafeCrop,
A.FDA,
A.HistogramMatching,
A.PixelDistributionAdaptation,
A.Lambda,
A.TemplateTransform,
},
),
)
@pytest.mark.parametrize("p", [0.5, 1])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("always_apply", (False, True))
def test_augmentations_serialization(augmentation_cls, params, p, seed, image, mask, always_apply):
aug = augmentation_cls(p=p, always_apply=always_apply, **params)
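    # round-trip the transform through its dict representation and verify that, with the
    # same seed, the original and deserialized pipelines produce identical outputs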
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
aug_data = aug(image=image, mask=mask)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, mask=mask)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
AUGMENTATION_CLS_PARAMS = [
[
A.ImageCompression,
{
"quality_lower": 10,
"quality_upper": 80,
"compression_type": A.ImageCompression.ImageCompressionType.WEBP,
},
],
[A.JpegCompression, {"quality_lower": 10, "quality_upper": 80}],
[A.HueSaturationValue, {"hue_shift_limit": 70, "sat_shift_limit": 95, "val_shift_limit": 55}],
[A.RGBShift, {"r_shift_limit": 70, "g_shift_limit": 80, "b_shift_limit": 40}],
[A.RandomBrightnessContrast, {"brightness_limit": 0.5, "contrast_limit": 0.8}],
[A.Blur, {"blur_limit": 3}],
[A.MotionBlur, {"blur_limit": 3}],
[A.MedianBlur, {"blur_limit": 3}],
[A.GaussianBlur, {"blur_limit": 3}],
[A.GaussNoise, {"var_limit": (20, 90), "mean": 10, "per_channel": False}],
[A.CLAHE, {"clip_limit": 2, "tile_grid_size": (12, 12)}],
[A.RandomGamma, {"gamma_limit": (10, 90)}],
[A.Cutout, {"num_holes": 4, "max_h_size": 4, "max_w_size": 4}],
[A.CoarseDropout, {"max_holes": 4, "max_height": 4, "max_width": 4}],
[A.RandomSnow, {"snow_point_lower": 0.2, "snow_point_upper": 0.4, "brightness_coeff": 4}],
[
A.RandomRain,
{
"slant_lower": -5,
"slant_upper": 5,
"drop_length": 15,
"drop_width": 2,
"drop_color": (100, 100, 100),
"blur_value": 3,
"brightness_coefficient": 0.5,
"rain_type": "heavy",
},
],
[A.RandomFog, {"fog_coef_lower": 0.2, "fog_coef_upper": 0.8, "alpha_coef": 0.11}],
[
A.RandomSunFlare,
{
"flare_roi": (0.1, 0.1, 0.9, 0.6),
"angle_lower": 0.1,
"angle_upper": 0.95,
"num_flare_circles_lower": 7,
"num_flare_circles_upper": 11,
"src_radius": 300,
"src_color": (200, 200, 200),
},
],
[
A.RandomShadow,
{
"shadow_roi": (0.1, 0.4, 0.9, 0.9),
"num_shadows_lower": 2,
"num_shadows_upper": 4,
"shadow_dimension": 8,
},
],
[
A.PadIfNeeded,
{"min_height": 512, "min_width": 512, "border_mode": cv2.BORDER_CONSTANT, "value": (10, 10, 10)},
],
[
A.Rotate,
{
"limit": 120,
"interpolation": cv2.INTER_CUBIC,
"border_mode": cv2.BORDER_CONSTANT,
"value": (10, 10, 10),
},
],
[
A.SafeRotate,
{
"limit": 120,
"interpolation": cv2.INTER_CUBIC,
"border_mode": cv2.BORDER_CONSTANT,
"value": (10, 10, 10),
},
],
[
A.ShiftScaleRotate,
{
"shift_limit": 0.2,
"scale_limit": 0.2,
"rotate_limit": 70,
"interpolation": cv2.INTER_CUBIC,
"border_mode": cv2.BORDER_CONSTANT,
"value": (10, 10, 10),
},
],
[
A.ShiftScaleRotate,
{
"shift_limit_x": 0.3,
"shift_limit_y": 0.4,
"scale_limit": 0.2,
"rotate_limit": 70,
"interpolation": cv2.INTER_CUBIC,
"border_mode": cv2.BORDER_CONSTANT,
"value": (10, 10, 10),
},
],
[
A.OpticalDistortion,
{
"distort_limit": 0.2,
"shift_limit": 0.2,
"interpolation": cv2.INTER_CUBIC,
"border_mode": cv2.BORDER_CONSTANT,
"value": (10, 10, 10),
},
],
[
A.GridDistortion,
{
"num_steps": 10,
"distort_limit": 0.5,
"interpolation": cv2.INTER_CUBIC,
"border_mode": cv2.BORDER_CONSTANT,
"value": (10, 10, 10),
},
],
[
A.ElasticTransform,
{
"alpha": 2,
"sigma": 25,
"alpha_affine": 40,
"interpolation": cv2.INTER_CUBIC,
"border_mode": cv2.BORDER_CONSTANT,
"value": (10, 10, 10),
},
],
[A.CenterCrop, {"height": 10, "width": 10}],
[A.RandomCrop, {"height": 10, "width": 10}],
[A.CropNonEmptyMaskIfExists, {"height": 10, "width": 10}],
[A.RandomSizedCrop, {"min_max_height": (4, 8), "height": 10, "width": 10}],
[A.Crop, {"x_max": 64, "y_max": 64}],
[A.ToFloat, {"max_value": 16536}],
[A.Normalize, {"mean": (0.385, 0.356, 0.306), "std": (0.129, 0.124, 0.125), "max_pixel_value": 100.0}],
[A.RandomBrightness, {"limit": 0.4}],
[A.RandomContrast, {"limit": 0.4}],
[A.RandomScale, {"scale_limit": 0.2, "interpolation": cv2.INTER_CUBIC}],
[A.Resize, {"height": 64, "width": 64}],
[A.SmallestMaxSize, {"max_size": 64, "interpolation": cv2.INTER_CUBIC}],
[A.LongestMaxSize, {"max_size": 128, "interpolation": cv2.INTER_CUBIC}],
[A.RandomGridShuffle, {"grid": (5, 5)}],
[A.Solarize, {"threshold": 32}],
[A.Posterize, {"num_bits": 1}],
[A.Equalize, {"mode": "pil", "by_channels": False}],
[A.MultiplicativeNoise, {"multiplier": (0.7, 2.3), "per_channel": True, "elementwise": True}],
[
A.ColorJitter,
{"brightness": [0.2, 0.3], "contrast": [0.7, 0.9], "saturation": [1.2, 1.7], "hue": [-0.2, 0.1]},
],
[
A.Perspective,
{
"scale": 0.5,
"keep_size": False,
"pad_mode": cv2.BORDER_REFLECT_101,
"pad_val": 10,
"mask_pad_val": 100,
"fit_output": True,
"interpolation": cv2.INTER_CUBIC,
},
],
[A.Sharpen, {"alpha": [0.2, 0.5], "lightness": [0.5, 1.0]}],
[A.Emboss, {"alpha": [0.2, 0.5], "strength": [0.5, 1.0]}],
[A.RandomToneCurve, {"scale": 0.2}],
[
A.CropAndPad,
{
"px": 10,
"keep_size": False,
"sample_independently": False,
"interpolation": cv2.INTER_CUBIC,
"pad_cval_mask": [10, 20, 30],
"pad_cval": [11, 12, 13],
"pad_mode": cv2.BORDER_REFLECT101,
},
],
[
A.Superpixels,
{"p_replace": (0.5, 0.7), "n_segments": (20, 30), "max_size": 25, "interpolation": cv2.INTER_CUBIC},
],
[
A.Affine,
{
"scale": 0.5,
"translate_percent": 0.7,
"translate_px": None,
"rotate": 33,
"shear": 21,
"interpolation": cv2.INTER_CUBIC,
"cval": 25,
"cval_mask": 1,
"mode": cv2.BORDER_REFLECT,
"fit_output": True,
},
],
[
A.Affine,
{
"scale": {"x": [0.3, 0.5], "y": [0.1, 0.2]},
"translate_percent": None,
"translate_px": {"x": [10, 200], "y": [5, 101]},
"rotate": [333, 360],
"shear": {"x": [31, 38], "y": [41, 48]},
"interpolation": 3,
"cval": [10, 20, 30],
"cval_mask": 1,
"mode": cv2.BORDER_REFLECT,
"fit_output": True,
},
],
[
A.PiecewiseAffine,
{
"scale": 0.33,
"nb_rows": (10, 20),
"nb_cols": 33,
"interpolation": 2,
"mask_interpolation": 1,
"cval": 10,
"cval_mask": 20,
"mode": "edge",
"absolute_scale": True,
"keypoints_threshold": 0.1,
},
],
[A.ChannelDropout, dict(channel_drop_range=(1, 2), fill_value=1)],
[A.ChannelShuffle, {}],
[A.Downscale, dict(scale_min=0.5, scale_max=0.75, interpolation=cv2.INTER_LINEAR)],
[A.Flip, {}],
[A.FromFloat, dict(dtype="uint8", max_value=1)],
[A.HorizontalFlip, {}],
[A.ISONoise, dict(color_shift=(0.2, 0.3), intensity=(0.7, 0.9))],
[A.InvertImg, {}],
[A.MaskDropout, dict(max_objects=2, image_fill_value=10, mask_fill_value=20)],
[A.NoOp, {}],
[A.RandomResizedCrop, dict(height=20, width=30, scale=(0.5, 0.6), ratio=(0.8, 0.9))],
[A.FancyPCA, dict(alpha=0.3)],
[A.RandomRotate90, {}],
[A.ToGray, {}],
[A.ToSepia, {}],
[A.Transpose, {}],
[A.VerticalFlip, {}],
[A.RingingOvershoot, dict(blur_limit=(7, 15), cutoff=(np.pi / 5, np.pi / 2))],
[A.UnsharpMask, {"blur_limit": 3, "sigma_limit": 0.5, "alpha": 0.2, "threshold": 15}],
[A.AdvancedBlur, dict(blur_limit=(3, 5), rotate_limit=(60, 90))],
[A.PixelDropout, {"dropout_prob": 0.1, "per_channel": True, "drop_value": None}],
[A.PixelDropout, {"dropout_prob": 0.1, "per_channel": False, "drop_value": None, "mask_drop_value": 15}],
]
AUGMENTATION_CLS_EXCEPT = {
A.FDA,
A.HistogramMatching,
A.PixelDistributionAdaptation,
A.Lambda,
A.RandomCropNearBBox,
A.RandomSizedBBoxSafeCrop,
A.GridDropout,
A.GlassBlur,
A.TemplateTransform,
}
@pytest.mark.parametrize(
["augmentation_cls", "params"], check_all_augs_exists(AUGMENTATION_CLS_PARAMS, AUGMENTATION_CLS_EXCEPT)
)
@pytest.mark.parametrize("p", [0.5, 1])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("always_apply", (False, True))
def test_augmentations_serialization_with_custom_parameters(
augmentation_cls, params, p, seed, image, mask, always_apply
):
aug = augmentation_cls(p=p, always_apply=always_apply, **params)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
aug_data = aug(image=image, mask=mask)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, mask=mask)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
@pytest.mark.parametrize(
["augmentation_cls", "params"], check_all_augs_exists(AUGMENTATION_CLS_PARAMS, AUGMENTATION_CLS_EXCEPT)
)
@pytest.mark.parametrize("p", [0.5, 1])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("always_apply", (False, True))
@pytest.mark.parametrize("data_format", ("yaml",))
def test_augmentations_serialization_to_file_with_custom_parameters(
augmentation_cls, params, p, seed, image, mask, always_apply, data_format
):
with patch("builtins.open", OpenMock()):
aug = augmentation_cls(p=p, always_apply=always_apply, **params)
filepath = "serialized.{}".format(data_format)
A.save(aug, filepath, data_format=data_format)
deserialized_aug = A.load(filepath, data_format=data_format)
set_seed(seed)
aug_data = aug(image=image, mask=mask)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, mask=mask)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
@pytest.mark.parametrize(
["augmentation_cls", "params"],
get_transforms(
custom_arguments={
A.Crop: {"y_min": 0, "y_max": 10, "x_min": 0, "x_max": 10},
A.CenterCrop: {"height": 10, "width": 10},
A.CropNonEmptyMaskIfExists: {"height": 10, "width": 10},
A.RandomCrop: {"height": 10, "width": 10},
A.RandomResizedCrop: {"height": 10, "width": 10},
A.RandomSizedCrop: {"min_max_height": (4, 8), "height": 10, "width": 10},
A.CropAndPad: {"px": 10},
A.Resize: {"height": 10, "width": 10},
A.RandomSizedBBoxSafeCrop: {"height": 10, "width": 10},
},
except_augmentations={
A.RandomCropNearBBox,
A.FDA,
A.HistogramMatching,
A.PixelDistributionAdaptation,
A.Lambda,
A.CoarseDropout,
A.CropNonEmptyMaskIfExists,
A.ElasticTransform,
A.GridDistortion,
A.RandomGridShuffle,
A.GridDropout,
A.MaskDropout,
A.OpticalDistortion,
A.TemplateTransform,
},
),
)
@pytest.mark.parametrize("p", [0.5, 1])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("always_apply", (False, True))
def test_augmentations_for_bboxes_serialization(
augmentation_cls, params, p, seed, image, albumentations_bboxes, always_apply
):
aug = augmentation_cls(p=p, always_apply=always_apply, **params)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
aug_data = aug(image=image, bboxes=albumentations_bboxes)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, bboxes=albumentations_bboxes)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
@pytest.mark.parametrize(
["augmentation_cls", "params"],
get_transforms(
custom_arguments={
A.Crop: {"y_min": 0, "y_max": 10, "x_min": 0, "x_max": 10},
A.CenterCrop: {"height": 10, "width": 10},
A.CropNonEmptyMaskIfExists: {"height": 10, "width": 10},
A.RandomCrop: {"height": 10, "width": 10},
A.RandomResizedCrop: {"height": 10, "width": 10},
A.RandomSizedCrop: {"min_max_height": (4, 8), "height": 10, "width": 10},
A.CropAndPad: {"px": 10},
A.Resize: {"height": 10, "width": 10},
},
except_augmentations={
A.RandomCropNearBBox,
A.FDA,
A.HistogramMatching,
A.PixelDistributionAdaptation,
A.Lambda,
A.CoarseDropout,
A.CropNonEmptyMaskIfExists,
A.ElasticTransform,
A.GridDistortion,
A.RandomGridShuffle,
A.GridDropout,
A.MaskDropout,
A.OpticalDistortion,
A.RandomSizedBBoxSafeCrop,
A.TemplateTransform,
},
),
)
@pytest.mark.parametrize("p", [0.5, 1])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("always_apply", (False, True))
def test_augmentations_for_keypoints_serialization(augmentation_cls, params, p, seed, image, keypoints, always_apply):
aug = augmentation_cls(p=p, always_apply=always_apply, **params)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
aug_data = aug(image=image, keypoints=keypoints)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])
@pytest.mark.parametrize(
["augmentation_cls", "params", "call_params"],
[[A.RandomCropNearBBox, {"max_part_shift": 0.15}, {"cropping_bbox": [-59, 77, 177, 231]}]],
)
@pytest.mark.parametrize("p", [0.5, 1])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("always_apply", (False, True))
def test_augmentations_serialization_with_call_params(
augmentation_cls, params, call_params, p, seed, image, always_apply
):
aug = augmentation_cls(p=p, always_apply=always_apply, **params)
annotations = {"image": image, **call_params}
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
aug_data = aug(**annotations)
set_seed(seed)
deserialized_aug_data = deserialized_aug(**annotations)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
def test_from_float_serialization(float_image):
aug = A.FromFloat(p=1, dtype="uint8")
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
aug_data = aug(image=float_image)
deserialized_aug_data = deserialized_aug(image=float_image)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
@pytest.mark.parametrize("seed", TEST_SEEDS)
def test_transform_pipeline_serialization(seed, image, mask):
aug = A.Compose(
[
A.OneOrOther(
A.Compose(
[
A.Resize(1024, 1024),
A.RandomSizedCrop(min_max_height=(256, 1024), height=512, width=512, p=1),
A.OneOf(
[
A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),
A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),
]
),
]
),
A.Compose(
[
A.Resize(1024, 1024),
A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),
A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),
]
),
),
A.SomeOf(
[
A.HorizontalFlip(p=1),
A.Transpose(p=1),
A.HueSaturationValue(p=0.5),
A.RandomBrightnessContrast(p=0.5),
],
2,
replace=False,
),
]
)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
aug_data = aug(image=image, mask=mask)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, mask=mask)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
@pytest.mark.parametrize(
["bboxes", "bbox_format", "labels"],
[
([(20, 30, 40, 50)], "coco", [1]),
([(20, 30, 40, 50, 99), (10, 40, 30, 20, 9)], "coco", [1, 2]),
([(20, 30, 60, 80)], "pascal_voc", [2]),
([(20, 30, 60, 80, 99)], "pascal_voc", [1]),
([(0.2, 0.3, 0.4, 0.5)], "yolo", [2]),
([(0.2, 0.3, 0.4, 0.5, 99)], "yolo", [1]),
],
)
@pytest.mark.parametrize("seed", TEST_SEEDS)
def test_transform_pipeline_serialization_with_bboxes(seed, image, bboxes, bbox_format, labels):
aug = A.Compose(
[
A.OneOrOther(
A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
),
A.SomeOf(
[
A.HorizontalFlip(p=1),
A.Transpose(p=1),
A.HueSaturationValue(p=0.5),
A.RandomBrightnessContrast(p=0.5),
],
n=5,
),
],
bbox_params={"format": bbox_format, "label_fields": ["labels"]},
)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
aug_data = aug(image=image, bboxes=bboxes, labels=labels)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, bboxes=bboxes, labels=labels)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
@pytest.mark.parametrize(
["keypoints", "keypoint_format", "labels"],
[
([(20, 30, 40, 50)], "xyas", [1]),
([(20, 30, 40, 50, 99), (10, 40, 30, 20, 9)], "xy", [1, 2]),
([(20, 30, 60, 80)], "yx", [2]),
([(20, 30, 60, 80, 99)], "xys", [1]),
],
)
@pytest.mark.parametrize("seed", TEST_SEEDS)
def test_transform_pipeline_serialization_with_keypoints(seed, image, keypoints, keypoint_format, labels):
aug = A.Compose(
[
A.OneOrOther(
A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
),
A.SomeOf(
n=2,
transforms=[
A.HorizontalFlip(p=1),
A.Transpose(p=1),
A.HueSaturationValue(p=0.5),
A.RandomBrightnessContrast(p=0.5),
],
replace=False,
),
],
keypoint_params={"format": keypoint_format, "label_fields": ["labels"]},
)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
aug_data = aug(image=image, keypoints=keypoints, labels=labels)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints, labels=labels)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])
@pytest.mark.parametrize(
["augmentation_cls", "params"],
get_image_only_transforms(
except_augmentations={A.HistogramMatching, A.FDA, A.PixelDistributionAdaptation, A.TemplateTransform},
),
)
@pytest.mark.parametrize("seed", TEST_SEEDS)
def test_additional_targets_for_image_only_serialization(augmentation_cls, params, image, seed):
aug = A.Compose([augmentation_cls(always_apply=True, **params)], additional_targets={"image2": "image"})
image2 = image.copy()
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug)
set_seed(seed)
aug_data = aug(image=image, image2=image2)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, image2=image2)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["image2"], deserialized_aug_data["image2"])
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("p", [1])
def test_lambda_serialization(image, mask, albumentations_bboxes, keypoints, seed, p):
def vflip_image(image, **kwargs):
return F.vflip(image)
def vflip_mask(mask, **kwargs):
return F.vflip(mask)
def vflip_bbox(bbox, **kwargs):
return F.bbox_vflip(bbox, **kwargs)
def vflip_keypoint(keypoint, **kwargs):
return F.keypoint_vflip(keypoint, **kwargs)
aug = A.Lambda(name="vflip", image=vflip_image, mask=vflip_mask, bbox=vflip_bbox, keypoint=vflip_keypoint, p=p)
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug, lambda_transforms={"vflip": aug})
set_seed(seed)
aug_data = aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])
def test_serialization_v2_conversion_without_totensor():
current_directory = os.path.dirname(os.path.abspath(__file__))
files_directory = os.path.join(current_directory, "files")
transform_1_1_0 = A.load(os.path.join(files_directory, "transform_v1.1.0_without_totensor.json"))
with open(os.path.join(files_directory, "output_v1.1.0_without_totensor.json")) as f:
output_1_1_0 = json.load(f)
np.random.seed(42)
image = np.random.randint(low=0, high=255, size=(256, 256, 3), dtype=np.uint8)
random.seed(42)
transformed_image = transform_1_1_0(image=image)["image"]
assert transformed_image.tolist() == output_1_1_0
@skipif_no_torch
def test_serialization_v2_conversion_with_totensor():
current_directory = os.path.dirname(os.path.abspath(__file__))
files_directory = os.path.join(current_directory, "files")
transform_1_1_0 = A.load(os.path.join(files_directory, "transform_v1.1.0_with_totensor.json"))
with open(os.path.join(files_directory, "output_v1.1.0_with_totensor.json")) as f:
output_1_1_0 = json.load(f)
np.random.seed(42)
image = np.random.randint(low=0, high=255, size=(256, 256, 3), dtype=np.uint8)
random.seed(42)
transformed_image = transform_1_1_0(image=image)["image"]
assert transformed_image.numpy().tolist() == output_1_1_0
def test_serialization_v2_without_totensor():
current_directory = os.path.dirname(os.path.abspath(__file__))
files_directory = os.path.join(current_directory, "files")
transform = A.load(os.path.join(files_directory, "transform_serialization_v2_without_totensor.json"))
with open(os.path.join(files_directory, "output_v1.1.0_without_totensor.json")) as f:
output_1_1_0 = json.load(f)
np.random.seed(42)
image = np.random.randint(low=0, high=255, size=(256, 256, 3), dtype=np.uint8)
random.seed(42)
transformed_image = transform(image=image)["image"]
assert transformed_image.tolist() == output_1_1_0
@skipif_no_torch
def test_serialization_v2_with_totensor():
current_directory = os.path.dirname(os.path.abspath(__file__))
files_directory = os.path.join(current_directory, "files")
transform = A.load(os.path.join(files_directory, "transform_serialization_v2_with_totensor.json"))
with open(os.path.join(files_directory, "output_v1.1.0_with_totensor.json")) as f:
output_1_1_0 = json.load(f)
np.random.seed(42)
image = np.random.randint(low=0, high=255, size=(256, 256, 3), dtype=np.uint8)
random.seed(42)
transformed_image = transform(image=image)["image"]
assert transformed_image.numpy().tolist() == output_1_1_0
def test_custom_transform_with_overlapping_name():
class HorizontalFlip(ImageOnlyTransform):
pass
assert SERIALIZABLE_REGISTRY["HorizontalFlip"] == A.HorizontalFlip
assert SERIALIZABLE_REGISTRY["tests.test_serialization.HorizontalFlip"] == HorizontalFlip
def test_serialization_v2_to_dict():
transform = A.Compose([A.HorizontalFlip()])
transform_dict = A.to_dict(transform)["transform"]
assert transform_dict == {
"__class_fullname__": "Compose",
"p": 1.0,
"transforms": [{"__class_fullname__": "HorizontalFlip", "always_apply": False, "p": 0.5}],
"bbox_params": None,
"keypoint_params": None,
"additional_targets": {},
}
@pytest.mark.parametrize(
["class_fullname", "expected_short_class_name"],
[
["albumentations.augmentations.transforms.HorizontalFlip", "HorizontalFlip"],
["HorizontalFlip", "HorizontalFlip"],
["some_module.HorizontalFlip", "some_module.HorizontalFlip"],
],
)
def test_shorten_class_name(class_fullname, expected_short_class_name):
assert shorten_class_name(class_fullname) == expected_short_class_name
@pytest.mark.parametrize("seed", TEST_SEEDS)
@pytest.mark.parametrize("p", [1])
def test_template_transform_serialization(image, template, seed, p):
template_transform = A.TemplateTransform(name="template", templates=template, p=p)
aug = A.Compose([A.Flip(), template_transform, A.Blur()])
serialized_aug = A.to_dict(aug)
deserialized_aug = A.from_dict(serialized_aug, lambda_transforms={"template": template_transform})
set_seed(seed)
aug_data = aug(image=image)
set_seed(seed)
deserialized_aug_data = deserialized_aug(image=image)
assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
|
emukit/test_functions/multi_fidelity/hartmann.py | EmuKit/Emukit | 272 | 12738138 | <gh_stars>100-1000
from typing import Tuple
import numpy as np
from ...core import ContinuousParameter, InformationSourceParameter, ParameterSpace
from ...core.loop.user_function import MultiSourceFunctionWrapper
def multi_fidelity_hartmann_3d() -> Tuple[MultiSourceFunctionWrapper, ParameterSpace]:
r"""
The function is given by:
.. math::
f(x, \alpha) = -\sum_{i=1}^{4} \alpha_i \exp \left( -\sum_{j=1}^{3} A_{i,j}\left( x_j - P_{i, j} \right)^2 \right)
where
.. math::
\mathbf{A} = \begin{bmatrix}
3.0 & 10 & 30 \\
0.1 & 10 & 35 \\
3.0 & 10 & 30 \\
0.1 & 10 & 35
\end{bmatrix}
.. math::
\mathbf{P} = 10^{-4} \begin{bmatrix}
3689 & 1170 & 2673 \\
4699 & 4387 & 7470 \\
1091 & 8732 & 5547 \\
381 & 5743 & 8828
\end{bmatrix}
The high fidelity function is given by setting:
.. math::
\alpha = (1.0, 1.2, 3.0, 3.2)^T
The middle fidelity is given by setting:
.. math::
\alpha = (1.01, 1.19, 2.9, 3.3)^T
The low fidelity is given by setting:
.. math::
\alpha = (1.02, 1.18, 2.8, 3.4)^T
The domain is given by:
.. math::
\mathbf{x}_i \in (0, 1)
Reference: https://www.sfu.ca/~ssurjano/hart3.html
:return: Tuple of MultiSourceFunctionWrapper and ParameterSpace
"""
A = np.array([[3, 10, 30], [0.1, 10, 35], [3, 10, 30], [0.1, 10, 35]])
P = 1e-4 * np.array([[3689, 1170, 2673], [4699, 4387, 7470], [1091, 8732, 5547], [381, 5743, 8828]])
alpha = np.array([1.0, 1.2, 3.0, 3.2])
delta = np.array([0.01, -0.01, -0.1, 0.1])
def high(x):
res = 0
for i in range(4):
temp = 0
for j in range(3):
temp -= A[i][j] * np.power(x[:, j] - P[i][j], 2)
res += alpha[i] * np.exp(temp)
return res[:, None]
def medium(x):
alpha_m = alpha + delta
res = 0
for i in range(4):
temp = 0
for j in range(3):
temp -= A[i][j] * np.power(x[:, j] - P[i][j], 2)
res += alpha_m[i] * np.exp(temp)
return res[:, None]
def low(x):
alpha_l = alpha + 2 * delta
res = 0
for i in range(4):
temp = 0
for j in range(3):
temp -= A[i][j] * np.power(x[:, j] - P[i][j], 2)
res += alpha_l[i] * np.exp(temp)
return res[:, None]
space = ParameterSpace(
[
ContinuousParameter("x1", 0.0, 1.0),
ContinuousParameter("x2", 0.0, 1.0),
ContinuousParameter("x3", 0.0, 1.0),
InformationSourceParameter(3),
]
)
fcn_wrapper = MultiSourceFunctionWrapper([low, medium, high])
return fcn_wrapper, space
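# --- Illustrative sketch (added for exposition; not part of the original file) ---
# A self-contained NumPy check of the docstring formula: evaluate the
# high-fidelity Hartmann 3D at an arbitrary point with a vectorized expression
# that mirrors the loop inside `high` above. Only NumPy is used; no extra
# Emukit API is assumed, and the sample point is an arbitrary illustration value.
if __name__ == "__main__":
    A_demo = np.array([[3, 10, 30], [0.1, 10, 35], [3, 10, 30], [0.1, 10, 35]])
    P_demo = 1e-4 * np.array(
        [[3689, 1170, 2673], [4699, 4387, 7470], [1091, 8732, 5547], [381, 5743, 8828]]
    )
    alpha_demo = np.array([1.0, 1.2, 3.0, 3.2])
    x_demo = np.array([[0.5, 0.5, 0.5]])
    # inner[i] = -sum_j A[i, j] * (x_j - P[i, j])^2, evaluated for all rows at once
    inner = -np.sum(A_demo * (x_demo[:, None, :] - P_demo) ** 2, axis=2)
    value = (alpha_demo * np.exp(inner)).sum(axis=1)[:, None]
    print("high-fidelity Hartmann 3D at", x_demo, "->", value)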
|
vnpy/api/nst/__init__.py | funrunskypalace/vnpy | 19,529 | 12738150 | from .vnnsttd import TdApi
from .nst_constant import *
|
examples/MktdataPublisher.py | msitt/blpapi-python | 228 | 12738178 | # MktdataPublisher.py
from __future__ import print_function
from __future__ import absolute_import
import time
from optparse import OptionParser, OptionValueError
import datetime
import threading
import os
import platform as plat
import sys
if sys.version_info >= (3, 8) and plat.system().lower() == "windows":
# pylint: disable=no-member
with os.add_dll_directory(os.getenv('BLPAPI_LIBDIR')):
import blpapi
else:
import blpapi
PERMISSION_REQUEST = blpapi.Name("PermissionRequest")
RESOLUTION_SUCCESS = blpapi.Name("ResolutionSuccess")
SESSION_TERMINATED = blpapi.Name("SessionTerminated")
TOPICS = blpapi.Name("topics")
TOPIC_CREATED = blpapi.Name("TopicCreated")
TOPIC_SUBSCRIBED = blpapi.Name("TopicSubscribed")
TOPIC_UNSUBSCRIBED = blpapi.Name("TopicUnsubscribed")
TOPIC_RECAP = blpapi.Name("TopicRecap")
class MyStream(object):
def __init__(self, sid="", fields=None):
self.id = sid
self.fields = fields if fields else []
self.lastValue = 0
self.topic = blpapi.Topic()
self.isSubscribed = False
def fillData(self, eventFormatter, elementDef):
for i, f in enumerate(self.fields):
if not elementDef.typeDefinition().hasElementDefinition(f):
print("Invalid field '%s'" % f)
continue
fieldDef = elementDef.typeDefinition().getElementDefinition(f)
fieldType = fieldDef.typeDefinition().datatype()
value = None
if fieldType == blpapi.DataType.BOOL:
value = bool((self.lastValue + i) % 2 == 0)
elif fieldType == blpapi.DataType.CHAR:
value = chr((self.lastValue + i) % 100 + 32)
elif fieldType == blpapi.DataType.INT32 or \
fieldType == blpapi.DataType.INT64:
value = self.lastValue + i
elif fieldType == blpapi.DataType.FLOAT32 or \
fieldType == blpapi.DataType.FLOAT64:
value = (self.lastValue + i) * 1.1
elif fieldType == blpapi.DataType.STRING:
value = "S%d" % (self.lastValue + i)
elif fieldType == blpapi.DataType.DATE or \
fieldType == blpapi.DataType.TIME or \
fieldType == blpapi.DataType.DATETIME:
value = datetime.datetime.today()
                value = value.replace(day=int(self.lastValue / 100) % 28 + 1)
                value = value.replace(microsecond=i * 1000)
eventFormatter.setElement(f, value)
def fillDataNull(self, eventFormatter, elementDef):
for f in self.fields:
if not elementDef.typeDefinition().hasElementDefinition(f):
print("Invalid field '%s'" % f)
continue
fieldDef = elementDef.typeDefinition().getElementDefinition(f)
if fieldDef.typeDefinition().isSimpleType():
# Publishing NULL value
eventFormatter.setElementNull(f)
def next(self):
self.lastValue += 1
def isAvailable(self):
return self.topic.isValid() and self.isSubscribed
class MyEventHandler(object):
def __init__(self,
serviceName,
messageType,
fields,
eids,
resolveSubServiceCode,
mutex,
stop,
condition):
self.serviceName = serviceName
self.messageType = messageType
self.fields = fields
self.eids = eids
self.resolveSubServiceCode = resolveSubServiceCode
self.mutex = mutex
self.stop = stop
self.condition = condition
self.streams = dict()
self.availableTopicCount = 0
def processEvent(self, event, session):
if event.eventType() == blpapi.Event.SESSION_STATUS:
for msg in event:
print(msg)
if msg.messageType() == SESSION_TERMINATED:
self.stop.set()
elif event.eventType() == blpapi.Event.TOPIC_STATUS:
topicList = blpapi.TopicList()
for msg in event:
print(msg)
if msg.messageType() == TOPIC_SUBSCRIBED:
topicStr = msg.getElementAsString("topic")
with self.mutex:
if topicStr not in self.streams:
# TopicList knows how to add an entry based on a
# TOPIC_SUBSCRIBED message.
topicList.add(msg)
self.streams[topicStr] = MyStream(topicStr,
self.fields)
stream = self.streams[topicStr]
stream.isSubscribed = True
if stream.isAvailable():
self.availableTopicCount += 1
self.condition.notifyAll()
elif msg.messageType() == TOPIC_UNSUBSCRIBED:
topicStr = msg.getElementAsString("topic")
with self.mutex:
if topicStr not in self.streams:
# We should never be coming here.
# TOPIC_UNSUBSCRIBED can not come before
# a TOPIC_SUBSCRIBED or TOPIC_CREATED
continue
stream = self.streams[topicStr]
if stream.isAvailable():
self.availableTopicCount -= 1
self.condition.notifyAll()
stream.isSubscribed = False
elif msg.messageType() == TOPIC_CREATED:
topicStr = msg.getElementAsString("topic")
with self.mutex:
if topicStr not in self.streams:
self.streams[topicStr] = MyStream(topicStr,
self.fields)
stream = self.streams[topicStr]
try:
stream.topic = session.getTopic(msg)
except blpapi.Exception as e:
print("Exception while processing " \
"TOPIC_CREATED: %s" % e)
continue
if stream.isAvailable():
self.availableTopicCount += 1
self.condition.notifyAll()
elif msg.messageType() == TOPIC_RECAP:
# Here we send a recap in response to a Recap request.
try:
topicStr = msg.getElementAsString("topic")
recapEvent = None
with self.mutex:
if topicStr not in self.streams:
continue
stream = self.streams[topicStr]
if not stream.isAvailable():
continue
topic = session.getTopic(msg)
service = topic.service()
recapCid = msg.correlationIds()[0]
recapEvent = service.createPublishEvent()
elementDef = \
service.getEventDefinition(self.messageType)
eventFormatter = blpapi.EventFormatter(recapEvent)
eventFormatter.appendRecapMessage(topic, recapCid)
stream.fillData(eventFormatter, elementDef)
session.publish(recapEvent)
except blpapi.Exception as e:
print("Exception while processing TOPIC_RECAP: %s" % e)
continue
if topicList.size() > 0:
# createTopicsAsync will result in RESOLUTION_STATUS,
# TOPIC_CREATED events.
session.createTopicsAsync(topicList)
elif event.eventType() == blpapi.Event.RESOLUTION_STATUS:
for msg in event:
print(msg)
elif event.eventType() == blpapi.Event.REQUEST:
service = session.getService(self.serviceName)
for msg in event:
print(msg)
if msg.messageType() == PERMISSION_REQUEST:
# Similar to createPublishEvent. We assume just one
# service - self.serviceName. A responseEvent can only be
# for single request so we can specify the correlationId -
# which establishes context - when we create the Event.
response = \
service.createResponseEvent(msg.correlationIds()[0])
permission = 1 # ALLOWED: 0, DENIED: 1
ef = blpapi.EventFormatter(response)
if msg.hasElement("uuid"):
msg.getElementAsInteger("uuid")
permission = 0
if msg.hasElement("applicationId"):
msg.getElementAsInteger("applicationId")
permission = 0
# In appendResponse the string is the name of the
# operation, the correlationId indicates which request we
# are responding to.
ef.appendResponse("PermissionResponse")
ef.pushElement("topicPermissions")
# For each of the topics in the request, add an entry to
# the response.
topicsElement = msg.getElement(TOPICS).values()
for topic in topicsElement:
ef.appendElement()
ef.setElement("topic", topic)
if self.resolveSubServiceCode:
try:
ef.setElement("subServiceCode",
self.resolveSubServiceCode)
print(("Mapping topic %s to subServiceCode %s" %
(topic, self.resolveSubServiceCode)))
except blpapi.Exception:
print("subServiceCode could not be set."
" Resolving without subServiceCode")
ef.setElement("result", permission)
if permission == 1: # DENIED
ef.pushElement("reason")
ef.setElement("source", "My Publisher Name")
ef.setElement("category", "NOT_AUTHORIZED")
ef.setElement("subcategory",
"Publisher Controlled")
ef.setElement(
"description",
"Permission denied by My Publisher Name")
ef.popElement()
elif self.eids:
ef.pushElement("permissions")
ef.appendElement()
ef.setElement("permissionService", "//blp/blpperm")
ef.pushElement("eids")
for e in self.eids:
ef.appendValue(e)
ef.popElement()
ef.popElement()
ef.popElement()
ef.popElement()
ef.popElement()
# Service is implicit in the Event. sendResponse has a
# second parameter - partialResponse - that defaults to
# false.
session.sendResponse(response)
else:
for msg in event:
print(msg)
return True
def authOptionCallback(_option, _opt, value, parser):
"""Parse authorization options from user input"""
vals = value.split('=', 1)
if value == "user":
authUser = blpapi.AuthUser.createWithLogonName()
authOptions = blpapi.AuthOptions.createWithUser(authUser)
elif value == "none":
authOptions = None
elif vals[0] == "app" and len(vals) == 2:
appName = vals[1]
authOptions = blpapi.AuthOptions.createWithApp(appName)
elif vals[0] == "userapp" and len(vals) == 2:
appName = vals[1]
authUser = blpapi.AuthUser.createWithLogonName()
authOptions = blpapi.AuthOptions\
.createWithUserAndApp(authUser, appName)
elif vals[0] == "dir" and len(vals) == 2:
activeDirectoryProperty = vals[1]
authUser = blpapi.AuthUser\
.createWithActiveDirectoryProperty(activeDirectoryProperty)
authOptions = blpapi.AuthOptions.createWithUser(authUser)
elif vals[0] == "manual":
parts = []
if len(vals) == 2:
parts = vals[1].split(',')
if len(parts) != 3:
raise OptionValueError("Invalid auth option {}".format(value))
appName, ip, userId = parts
authUser = blpapi.AuthUser.createWithManualOptions(userId, ip)
authOptions = blpapi.AuthOptions.createWithUserAndApp(authUser, appName)
else:
raise OptionValueError("Invalid auth option '{}'".format(value))
parser.values.auth = {'option' : authOptions}
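# Summary of the --auth values handled above (added for exposition; the
# AuthOptions factory calls listed are exactly those used in authOptionCallback):
#   --auth user                      -> AuthOptions.createWithUser(<logon-name user>)
#   --auth none                      -> no session identity (Desktop API)
#   --auth app=<app>                 -> AuthOptions.createWithApp(<app>)
#   --auth userapp=<app>             -> logon-name user combined with <app>
#   --auth dir=<property>            -> Active Directory property lookup
#   --auth manual=<app>,<ip>,<user>  -> manually supplied application, IP and user id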
def parseCmdLine():
parser = OptionParser(description="Publish market data.")
parser.add_option("-a",
"--ip",
dest="hosts",
help="server name or IP (default: localhost)",
metavar="ipAddress",
action="append",
default=[])
parser.add_option("-p",
dest="port",
type="int",
help="server port (default: %default)",
metavar="tcpPort",
default=8194)
parser.add_option("-s",
dest="service",
help="service name (default: %default)",
metavar="service",
default="//viper/mktdata")
parser.add_option("-f",
dest="fields",
help="field to subscribe to (default: LAST_PRICE)",
metavar="field",
action="append",
default=[])
parser.add_option("-m",
dest="messageType",
help="type of published event (default: %default)",
metavar="messageType",
default="MarketDataEvents")
parser.add_option("-e",
dest="eids",
help="permission eid for all subscriptions",
metavar="EID",
action="append",
default=[])
parser.add_option("-g",
dest="groupId",
help="publisher groupId (defaults to unique value)",
metavar="groupId")
parser.add_option("-r",
"--pri",
type="int",
dest="priority",
help="set publisher priority level (default: %default)",
metavar="priority",
default=10)
parser.add_option("-c",
type="int",
dest="clearInterval",
help="number of events after which cache will be "
"cleared (default: 0 i.e cache never cleared)",
metavar="clearInterval",
default=0)
parser.add_option("--auth",
dest="auth",
help="authentication option: "
"user|none|app=<app>|userapp=<app>|dir=<property>"
"|manual=<app,ip,user>"
" (default: user)\n"
"'none' is applicable to Desktop API product "
"that requires Bloomberg Professional service "
"to be installed locally.",
metavar="option",
action="callback",
callback=authOptionCallback,
type="string",
default={"option" :
blpapi.AuthOptions.createWithUser(
blpapi.AuthUser.createWithLogonName())})
parser.add_option("--ssc",
dest="ssc",
help="active sub-service code option: "
"<begin>,<end>,<priority>",
metavar="ssc",
default="")
parser.add_option("--rssc",
dest="rssc",
help="sub-service code to be used in resolves",
metavar="rssc",
default="")
(options, _) = parser.parse_args()
if not options.hosts:
options.hosts = ["localhost"]
if not options.fields:
options.fields = ["LAST_PRICE"]
return options
def activate(options, session):
if options.ssc:
sscBegin, sscEnd, sscPriority = map(int, options.ssc.split(","))
print(("Activating sub service code range [%s, %s] @ %s" %
(sscBegin, sscEnd, sscPriority)))
session.activateSubServiceCodeRange(options.service,
sscBegin,
sscEnd,
sscPriority)
def deactivate(options, session):
if options.ssc:
sscBegin, sscEnd, sscPriority = map(int, options.ssc.split(","))
print(("DeActivating sub service code range [%s, %s] @ %s" %
(sscBegin, sscEnd, sscPriority)))
session.deactivateSubServiceCodeRange(options.service,
sscBegin,
sscEnd)
def main():
options = parseCmdLine()
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
for idx, host in enumerate(options.hosts):
sessionOptions.setServerAddress(host, options.port, idx)
sessionOptions.setSessionIdentityOptions(options.auth['option'])
sessionOptions.setAutoRestartOnDisconnection(True)
# NOTE: If running without a backup server, make many attempts to
# connect/reconnect to give that host a chance to come back up (the
# larger the number, the longer it will take for SessionStartupFailure
# to come on startup, or SessionTerminated due to inability to fail
# over). We don't have to do that in a redundant configuration - it's
# expected at least one server is up and reachable at any given time,
# so only try to connect to each server once.
sessionOptions.setNumStartAttempts(1 if len(options.hosts) > 1 else 1000)
print("Connecting to port %d on %s" % (
options.port, " ".join(options.hosts)))
PUBLISH_MESSAGE_TYPE = blpapi.Name(options.messageType)
mutex = threading.Lock()
stop = threading.Event()
condition = threading.Condition(mutex)
myEventHandler = MyEventHandler(options.service,
PUBLISH_MESSAGE_TYPE,
options.fields,
options.eids,
options.rssc,
mutex,
stop,
condition)
# Create a Session
session = blpapi.ProviderSession(sessionOptions,
myEventHandler.processEvent)
# Start a Session
if not session.start():
print("Failed to start session.")
return
serviceOptions = blpapi.ServiceRegistrationOptions()
if options.groupId is not None:
serviceOptions.setGroupId(options.groupId)
serviceOptions.setServicePriority(options.priority)
if options.ssc:
sscBegin, sscEnd, sscPriority = map(int, options.ssc.split(","))
print(("Adding active sub service code range [%s, %s] @ %s" %
(sscBegin, sscEnd, sscPriority)))
try:
serviceOptions.addActiveSubServiceCodeRange(sscBegin,
sscEnd,
sscPriority)
except blpapi.Exception as e:
print(("FAILED to add active sub service codes."
" Exception %s" % e.description()))
try:
if not session.registerService(options.service,
session.getAuthorizedIdentity(),
serviceOptions):
print("Failed to register '%s'" % options.service)
return
service = session.getService(options.service)
elementDef = service.getEventDefinition(PUBLISH_MESSAGE_TYPE)
eventCount = 0
numPublished = 0
while not stop.is_set():
event = service.createPublishEvent()
with condition:
while myEventHandler.availableTopicCount == 0:
# Set timeout to 1 - give a chance for CTRL-C
condition.wait(1)
if stop.is_set():
return
publishNull = False
if (options.clearInterval > 0 and
eventCount == options.clearInterval):
eventCount = 0
publishNull = True
eventFormatter = blpapi.EventFormatter(event)
for _,stream in myEventHandler.streams.items():
if not stream.isAvailable():
continue
eventFormatter.appendMessage(PUBLISH_MESSAGE_TYPE,
stream.topic)
if publishNull:
stream.fillDataNull(eventFormatter, elementDef)
else:
eventCount += 1
stream.next()
stream.fillData(eventFormatter, elementDef)
for msg in event:
print(msg)
session.publish(event)
time.sleep(1)
numPublished += 1
if numPublished % 10 == 0:
deactivate(options, session)
time.sleep(30)
activate(options, session)
finally:
# Stop the session
session.stop()
if __name__ == "__main__":
print("MktdataPublisher")
try:
main()
except KeyboardInterrupt:
print("Ctrl+C pressed. Stopping...")
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
|
tests/test_packages/test_protocols/test_http.py | bryanchriswhite/agents-aea | 126 | 12738185 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the http protocol package."""
import sys
from typing import Type
from unittest import mock
import pytest
from aea.common import Address
from aea.exceptions import AEAEnforceError
from aea.mail.base import Envelope
from aea.protocols.base import Message
from aea.protocols.dialogue.base import Dialogue as BaseDialogue
from aea.protocols.dialogue.base import DialogueLabel
import packages
from packages.fetchai.protocols.http.dialogues import HttpDialogue, HttpDialogues
from packages.fetchai.protocols.http.message import HttpMessage
from packages.fetchai.protocols.http.message import (
_default_logger as http_message_logger,
)
from tests.conftest import ROOT_DIR
sys.path.append(ROOT_DIR)
def test_request_serialization():
"""Test the serialization for 'request' speech-act works."""
msg = HttpMessage(
performative=HttpMessage.Performative.REQUEST,
method="some_method",
url="url",
version="some_version",
headers="some_headers",
body=b"some_body",
)
msg.to = "receiver"
envelope = Envelope(to=msg.to, sender="sender", message=msg,)
envelope_bytes = envelope.encode()
actual_envelope = Envelope.decode(envelope_bytes)
expected_envelope = envelope
assert expected_envelope.to == actual_envelope.to
assert expected_envelope.sender == actual_envelope.sender
assert (
expected_envelope.protocol_specification_id
== actual_envelope.protocol_specification_id
)
assert expected_envelope.message != actual_envelope.message
actual_msg = HttpMessage.serializer.decode(actual_envelope.message)
actual_msg.to = actual_envelope.to
actual_msg.sender = actual_envelope.sender
expected_msg = msg
assert expected_msg == actual_msg
def test_response_serialization():
"""Test the serialization for 'response' speech-act works."""
msg = HttpMessage(
message_id=2,
target=1,
performative=HttpMessage.Performative.RESPONSE,
version="some_version",
status_code=1,
status_text="some_status_text",
headers="some_headers",
body=b"some_body",
)
msg.to = "receiver"
envelope = Envelope(to=msg.to, sender="sender", message=msg,)
envelope_bytes = envelope.encode()
actual_envelope = Envelope.decode(envelope_bytes)
expected_envelope = envelope
assert expected_envelope.to == actual_envelope.to
assert expected_envelope.sender == actual_envelope.sender
assert (
expected_envelope.protocol_specification_id
== actual_envelope.protocol_specification_id
)
assert expected_envelope.message != actual_envelope.message
actual_msg = HttpMessage.serializer.decode(actual_envelope.message)
actual_msg.to = actual_envelope.to
actual_msg.sender = actual_envelope.sender
expected_msg = msg
assert expected_msg == actual_msg
def test_performative_string_value():
"""Test the string value of the performatives."""
assert (
str(HttpMessage.Performative.REQUEST) == "request"
), "The str value must be request"
assert (
str(HttpMessage.Performative.RESPONSE) == "response"
), "The str value must be response"
def test_encoding_unknown_performative():
"""Test that we raise an exception when the performative is unknown during encoding."""
msg = HttpMessage(
performative=HttpMessage.Performative.REQUEST,
method="some_method",
url="url",
version="some_version",
headers="some_headers",
body=b"some_body",
)
with pytest.raises(ValueError, match="Performative not valid:"):
with mock.patch.object(HttpMessage.Performative, "__eq__", return_value=False):
HttpMessage.serializer.encode(msg)
def test_decoding_unknown_performative():
"""Test that we raise an exception when the performative is unknown during decoding."""
msg = HttpMessage(
performative=HttpMessage.Performative.REQUEST,
method="some_method",
url="url",
version="some_version",
headers="some_headers",
body=b"some_body",
)
encoded_msg = HttpMessage.serializer.encode(msg)
with pytest.raises(ValueError, match="Performative not valid:"):
with mock.patch.object(HttpMessage.Performative, "__eq__", return_value=False):
HttpMessage.serializer.decode(encoded_msg)
@mock.patch.object(
packages.fetchai.protocols.http.message,
"enforce",
side_effect=AEAEnforceError("some error"),
)
def test_incorrect_message(mocked_enforce):
"""Test that we raise an exception when the message is incorrect."""
with mock.patch.object(http_message_logger, "error") as mock_logger:
HttpMessage(
performative=HttpMessage.Performative.REQUEST,
method="some_method",
url="url",
version="some_version",
headers="some_headers",
body=b"some_body",
)
mock_logger.assert_any_call("some error")
class TestDialogues:
"""Tests http dialogues."""
@classmethod
def setup_class(cls):
"""Set up the test."""
cls.agent_addr = "agent address"
cls.server_addr = "server address"
cls.agent_dialogues = AgentDialogues(cls.agent_addr)
cls.server_dialogues = ServerDialogues(cls.server_addr)
def test_create_self_initiated(self):
"""Test the self initialisation of a dialogue."""
result = self.agent_dialogues._create_self_initiated(
dialogue_opponent_addr=self.server_addr,
dialogue_reference=(str(0), ""),
role=HttpDialogue.Role.CLIENT,
)
assert isinstance(result, HttpDialogue)
assert result.role == HttpDialogue.Role.CLIENT, "The role must be client."
def test_create_opponent_initiated(self):
"""Test the opponent initialisation of a dialogue."""
result = self.agent_dialogues._create_opponent_initiated(
dialogue_opponent_addr=self.server_addr,
dialogue_reference=(str(0), ""),
role=HttpDialogue.Role.CLIENT,
)
assert isinstance(result, HttpDialogue)
assert result.role == HttpDialogue.Role.CLIENT, "The role must be client."
class AgentDialogue(HttpDialogue):
"""The dialogue class maintains state of a dialogue and manages it."""
def __init__(
self,
dialogue_label: DialogueLabel,
self_address: Address,
role: BaseDialogue.Role,
message_class: Type[HttpMessage],
) -> None:
"""
Initialize a dialogue.
:param dialogue_label: the identifier of the dialogue
:param self_address: the address of the entity for whom this dialogue is maintained
:param role: the role of the agent this dialogue is maintained for
:return: None
"""
HttpDialogue.__init__(
self,
dialogue_label=dialogue_label,
self_address=self_address,
role=role,
message_class=message_class,
)
class AgentDialogues(HttpDialogues):
"""The dialogues class keeps track of all dialogues."""
def __init__(self, self_address: Address) -> None:
"""
Initialize dialogues.
:return: None
"""
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> BaseDialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
return HttpDialogue.Role.CLIENT
HttpDialogues.__init__(
self,
self_address=self_address,
role_from_first_message=role_from_first_message,
dialogue_class=AgentDialogue,
)
class ServerDialogue(HttpDialogue):
"""The dialogue class maintains state of a dialogue and manages it."""
def __init__(
self,
dialogue_label: DialogueLabel,
self_address: Address,
role: BaseDialogue.Role,
message_class: Type[HttpMessage],
) -> None:
"""
Initialize a dialogue.
:param dialogue_label: the identifier of the dialogue
:param self_address: the address of the entity for whom this dialogue is maintained
:param role: the role of the agent this dialogue is maintained for
:return: None
"""
HttpDialogue.__init__(
self,
dialogue_label=dialogue_label,
self_address=self_address,
role=role,
message_class=message_class,
)
class ServerDialogues(HttpDialogues):
"""The dialogues class keeps track of all dialogues."""
def __init__(self, self_address: Address) -> None:
"""
Initialize dialogues.
:return: None
"""
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> BaseDialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
return HttpDialogue.Role.SERVER
HttpDialogues.__init__(
self,
self_address=self_address,
role_from_first_message=role_from_first_message,
dialogue_class=ServerDialogue,
)
|
memory_analyzer/tests/test_analysis_template.py | clearpathrobotics/memory-analyzer | 129 | 12738199 | <filename>memory_analyzer/tests/test_analysis_template.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
import sys
import tempfile
from unittest import TestCase, mock
import objgraph
from jinja2 import Environment, FileSystemLoader
from pympler import muppy, summary # noqa
from .. import analysis_utils
class ObjGraphTemplateTests(TestCase):
template_name = "analysis.py.template"
filename = "some_filename"
templates_path = f"{os.path.abspath(os.path.dirname(__file__))}/../templates/"
pid = 1234
specific_refs = ["str", "int"]
def setUp(self):
self.items = [
["builtins.test1", 1, 10000],
["__main__.test2", 3, 5],
["ast._things.test3", 10, 1024],
]
# TODO: Add tests for summary obj and the _repr
mock_summ = mock.patch.object(summary, "summarize", return_value=self.items)
mock_summ.start()
self.addCleanup(mock_summ.stop)
def tearDown(self):
if os.path.isfile(f"{self.templates_path}rendered_template.py.out"):
os.remove(f"{self.templates_path}rendered_template.py.out")
def test_with_no_references(self):
template = analysis_utils.render_template(
self.template_name,
self.templates_path,
0,
self.pid,
[],
self.filename,
None,
)
with mock.patch("builtins.open", mock.mock_open(), create=True) as mock_fifo:
exec(template)
mock_fifo.assert_called_with(f"/tmp/memanz_pipe_{self.pid}", "wb")
output_bytes = pickle.dumps(self.items)
mock_fifo().write.assert_called_with(output_bytes)
@mock.patch.object(objgraph, "show_backrefs")
@mock.patch.object(objgraph, "show_refs")
def test_with_num_references(self, mock_refs, mock_back_refs):
dirname = "/tmp/tests/"
template = analysis_utils.render_template(
self.template_name,
self.templates_path,
1,
self.pid,
[],
f"{dirname}{self.filename}",
None,
)
with mock.patch("builtins.open", mock.mock_open(), create=True) as mock_fifo:
exec(template, {})
handler = mock_fifo()
output_bytes = pickle.dumps(self.items)
handler.write.assert_called_with(output_bytes)
self.assertEqual(
self.items,
[
[
"builtins.test1",
1,
10000,
f"{dirname}ref_1234_test1.png",
f"{dirname}backref_1234_test1.png",
],
["ast._things.test3", 10, 1024],
["__main__.test2", 3, 5],
],
)
@mock.patch.object(objgraph, "show_backrefs")
@mock.patch.object(objgraph, "show_refs")
def test_with_specific_references(self, mock_refs, mock_back_refs):
dirname = "/tmp/tests/"
with tempfile.TemporaryDirectory() as d:
template = analysis_utils.render_template(
self.template_name,
self.templates_path,
0,
self.pid,
["test3"],
f"{dirname}{self.filename}",
d,
)
self.assertEqual(1, len(os.listdir(d)), os.listdir(d))
with mock.patch("builtins.open", mock.mock_open(), create=True) as mock_fifo:
exec(template, {})
handler = mock_fifo()
output_bytes = pickle.dumps(self.items)
handler.write.assert_called_with(output_bytes)
self.assertEqual(
self.items,
[
["builtins.test1", 1, 10000],
["__main__.test2", 3, 5],
[
"ast._things.test3",
10,
1024,
f"{dirname}ref_1234_test3.png",
f"{dirname}backref_1234_test3.png",
],
],
)
|
changedetectionio/notification.py | jeremysherriff/changedetection.io | 3,933 | 12738208 | import os
import apprise
valid_tokens = {
'base_url': '',
'watch_url': '',
'watch_uuid': '',
'watch_title': '',
'watch_tag': '',
'diff_url': '',
'preview_url': '',
'current_snapshot': ''
}
def process_notification(n_object, datastore):
import logging
log = logging.getLogger('apprise')
log.setLevel('TRACE')
apobj = apprise.Apprise(debug=True)
for url in n_object['notification_urls']:
url = url.strip()
print (">> Process Notification: AppRise notifying {}".format(url))
apobj.add(url)
# Get the notification body from datastore
n_body = n_object['notification_body']
n_title = n_object['notification_title']
# Insert variables into the notification content
notification_parameters = create_notification_parameters(n_object, datastore)
for n_k in notification_parameters:
token = '{' + n_k + '}'
val = notification_parameters[n_k]
n_title = n_title.replace(token, val)
n_body = n_body.replace(token, val)
apobj.notify(
body=n_body,
title=n_title
)
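# Worked example of the token substitution above (added for exposition; the
# URL and title are made-up illustration values):
#   notification_body = "Change detected: {watch_url} ({watch_title})"
#   becomes, after the replace loop,
#   "Change detected: https://example.com/page (My watched page)"
# using the values produced by create_notification_parameters() below.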
# Notification title + body content parameters get created here.
def create_notification_parameters(n_object, datastore):
from copy import deepcopy
# in the case we send a test notification from the main settings, there is no UUID.
uuid = n_object['uuid'] if 'uuid' in n_object else ''
if uuid != '':
watch_title = datastore.data['watching'][uuid]['title']
watch_tag = datastore.data['watching'][uuid]['tag']
else:
watch_title = 'Change Detection'
watch_tag = ''
# Create URLs to customise the notification with
base_url = datastore.data['settings']['application']['base_url']
watch_url = n_object['watch_url']
# Re #148 - Some people have just {base_url} in the body or title, but this may break some notification services
    # like 'Join', so it's always best to at least set something obvious so that they are not broken.
if base_url == '':
base_url = "<base-url-env-var-not-set>"
diff_url = "{}/diff/{}".format(base_url, uuid)
preview_url = "{}/preview/{}".format(base_url, uuid)
# Not sure deepcopy is needed here, but why not
tokens = deepcopy(valid_tokens)
# Valid_tokens also used as a field validator
tokens.update(
{
'base_url': base_url if base_url is not None else '',
'watch_url': watch_url,
'watch_uuid': uuid,
'watch_title': watch_title if watch_title is not None else '',
'watch_tag': watch_tag if watch_tag is not None else '',
'diff_url': diff_url,
'preview_url': preview_url,
'current_snapshot': n_object['current_snapshot'] if 'current_snapshot' in n_object else ''
})
return tokens |
python/tvm/topi/nn/conv1d.py | XiaoSong9905/tvm | 4,640 | 12738217 | <reponame>XiaoSong9905/tvm<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""1D convolution operators."""
from .conv2d import conv
def conv1d(data, kernel, strides=1, padding="VALID", dilation=1, layout="NCW", out_dtype=None):
"""1D convolution forward operator.
Parameters
----------
data : tvm.te.Tensor
3-D input shape [batch, in_channel, in_width] for layout == 'NCW'
and [batch, in_width, in_channel] for layout == 'NWC'
kernel : tvm.te.Tensor
3-D kernel with shape [num_filter, in_channel, filter_size] for layout == 'NCW'
and [filter_size, in_channel, num_filter] for layout == 'NWC'
strides : int or tuple
The spatial stride along width
padding : int or str
Padding size, or ['VALID', 'SAME']
dilation : int or tuple
Dilation rate if convolution should be dilated.
layout : str
How input data is laid out, must be one of ['NCW', 'NWC']
out_dtype : str
The output data type. If None then output is same type as input.
"""
return conv(data, kernel, strides, padding, dilation, 1, layout, out_dtype)
def conv1d_nwc(data, kernel, strides=1, padding="VALID", dilation=1, out_dtype=None):
"""1D convolution in NWC layout. See :py:func:`conv` for details on parameters"""
return conv(data, kernel, strides, padding, dilation, 1, "NWC", out_dtype=out_dtype)
def conv1d_ncw(data, kernel, strides=1, padding="VALID", dilation=1, out_dtype=None):
"""1D convolution in NCW layout. See :py:func:`conv` for details on parameters"""
return conv(data, kernel, strides, padding, dilation, 1, "NCW", out_dtype=out_dtype)
def group_conv1d_nwc(
data, kernel, strides=1, padding="VALID", dilation=1, groups=1, out_dtype=None
):
"""1D convolution forward operator for NWC layout.
Parameters
----------
data : tvm.te.Tensor
3-D with shape [batch, in_width, in_channel]
kernel : tvm.te.Tensor
3-D with shape [filter_size, in_channel, num_filter]
strides : int or tuple
The spatial stride along width
padding : int, tuple, or str
Padding size can be an integer for equal padding,
a tuple of (left, right) or a string in ['VALID', 'SAME'].
dilation : int or tuple
Dilation rate if convolution should be dilated.
groups : int
Number of groups
out_dtype : str
The output data type. If None then output is same type as input.
"""
return conv(data, kernel, strides, padding, dilation, groups, "NWC", out_dtype=out_dtype)
def group_conv1d_ncw(
data, kernel, strides=1, padding="VALID", dilation=1, groups=1, out_dtype=None
):
"""1D convolution forward operator for NCW layout.
Parameters
----------
data : tvm.te.Tensor
3-D with shape [batch, in_channel, in_width]
kernel : tvm.te.Tensor
3-D with shape [num_filter, in_channel, filter_size]
strides : int or tuple
The spatial stride along width
padding : int, tuple, or str
Padding size can be an integer for equal padding,
a tuple of (left, right) or a string in ['VALID', 'SAME'].
dilation : int or tuple
Dilation rate if convolution should be dilated.
groups : int
Number of groups
out_dtype : str
The output data type. If None then output is same type as input.
"""
return conv(data, kernel, strides, padding, dilation, groups, "NCW", out_dtype=out_dtype)
|
tests/test_samples.py | j0ono0/pinout-diagram | 304 | 12738222 | ##########################################################
#
# pinout tests
#
# Use a user-defined temporary directory if
# you have problems with multiple harddrives (like I do):
#
# >>> pytest --basetemp=temp
#
##########################################################
import filecmp
import pytest
import re
import shutil
import uuid
from pathlib import Path
from importlib import reload
from pinout import manager
from pinout import config
def re_sub_ids(re_m):
id = re_m.group(0).split("_")
id = "unique_id_replaced-for-testing_" + id[-1]
return id
def mk_test_file(src, dest):
shutil.copyfile(src, dest)
with src.open() as f:
data = f.read()
# sub ids
id = re.compile(r"(?<=id=\").+(?=\")")
data = re.sub(id, re_sub_ids, data)
# sub hrefs
id = re.compile(r"(?<=href=\"#).+(?=\")")
data = re.sub(id, re_sub_ids, data)
# sub clip-path urls
id = re.compile(r"(?<=clip-path=\"url\(#).+(?=\")")
data = re.sub(id, re_sub_ids, data)
# write modified file data to testfile
dest.write_text(data)
return dest
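# Illustrative note (added for exposition; the id shown is a made-up example):
# re_sub_ids keeps only the suffix after the last underscore, so an attribute
# such as  id="pyv-3f2a_7"  in both the freshly exported SVG and the reference
# SVG becomes  id="unique_id_replaced-for-testing_7", letting filecmp compare
# files whose ids are otherwise randomly generated per export.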
@pytest.mark.parametrize(
"module_path, ref_path",
[
(
"../samples/arduino/arduino/uno/arduino_uno.py",
"../samples/arduino/pinout_arduino_uno_rev3.svg",
),
(
"../samples/arduino/arduino/rp2040/arduino_nano_rp2040_connect.py",
"../samples/arduino/pinout_arduino_nano_rp2040_connect.svg",
),
(
"../samples/attiny85/attiny85.py",
"../samples/attiny85/pinout_attiny85.svg",
),
(
"../samples/clip_path/pinout_diagram.py",
"../samples/clip_path/diagram.svg",
),
(
"../samples/full_sample/pinout_diagram.py",
"../samples/full_sample/pinout_diagram.svg",
),
(
"../samples/panel_layout/panel_layout.py",
"../samples/panel_layout/output/panel_layout.svg",
),
(
"../samples/panel_layout/populated_layout.py",
"../samples/panel_layout/output/populated_layout.svg",
),
(
"../samples/pci-express/pinout_x1.py",
"../samples/pci-express/pinout_x1.svg",
),
(
"../samples/section_pullout/pinout_diagram.py",
"../samples/section_pullout/diagram.svg",
),
(
"../samples/teensy_4.0/pinout_diagram.py",
"../samples/teensy_4.0/teensy_4.0_front_pinout_diagram.svg",
),
],
)
def test_output_against_reference(tmp_path, module_path, ref_path):
# Config requires reloading between tests to to ensure
# is in default state.
reload(config)
module_path = Path(module_path)
ref_path = Path(ref_path)
# Export a temp file in same location as reference:
# Required for relative links to be identical.
tempsvg = ref_path.parent / f"temp_pytest_{str(uuid.uuid4())}.svg"
manager.export_diagram(
module_path,
tempsvg,
overwrite=True,
)
# Create files for comparison. Unique ids are converted to match
file1 = mk_test_file(tempsvg, tmp_path / f"test_file.svg")
file2 = mk_test_file(ref_path, tmp_path / f"ref_file.svg")
# Remove temp file
tempsvg.unlink()
# Test files are identical
assert filecmp.cmp(file1, file2, shallow=False)
|
tools/mo/openvino/tools/mo/ops/Exit.py | ryanloney/openvino-1 | 1,127 | 12738238 | <reponame>ryanloney/openvino-1
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.op import Op
class Exit(Op):
op = "Exit"
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': None,
'op': __class__.op,
'infer': Exit.exit_infer,
'in_ports_count': 1,
}
super().__init__(graph, mandatory_props, attrs)
@staticmethod
def exit_infer(node: Node):
output_shape = node.in_port(0).data.get_shape()
output_value = node.in_port(0).data.get_value()
for port in node.out_ports():
if not node.out_port(port).disconnected():
node.out_port(port).data.set_shape(output_shape)
if output_value is not None:
node.out_port(port).data.set_value(output_value)
|
api-inference-community/docker_images/timm/tests/test_api_text_to_speech.py | abidlabs/huggingface_hub | 362 | 12738249 | import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from app.validation import ffmpeg_read
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"text-to-speech" not in ALLOWED_TASKS,
"text-to-speech not implemented",
)
class TextToSpeechTestCase(TestCase):
def setUp(self):
model_id = TESTABLE_MODELS["text-to-speech"]
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = model_id
os.environ["TASK"] = "text-to-speech"
from app.main import app
self.app = app
def tearDown(self):
os.environ["MODEL_ID"] = self.old_model_id
os.environ["TASK"] = self.old_task
def test_simple(self):
with TestClient(self.app) as client:
response = client.post("/", json={"inputs": "This is some text"})
self.assertEqual(
response.status_code,
200,
)
            self.assertEqual(response.headers["content-type"], "audio/wav")
audio = ffmpeg_read(response.content)
self.assertEqual(audio.shape, (10,))
def test_malformed_input(self):
with TestClient(self.app) as client:
response = client.post("/", data=b"This is some test")
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')
|
src/sage/algebras/finite_gca.py | UCD4IDS/sage | 1,742 | 12738269 | r"""
Finite dimensional graded commutative algebras
AUTHORS:
- <NAME> (2021): initial version
"""
#*****************************************************************************
# Copyright (C) 2021 <NAME> <m.jung at vu.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.combinat.free_module import CombinatorialFreeModule
from sage.categories.algebras import Algebras
from sage.misc.cachefunc import cached_method
from sage.combinat.integer_vector_weighted import WeightedIntegerVectors
from sage.rings.ring import Algebra
from sage.misc.functional import is_odd, is_even
from sage.sets.disjoint_union_enumerated_sets import DisjointUnionEnumeratedSets
from sage.sets.condition_set import ConditionSet
from sage.rings.integer_ring import ZZ
class FiniteGCAlgebra(CombinatorialFreeModule, Algebra):
r"""
Finite dimensional graded commutative algebras.
A finite dimensional graded commutative algebra `A` is an integer-graded
algebra satisfying the super-algebra relation w.r.t. the degree modulo 2.
More precisely, `A` has a graded ring structure
.. MATH::
A = \bigoplus_{i=0}^n A_i,
where `n \in \NN` is the finite maximal degree, and the multiplication
satisfies
.. MATH::
A_i \cdot A_j \subset \begin{cases}A_{i+j} & \text{if $i+j\leq n$}, \\
0 & \text{if $i+j > n$},\end{cases}
as well as the super-algebra relation
.. MATH::
x y = (-1)^{ij} y x
for all homogeneous elements `x \in A_i` and `y \in A_j`.
Such an algebra is multiplicatively generated by a set of single monomials
`\{ x_1, \ldots, x_k \}`, where each `x_i` is given a certain degree
`\mathrm{deg}(x_i)`. To that end, this algebra can be given a vector
space basis, and the basis vectors are of the form `x_1^{w_1} \cdots x_n^{
w_k}`, where `\sum_{i=1}^k \mathrm{deg}(x_i) \, w_i \leq n` and
.. MATH::
w_i \in \begin{cases} \ZZ_2 & \text{if $\mathrm{deg}(x_i)$ is odd}, \\
\NN & \text{if $\mathrm{deg}(x_i)$ is even}. \end{cases}
Typical examples of finite dimensional graded commutative algebras are
cohomology rings over finite dimensional CW-complexes.
INPUT:
- ``base`` -- the base field
- ``names`` -- (optional) names of the generators: a list of
strings or a single string with the names separated by
commas. If not specified, the generators are named "x0", "x1",...
- ``degrees`` -- (optional) a tuple or list specifying the degrees
of the generators; if omitted, each generator is given degree
1, and if both ``names`` and ``degrees`` are omitted, an error is
raised.
- ``max_degree`` -- the maximal degree of the graded algebra.
- ``mul_symbol`` -- (optional) symbol used for multiplication. If omitted,
the string "*" is used.
- ``mul_latex_symbol`` -- (optional) latex symbol used for multiplication.
If omitted, the empty string is used.
EXAMPLES::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1,2,2,3), max_degree=6)
sage: A
Graded commutative algebra with generators ('x', 'y', 'z', 't') in degrees (1, 2, 2, 3) with maximal degree 6
sage: t*x + x*t
0
sage: x^2
0
sage: x*t^2
0
sage: x*y^2+z*t
x*y^2 + z*t
The generators can be returned with :meth:`algebra_generators`::
sage: F = A.algebra_generators(); F
Family (x, y, z, t)
sage: [g.degree() for g in F]
[1, 2, 2, 3]
We can also return the basis::
sage: list(A.basis())
[1, x, z, y, t, x*z, x*y, x*t, z^2, y*z, y^2, z*t, y*t, x*z^2, x*y*z, x*y^2]
Depending on the context, the multiplication can be given a different
symbol::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1,2,6,6), max_degree=10, mul_symbol='⌣', mul_latex_symbol=r'\smile')
sage: x*y^2 + x*t
x⌣y^2 + x⌣t
sage: latex(x*y^2 - z*x)
x\smile y^{2} - x\smile z
.. NOTE::
Notice, when the argument ``max_degree`` in the global namespace is
omitted, an instance of the class
:class:`sage.algebras.commutative_dga.GCAlgebra` is created instead::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, degrees=(1,2,6,6))
sage: type(A)
<class 'sage.algebras.commutative_dga.GCAlgebra_with_category'>
"""
@staticmethod
def __classcall_private__(cls, base, names=None, degrees=None,
max_degree=None, category=None, **kwargs):
r"""
Normalize the input for the :meth:`__init__` method and the
unique representation.
INPUT:
- ``base`` -- the base ring of the algebra
- ``max_degree`` -- the maximal degree of the algebra
- ``names`` -- the names of the variables; by default, set to ``x1``,
``x2``, etc.
- ``degrees`` -- the degrees of the generators; by default, set to 1
TESTS::
sage: A1 = GradedCommutativeAlgebra(GF(2), 'x,y', (3, 6), max_degree=12)
sage: A2 = GradedCommutativeAlgebra(GF(2), ['x', 'y'], [3, 6], max_degree=12)
sage: A1 is A2
True
"""
if max_degree is None:
raise TypeError("max_degree must be specified")
if names is None:
if degrees is None:
raise ValueError("You must specify names or degrees")
else:
n = len(degrees)
names = tuple('x{}'.format(i) for i in range(n))
elif isinstance(names, str):
names = tuple(names.split(','))
n = len(names)
else:
n = len(names)
names = tuple(names)
if degrees is None:
degrees = tuple([1 for _ in range(n)])
else:
degrees = tuple(degrees)
return super().__classcall__(cls, base=base, names=names,
degrees=degrees, max_degree=max_degree,
category=category, **kwargs)
def __init__(self, base, names, degrees, max_degree,
category=None, **kwargs):
r"""
Construct a commutative graded algebra with finite degree.
TESTS::
sage: A.<x,y,z,t> = GradedCommutativeAlgebra(QQ, max_degree=6)
sage: TestSuite(A).run()
sage: A = GradedCommutativeAlgebra(QQ, ('x','y','z'), [2,3,4], max_degree=8)
sage: TestSuite(A).run()
sage: A = GradedCommutativeAlgebra(QQ, ('x','y','z','t'), [1,2,3,4], max_degree=10)
sage: TestSuite(A).run()
"""
from sage.arith.misc import gcd
if max_degree not in ZZ:
raise TypeError('max_degree must be an integer')
if max_degree < max(degrees):
            raise ValueError(f'max_degree must not be smaller than {max(degrees)}')
self._names = names
self.__ngens = len(self._names)
self._degrees = degrees
self._max_deg = max_degree
self._weighted_vectors = WeightedIntegerVectors(degrees)
self._mul_symbol = kwargs.pop('mul_symbol', '*')
self._mul_latex_symbol = kwargs.pop('mul_latex_symbol', '')
step = gcd(degrees)
universe = DisjointUnionEnumeratedSets(self._weighted_vectors.subset(k)
for k in range(0, max_degree, step))
base_cat = Algebras(base).WithBasis().Super().Supercommutative().FiniteDimensional()
category = base_cat.or_subcategory(category, join=True)
indices = ConditionSet(universe, self._valid_index)
sorting_key = self._weighted_vectors.grading
CombinatorialFreeModule.__init__(self, base, indices,
sorting_key=sorting_key,
category=category)
def _valid_index(self, w):
r"""
Return whether ``w`` is a valid index; no multiple powers in odd
degrees.
TESTS::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8)
sage: w1 = A._weighted_vectors([1,2,1])
sage: w2 = A._weighted_vectors([1,2,2])
sage: A._valid_index(w1)
True
sage: A._valid_index(w2)
False
"""
return not any(i > 1 for i, d in zip(w, self._degrees) if is_odd(d))
def _repr_(self):
"""
Return the string representation of ``self``.
TESTS::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8)
sage: A._repr_()
"Graded commutative algebra with generators ('x', 'y', 'z') in degrees (1, 2, 3) with maximal degree 8"
sage: A # indirect doctest
Graded commutative algebra with generators ('x', 'y', 'z') in degrees (1, 2, 3) with maximal degree 8
"""
desc = f'Graded commutative algebra with generators {self._names} in '
desc += f'degrees {self._degrees} with maximal degree {self._max_deg}'
return desc
def ngens(self):
r"""
Return the number of generators of ``self``.
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10)
sage: A.ngens()
3
"""
return self.__ngens
@cached_method
def product_on_basis(self, w1, w2):
r"""
Return the product of two indices within the algebra.
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10)
sage: z*x
x*z
sage: x^3
0
sage: 5*z + 4*z*x
5*z + 4*x*z
::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=5)
sage: 2*x*y
2*x*y
sage: x^2
0
sage: x*z
x*z
sage: z*x
-x*z
sage: x*y*z
0
TESTS::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10)
sage: weighted_vectors = A._weighted_vectors
sage: w1 = A._weighted_vectors([1,0,1])
sage: w2 = A._weighted_vectors([0,0,0])
sage: A.product_on_basis(w1, w2)
x*z
::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=5)
sage: weighted_vectors = A._weighted_vectors
sage: w1 = A._weighted_vectors([1,0,0])
sage: w2 = A._weighted_vectors([0,0,1])
sage: A.product_on_basis(w1, w2)
x*z
sage: A.product_on_basis(w2, w1)
-x*z
::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=10)
sage: weighted_vectors = A._weighted_vectors
sage: w1 = A._weighted_vectors([1,1,0])
sage: w2 = A._weighted_vectors([0,1,1])
sage: A.product_on_basis(w1, w2)
x*y^2*z
sage: A.product_on_basis(w2, w1)
-x*y^2*z
"""
grading = self._weighted_vectors.grading
deg_left = grading(w1)
deg_right = grading(w2)
deg_tot = deg_left + deg_right
if deg_tot > self._max_deg:
return self.zero()
w_tot = self._weighted_vectors([sum(w) for w in zip(w1, w2)])
if not self._valid_index(w_tot):
return self.zero()
# determine sign
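        # Koszul sign: every odd-degree generator appearing in ``w2`` has to move past
        # each odd-degree generator of ``w1`` with a larger index; each such swap
        # contributes a factor of -1, and ``c`` counts these swaps.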
n = self.__ngens
c = 0
for p, i, d in zip(reversed(range(n)), reversed(w1), reversed(self._degrees)):
if is_even(d) or i == 0:
continue
for q, j, b in zip(range(n), w2, self._degrees):
if q == p:
break
if j == 0 or is_even(b):
continue
c += 1
return (-1)**c * self.monomial(w_tot)
def degree_on_basis(self, i):
r"""
Return the degree of a homogeneous element with index `i`.
EXAMPLES::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(2,4,6), max_degree=7)
sage: a.degree()
2
sage: (2*a*b).degree()
6
sage: (a+b).degree()
Traceback (most recent call last):
...
ValueError: element is not homogeneous
TESTS::
sage: A.<a,b,c> = GradedCommutativeAlgebra(QQ, degrees=(2,4,6), max_degree=7)
sage: weighted_vectors = A._weighted_vectors
sage: i = A._weighted_vectors([1,1,0])
sage: A.degree_on_basis(i)
6
"""
return self._weighted_vectors.grading(i)
def _repr_term(self, w):
r"""
Return the string representation of basis with index ``w``.
TESTS::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8)
sage: w = A._weighted_vectors([1,2,1])
sage: A._repr_term(w)
'x*y^2*z'
sage: x*y^2*z # indirect doctest
x*y^2*z
::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8, mul_symbol='⌣')
sage: w = A._weighted_vectors([1,2,1])
sage: A._repr_term(w)
'x⌣y^2⌣z'
sage: x*y^2*z # indirect doctest
x⌣y^2⌣z
"""
# Trivial case:
if sum(w) == 0:
return '1'
# Non-trivial case:
terms = []
for i in range(len(w)):
if w[i] == 0:
continue
elif w[i] == 1:
terms.append(self._names[i])
else:
terms.append(self._names[i] + f'^{w[i]}')
return self._mul_symbol.join(terms)
def _latex_term(self, w):
r"""
Return the LaTeX representation of basis with index ``w``.
TESTS::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8)
sage: w = A._weighted_vectors([1,2,1])
sage: A._latex_term(w)
'x y^{2} z'
sage: latex(x*y^2*z) # indirect doctest
x y^{2} z
::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8, mul_latex_symbol=r'\smile')
sage: A._latex_term(w)
'x\\smile y^{2}\\smile z'
sage: latex(x*y^2*z) # indirect doctest
x\smile y^{2}\smile z
"""
# Trivial case:
if sum(w) == 0:
return '1'
# Non-trivial case:
terms = []
for i in range(len(w)):
if w[i] == 0:
continue
elif w[i] == 1:
terms.append(self._names[i])
else:
terms.append(self._names[i] + '^{' + str(w[i]) + '}')
latex_mul = self._mul_latex_symbol + ' ' # add whitespace
return latex_mul.join(terms)
def algebra_generators(self):
r"""
Return the generators of ``self`` as a
:class:`sage.sets.family.TrivialFamily`.
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10)
sage: A.algebra_generators()
Family (x, y, z)
"""
from sage.sets.family import Family
return Family(self.gens())
@cached_method
def one_basis(self):
r"""
Return the index of the one element of ``self``.
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10)
sage: ind = A.one_basis(); ind
[0, 0, 0]
sage: A.monomial(ind)
1
sage: A.one() # indirect doctest
1
"""
n = len(self._degrees)
return self._weighted_vectors([0 for _ in range(n)])
def gens(self):
r"""
Return the generators of ``self`` as a list.
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10)
sage: A.gens()
[x, y, z]
"""
n = len(self._degrees)
zero = [0 for _ in range(n)]
indices = []
for k in range(n):
ind = list(zero)
ind[k] = 1
indices.append(self._weighted_vectors(ind))
return [self.monomial(ind) for ind in indices]
@cached_method
def gen(self, i):
r"""
Return the `i`-th generator of ``self``.
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(4,8,2), max_degree=10)
sage: A.gen(0)
x
sage: A.gen(1)
y
sage: A.gen(2)
z
"""
return self.gens()[i]
def maximal_degree(self):
r"""
Return the maximal degree of ``self``.
EXAMPLES::
sage: A.<x,y,z> = GradedCommutativeAlgebra(QQ, degrees=(1,2,3), max_degree=8)
sage: A.maximal_degree()
8
"""
return self._max_deg
max_degree = maximal_degree
|
configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py | evgps/mmdetection_trashcan | 367 | 12738289 | <reponame>evgps/mmdetection_trashcan
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
pretrained=None,
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=8,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
|
social_auth/backends/contrib/dropbox.py | merutak/django-social-auth | 863 | 12738298 | from social.backends.dropbox import DropboxOAuth as DropboxBackend
|
zktraffic/stats/stats.py | fakeNetflix/twitter-repo-zktraffic | 159 | 12738303 | # ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
def sizeof_fmt(num):
for x in ('', 'KB', 'MB', 'GB'):
if num < 1024.0:
if x == '':
return "%d%s" % (num, x)
else:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
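# For example, sizeof_fmt(512) gives "512" and sizeof_fmt(2048) gives "2.0KB";
# values of a terabyte or more fall through to the final "TB" format.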
class Counters(object):
ALL = -1
WRITES = 0
READS = 1
CREATE = 2
SET_DATA = 3
GET_DATA = 4
DELETE = 5
GET_CHILDREN = 6
EXISTS = 7
CREATE_BYTES = 8
SET_DATA_BYTES = 9
GET_DATA_BYTES = 10
DELETE_BYTES = 11
GET_CHILDREN_BYTES = 12
EXISTS_BYTES = 13
CountersByName = {
"all": Counters.ALL,
"writes": Counters.WRITES,
"reads": Counters.READS,
"create": Counters.CREATE,
"getdata": Counters.GET_DATA,
"setdata": Counters.SET_DATA,
"delete": Counters.DELETE,
"getchildren": Counters.GET_CHILDREN,
"getchildren_bytes": Counters.GET_CHILDREN_BYTES,
"create_bytes": Counters.CREATE_BYTES,
"getdata_bytes": Counters.GET_DATA_BYTES,
"setdata_bytes": Counters.SET_DATA_BYTES,
"delete_bytes": Counters.DELETE_BYTES,
}
def counter_to_str(counter):
for name, c in CountersByName.items():
if counter == c:
return name
return ""
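# Reverse lookup example: counter_to_str(Counters.READS) returns "reads", while a
# value with no entry in CountersByName comes back as the empty string.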
|
koku/reporting/migrations/0210_ocpaws_partables.py | project-koku/koku | 157 | 12738310 | <gh_stars>100-1000
# Generated by Django 3.1.13 on 2021-11-18 04:49
import django.db.models.deletion
from django.db import migrations
from django.db import models
from koku.database import set_partition_mode
from koku.database import unset_partition_mode
class Migration(migrations.Migration):
dependencies = [("api", "0050_exchangerates"), ("reporting", "0209_gcp_partables")]
operations = [
migrations.RunPython(code=set_partition_mode, reverse_code=unset_partition_mode),
migrations.CreateModel(
name="OCPAWSStorageSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("currency_code", models.CharField(max_length=10)),
(
"account_alias",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_ocpaws_storage_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_ocpaws_storage_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="OCPAWSNetworkSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("currency_code", models.CharField(max_length=10)),
(
"account_alias",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_ocpaws_network_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_ocpaws_network_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="OCPAWSDatabaseSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("currency_code", models.CharField(max_length=10)),
(
"account_alias",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_ocpaws_database_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_ocpaws_database_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="OCPAWSCostSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("currency_code", models.CharField(max_length=10)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_ocpaws_cost_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_ocpaws_cost_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="OCPAWSCostSummaryByServiceP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("product_code", models.CharField(max_length=50)),
("product_family", models.CharField(max_length=150, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("currency_code", models.CharField(max_length=10)),
(
"account_alias",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_ocpaws_cost_summary_by_service_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_ocpaws_cost_summary_by_service_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="OCPAWSCostSummaryByRegionP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("region", models.CharField(max_length=50, null=True)),
("availability_zone", models.CharField(max_length=50, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("currency_code", models.CharField(max_length=10)),
(
"account_alias",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_ocpaws_cost_summary_by_region_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_ocpaws_cost_summary_by_region_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="OCPAWSCostSummaryByAccountP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("currency_code", models.CharField(max_length=10)),
(
"account_alias",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_ocpaws_cost_summary_by_account_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_ocpaws_cost_summary_by_account_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="OCPAWSComputeSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("cluster_id", models.CharField(max_length=50, null=True)),
("cluster_alias", models.CharField(max_length=256, null=True)),
("usage_account_id", models.CharField(max_length=50)),
("instance_type", models.CharField(max_length=50, null=True)),
("resource_id", models.CharField(max_length=253, null=True)),
("usage_amount", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("unit", models.CharField(max_length=63, null=True)),
("unblended_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("markup_cost", models.DecimalField(decimal_places=15, max_digits=33, null=True)),
("currency_code", models.CharField(max_length=10)),
(
"account_alias",
models.ForeignKey(
null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="reporting.awsaccountalias"
),
),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_ocpaws_compute_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_ocpaws_compute_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.AddIndex(
model_name="ocpawsstoragesummaryp", index=models.Index(fields=["usage_start"], name="ocpawsstorsumm_usst")
),
migrations.AddIndex(
model_name="ocpawsstoragesummaryp",
index=models.Index(fields=["product_family"], name="ocpawsstorsumm_product_fam"),
),
migrations.AddIndex(
model_name="ocpawsnetworksummaryp", index=models.Index(fields=["usage_start"], name="ocpawsnetsumm_usst")
),
migrations.AddIndex(
model_name="ocpawsnetworksummaryp",
index=models.Index(fields=["product_code"], name="ocpawsnetsumm_product_cd"),
),
migrations.AddIndex(
model_name="ocpawsdatabasesummaryp", index=models.Index(fields=["usage_start"], name="ocpawsdbsumm_usst")
),
migrations.AddIndex(
model_name="ocpawsdatabasesummaryp",
index=models.Index(fields=["product_code"], name="ocpawsdbsumm_product_cd"),
),
migrations.AddIndex(
model_name="ocpawscostsummaryp", index=models.Index(fields=["usage_start"], name="ocpawscostsumm_usst")
),
migrations.AddIndex(
model_name="ocpawscostsummarybyservicep",
index=models.Index(fields=["usage_start"], name="ocpawscostsumm_svc_usst"),
),
migrations.AddIndex(
model_name="ocpawscostsummarybyservicep",
index=models.Index(fields=["product_code"], name="ocpawscostsumm_svc_prod_cd"),
),
migrations.AddIndex(
model_name="ocpawscostsummarybyregionp",
index=models.Index(fields=["usage_start"], name="ocpawscostsumm_reg_usst"),
),
migrations.AddIndex(
model_name="ocpawscostsummarybyregionp",
index=models.Index(fields=["region"], name="ocpawscostsumm_reg_region"),
),
migrations.AddIndex(
model_name="ocpawscostsummarybyregionp",
index=models.Index(fields=["availability_zone"], name="ocpawscostsumm_reg_zone"),
),
migrations.AddIndex(
model_name="ocpawscostsummarybyaccountp",
index=models.Index(fields=["usage_start"], name="ocpawscostsumm_acct_usst"),
),
migrations.AddIndex(
model_name="ocpawscomputesummaryp", index=models.Index(fields=["usage_start"], name="ocpawscompsumm_usst")
),
migrations.AddIndex(
model_name="ocpawscomputesummaryp",
index=models.Index(fields=["instance_type"], name="ocpawscompsumm_insttyp"),
),
migrations.RunPython(code=unset_partition_mode, reverse_code=set_partition_mode),
]
|
leather/shapes/dots.py | timgates42/leather | 198 | 12738314 | <reponame>timgates42/leather
#!/usr/bin/env python
from collections import defaultdict
import xml.etree.ElementTree as ET
import six
from leather.data_types import Text
from leather.series import CategorySeries
from leather.shapes.base import Shape
from leather import theme
from leather.utils import DummySeries, X, Y
class Dots(Shape):
"""
Render a series of data as dots.
:param fill_color:
The color to fill the dots. You may also specify a
:func:`.style_function`. If not specified, default chart colors will be
used.
:param radius:
The radius of the rendered dots. Defaults to
:data:`.theme.default_dot_radius`. You may also specify a
:func:`.style_function`.
"""
def __init__(self, fill_color=None, radius=None):
self._fill_color = fill_color
self._radius = radius or theme.default_dot_radius
def validate_series(self, series):
"""
Verify this shape can be used to render a given series.
"""
if series.data_type(X) is Text or series.data_type(Y) is Text:
raise ValueError('Dots do not support Text values.')
return True
def to_svg(self, width, height, x_scale, y_scale, series, palette):
"""
Render dots to SVG elements.
"""
group = ET.Element('g')
group.set('class', 'series dots')
default_colors = defaultdict(lambda: next(palette))
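        # Each distinct category value ``d.z`` lazily grabs the next palette color the
        # first time it appears, so all dots of one category share the same fill.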
for d in series.data():
if d.x is None or d.y is None:
continue
proj_x = x_scale.project(d.x, 0, width)
proj_y = y_scale.project(d.y, height, 0)
if callable(self._fill_color):
fill_color = self._fill_color(d)
elif self._fill_color:
fill_color = self._fill_color
else:
fill_color = default_colors[d.z]
if callable(self._radius):
radius = self._radius(d)
else:
radius = self._radius
group.append(ET.Element('circle',
cx=six.text_type(proj_x),
cy=six.text_type(proj_y),
r=six.text_type(radius),
fill=fill_color
))
return group
def legend_to_svg(self, series, palette):
"""
Render the legend entries for these shapes.
"""
items = []
if isinstance(series, CategorySeries):
for category in series.categories():
items.extend(Shape.legend_to_svg(self, DummySeries(category), palette))
else:
items.extend(Shape.legend_to_svg(self, series, palette))
return items
|
pyNastran/bdf/mesh_utils/skin_solid_elements.py | luzpaz/pyNastran | 293 | 12738326 | <filename>pyNastran/bdf/mesh_utils/skin_solid_elements.py
"""
defines:
- get_solid_skin_faces(model)
"""
from collections import defaultdict
from copy import deepcopy
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
def write_skin_solid_faces(model, skin_filename,
write_solids=False, write_shells=True,
size=8, is_double=False, encoding=None):
"""
Writes the skinned elements
Parameters
----------
model : BDF()
the BDF object
skin_filename : str
the file to write
write_solids : bool; default=False
write solid elements that have skinned faces
    write_shells : bool; default=True
write newly created shell elements
if there are shells in the model, doesn't write these
size : int; default=8
the field width
is_double : bool; default=False
double precision flag
encoding : str; default=None -> system default
the string encoding
"""
if(len(model.element_ids) == 0 or len(model.material_ids) == 0 or
len(model.property_ids) == 0):
return
eid_set, face_map = get_solid_skin_faces(model)
if len(eid_set) == 0:
return
eid_set_to_write = set()
nid_set_to_write = set()
mid_set_to_write = set()
if write_solids:
for face, eids in eid_set.items():
eid_set_to_write.update(eids)
for eid in eids:
elem = model.elements[eid]
pid = elem.Pid()
prop = model.properties[pid] # PSOLID
mid = prop.Mid()
#print(prop)
nid_set_to_write.update(elem.node_ids)
mid_set_to_write.add(mid)
#print('added_mid (a) =', mid)
elif write_shells:
for face, eids in eid_set.items():
eid_set_to_write.update(eids)
nid_set_to_write.update(face)
for eid in eids:
elem = model.elements[eid]
pid = elem.Pid()
prop = model.properties[pid] # PSOLID
if prop.type in ['PSOLID', 'PLSOLID']:
mid = prop.Mid()
elif prop.type in ['PCOMPS', 'PCOMPLS', 'PCOMP', 'PCOMPG']:
mid = prop.mids[0]
else:
raise NotImplementedError(prop)
#except TypeError:
#model.log.warning('TypeError: skipping:%s' % prop)
#raise
#except AttributeError:
#model.log.warning('skipping:%s' % prop)
#continue
mid_set_to_write.add(mid)
#print('added eid=%s pid=%s mid=%s (b)' % (eid, pid, mid))
else:
raise RuntimeError('write_solids=False write_shells=False')
eids_to_write = list(eid_set_to_write)
nids_to_write = list(nid_set_to_write)
mids_to_write = list(mid_set_to_write)
#element_ids_to_delete = set(model.element_ids) - eids_to_write
eid_shell = max(model.elements) + 1
pid_shell = max(model.properties) + 1
mid_shell = max(model.materials) + 1
_write_skin_solid_faces(model, skin_filename, face_map,
nids_to_write, eids_to_write, mids_to_write, eid_set,
eid_shell, pid_shell, mid_shell,
write_solids=write_solids, write_shells=write_shells,
size=size, is_double=is_double, encoding=encoding)
def get_solid_skin_faces(model):
"""
Gets the elements and faces that are skinned from solid elements.
This doesn't include internal faces or existing shells.
Parameters
----------
model : BDF()
the BDF object
Returns
-------
eid_set : Dict[sorted_face] = eids
sorted_face : tuple(int, int, ...)
the face nids in sorted order
eids : List[int]
list of element ids with that face
face_map : Dict[sorted_face] = face
sorted_face : tuple(int, int, ...)
the face nids in sorted order
face : List(int, int, ...)
the face nids
"""
eid_faces = model.get_element_faces()
face_set = defaultdict(int)
eid_set = defaultdict(list)
face_map = {}
for eid, face in eid_faces:
#print(eid, face)
raw_face = deepcopy(face)
try:
face.sort()
except Exception:
print('face = %s' % str(face))
raise
tface = tuple(face)
#print(tface)
face_set[tface] += 1
eid_set[tface].append(eid)
face_map[tface] = raw_face
#print('eid_set:')
#for tface, eidset in eid_set.items():
#print(tface, eidset)
#print('face_set:')
#for tface, faceset in face_set.items():
#print(tface, faceset)
#print('face_map:')
#for tface, facemap in face_map.items():
#print(tface, facemap)
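    # A face counted exactly twice is shared by two solid elements and is therefore
    # interior; removing those leaves only the exterior (skin) faces.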
del_faces = []
for face, face_count in face_set.items():
if face_count == 2:
del_faces.append(face)
for face in del_faces:
del face_set[face]
del eid_set[face]
return eid_set, face_map
def _write_skin_solid_faces(model, skin_filename, face_map,
nids_to_write, eids_to_write, mids_to_write, eid_set,
eid_shell, pid_shell, mid_shell,
write_solids=False, write_shells=True,
size=8, is_double=False, encoding=None):
"""
helper method for ``write_skin_solid_faces``
Parameters
----------
model : BDF()
the BDF object
skin_filename : str
the file to write
face_map : dict[sorted_face] : face
sorted_face : List[int, int, int] / List[int, int, int, int]
face : List[int, int, int] / List[int, int, int, int]
nids_to_write : List[int, int, ...]
list of node ids to write
eids_to_write : List[int, int, ...]
list of element ids to write
mids_to_write : List[int, int, ...]
list of material ids to write
eid_set : dict[face] : eids
???
eid_shell : int
the next id to use for the shell id
pid_shell : int
the next id to use for the shell property
mid_shell : int
the next id to use for the shell material
write_solids : bool; default=False
write solid elements that have skinned faces
write_shells : bool; default=True
write shell elements
size : int; default=8
the field width
is_double : bool; default=False
double precision flag
encoding : str; default=None -> system default
the string encoding
"""
encoding = model.get_encoding(encoding)
with open(skin_filename, 'w', encoding=encoding) as bdf_file:
bdf_file.write('$ pyNastran: punch=True\n')
for nid in sorted(nids_to_write):
if nid is None:
continue
node = model.nodes[nid]
bdf_file.write(node.write_card(size=size, is_double=is_double))
for cid, coord in model.coords.items():
if cid == 0:
continue
bdf_file.write(coord.write_card(size=size, is_double=is_double))
if write_solids:
for eid in sorted(eids_to_write):
elem = model.elements[eid]
bdf_file.write(elem.write_card(size=size))
for pid, prop in model.properties.items():
bdf_file.write(prop.write_card(size=size, is_double=is_double))
for mid in sorted(mids_to_write):
material = model.materials[mid]
bdf_file.write(material.write_card(size=size, is_double=is_double))
del eid, pid, mid
if write_shells:
mids_to_write.sort()
for imid, mid in enumerate(mids_to_write):
card = ['PSHELL', pid_shell + imid, mid_shell + imid, 0.1]
try:
msg = print_card_8(card)
except RuntimeError:
msg = print_card_16(card)
bdf_file.write(msg)
card = ['MAT1', mid_shell + imid, 3.e7, None, 0.3]
#bdf_file.write(model.materials[mid].comment)
try:
msg = print_card_8(card)
except RuntimeError:
msg = print_card_16(card)
bdf_file.write(msg)
for face, eids in eid_set.items():
face_raw = face_map[face]
nface = len(face)
#print("eids =", eids)
#assert len(eids) == 1, eids
#for eid in sorted(eids):
#elem = model.elements[eid]
#print(elem)
#break
assert len(eids) == 1, eids
elem = model.elements[eids[0]]
#pid = next(model.properties.keys())
pid = elem.Pid()
prop = model.properties[pid]
if prop.type in ['PSOLID']: # 'PSHELL',
mid = prop.Mid()
elif prop.type in ['PCOMPS', 'PCOMPLS']: # 'PSHELL',
#print(prop.get_stats())
mid = prop.Mid()
#elif prop.type in ['PCOMP', 'PCOMPG']:
#mid = prop.mids[0]
else:
raise NotImplementedError(prop)
#print('mids_to_write = %s' % mids_to_write)
#print('mids = ', model.materials.keys())
imid = mids_to_write.index(mid)
if nface == 3:
card = ['CTRIA3', eid_shell, pid_shell + imid] + list(face_raw)
elif nface == 4:
card = ['CQUAD4', eid_shell, pid_shell + imid] + list(face_raw)
elif nface == 6:
card = ['CTRIA6', eid_shell, pid_shell + imid] + list(face_raw)
elif nface == 8:
card = ['CQUAD8', eid_shell, pid_shell + imid] + list(face_raw)
else:
raise NotImplementedError('face=%s len(face)=%s' % (face, nface))
try:
msg = print_card_8(card)
except RuntimeError:
msg = print_card_16(card)
bdf_file.write(msg)
eid_shell += 1
#elem = model.elements[eid]
#bdf_file.write(elem.write_card(size=size))
#for pid, prop in model.properties.items():
#bdf_file.write(prop.write_card(size=size, is_double=is_double))
bdf_file.write('ENDDATA\n')
#if 0:
#model = model.__class__.__init__()
#model.read_bdf(skin_filename)
|
Example/Psi4Numpy/13-GeometryOptimization/opt_helper/printTools.py | yychuang/109-2-compchem-lite | 214 | 12738352 | <gh_stars>100-1000
from __future__ import print_function
def printMat(M, Ncol=7, title=None):
if title:
print(title + '\n')
for row in range(M.shape[0]):
tab = 0
for col in range(M.shape[1]):
tab += 1
            print(" %10.6f" % M[row, col], end='')
if tab == Ncol and col != (M.shape[1] - 1):
print("\n")
tab = 0
print("\n")
return
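# printMat prints Ncol values per line before wrapping; printMatString below builds a
# similar Ncol-per-line layout into a string instead of printing it.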
def printMatString(M, Ncol=7, title=None):
if title:
print(title + '\n')
s = ''
for row in range(M.shape[0]):
tab = 0
for col in range(M.shape[1]):
tab += 1
s += " %10.6f" % M[row, col]
if tab == Ncol and col != (M.shape[1] - 1):
s += '\n'
tab = 0
s += '\n'
return s
def printArray(M, Ncol=7, title=None):
if title:
print(title + '\n')
tab = 0
for col, entry in enumerate(M):
tab += 1
        print(" %10.6f" % M[col], end='')
if tab == Ncol and col != (len(M) - 1):
print("\n")
tab = 0
print("\n")
return
def printArrayString(M, Ncol=7, title=None):
if title:
print(title + '\n')
tab = 0
s = ''
for i, entry in enumerate(M):
tab += 1
s += " %10.6f" % entry
if tab == Ncol and i != (len(M) - 1):
s += '\n'
tab = 0
s += '\n'
return s
def printGeomGrad(geom, grad):
print("\tGeometry and Gradient\n")
Natom = geom.shape[0]
for i in range(Natom):
print("\t%20.10f%20.10f%20.10f\n" % (geom[i, 0], geom[i, 1], geom[i, 2]))
print("\n")
for i in range(Natom):
print("\t%20.10f%20.10f%20.10f\n" % (grad[3 * i + 0], grad[3 * i + 1],
grad[3 * i + 2]))
|
tests/test_views_objects_permissions.py | vincentfretin/kinto | 4,618 | 12738356 | <filename>tests/test_views_objects_permissions.py
import unittest
from kinto.core.testing import get_user_headers
from .support import (
MINIMALIST_BUCKET,
MINIMALIST_COLLECTION,
MINIMALIST_GROUP,
MINIMALIST_RECORD,
BaseWebTest,
)
class PermissionsTest(BaseWebTest, unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.alice_headers = {**cls.headers, **get_user_headers("alice")}
cls.bob_headers = {**cls.headers, **get_user_headers("bob")}
cls.alice_principal = (
"basicauth:<PASSWORD>"
)
cls.bob_principal = (
"basicauth:<PASSWORD>"
)
class BucketPermissionsTest(PermissionsTest):
def setUp(self):
bucket = {**MINIMALIST_BUCKET, "permissions": {"read": [self.alice_principal]}}
self.app.put_json("/buckets/sodas", bucket, headers=self.headers)
def test_creation_is_allowed_to_authenticated_by_default(self):
self.app.put_json("/buckets/beer", MINIMALIST_BUCKET, headers=self.headers)
def test_current_user_receives_write_permission_on_creation(self):
resp = self.app.put_json("/buckets/beer", MINIMALIST_BUCKET, headers=self.headers)
permissions = resp.json["permissions"]
self.assertIn(self.principal, permissions["write"])
def test_can_read_if_allowed(self):
self.app.get("/buckets/sodas", headers=self.alice_headers)
def test_cannot_write_if_not_allowed(self):
self.app.put_json(
"/buckets/sodas", MINIMALIST_BUCKET, headers=self.alice_headers, status=403
)
def test_permissions_are_not_returned_if_can_only_read(self):
resp = self.app.get("/buckets/sodas", headers=self.alice_headers)
self.assertEqual(resp.json["permissions"], {})
def test_permissions_are_returned_if_can_write(self):
resp = self.app.get("/buckets/sodas", headers=self.headers)
self.assertIn("write", resp.json["permissions"])
def test_cannot_post_existing_id_if_cannot_read(self):
self.app.get("/buckets/sodas", headers=self.bob_headers, status=403)
self.app.post_json(
"/buckets", {"data": {"id": "sodas"}}, headers=self.bob_headers, status=403
)
def test_can_post_existing_id_if_can_read(self):
self.app.patch_json(
"/buckets/sodas",
{"data": {"marker": True}, "permissions": {"read": ["system.Authenticated"]}},
headers=self.headers,
)
resp = self.app.post_json("/buckets", {"data": {"id": "sodas"}}, headers=self.bob_headers)
assert resp.json["data"]["marker"]
class CollectionPermissionsTest(PermissionsTest):
def setUp(self):
bucket = {
**MINIMALIST_BUCKET,
"permissions": {"read": [self.alice_principal], "write": [self.bob_principal]},
}
self.app.put_json("/buckets/beer", bucket, headers=self.headers)
self.app.put_json(
"/buckets/beer/collections/barley",
MINIMALIST_COLLECTION,
headers=self.headers,
)
def test_passing_unicode_on_parent_id_is_supported(self):
self.app.get(
"/buckets/block%C2%93%C2%96sts/collections/barley",
headers=self.alice_headers,
status=403,
)
def test_read_is_allowed_if_read_on_bucket(self):
self.app.get("/buckets/beer/collections/barley", headers=self.alice_headers)
def test_read_is_allowed_if_write_on_bucket(self):
self.app.get("/buckets/beer/collections/barley", headers=self.bob_headers)
def test_cannot_read_if_not_allowed(self):
headers = {**self.headers, **get_user_headers("jean-louis")}
self.app.get("/buckets/beer/collections/barley", headers=headers, status=403)
def test_cannot_write_if_not_allowed(self):
self.app.put_json(
"/buckets/beer/collections/barley",
MINIMALIST_COLLECTION,
headers=self.alice_headers,
status=403,
)
def test_permission_backend_prevent_sql_injections(self):
self.app.get("/buckets/beer'", headers=self.headers, status=403)
self.app.get("/buckets/beer'/collections/barley", headers=self.headers, status=403)
self.app.get("/buckets/beer'/groups/barley", headers=self.headers, status=403)
self.app.get("/buckets/beer/collections/barley'", headers=self.headers, status=400)
# XXX: We should validate the collection ID on the records collection endpoint. #1077
self.app.get(
"/buckets/beer/collections/barley'/records",
headers=self.headers,
status=404,
)
self.app.get("/buckets/beer/groups/barley'", headers=self.headers, status=400)
class GroupPermissionsTest(PermissionsTest):
def setUp(self):
bucket = {
**MINIMALIST_BUCKET,
"permissions": {"read": [self.alice_principal], "write": [self.bob_principal]},
}
self.app.put_json("/buckets/beer", bucket, headers=self.headers)
self.app.put_json(
"/buckets/beer/groups/moderators", MINIMALIST_GROUP, headers=self.headers
)
def test_creation_is_allowed_if_write_on_bucket(self):
self.app.post_json("/buckets/beer/groups", MINIMALIST_GROUP, headers=self.headers)
def test_read_is_allowed_if_read_on_bucket(self):
self.app.get("/buckets/beer/groups/moderators", headers=self.alice_headers)
def test_read_is_allowed_if_write_on_bucket(self):
self.app.get("/buckets/beer/groups/moderators", headers=self.bob_headers)
def test_cannot_read_if_not_allowed(self):
headers = {**self.headers, **get_user_headers("jean-louis")}
self.app.get("/buckets/beer/groups/moderators", headers=headers, status=403)
def test_cannot_write_if_not_allowed(self):
self.app.put_json(
"/buckets/beer/groups/moderators",
MINIMALIST_GROUP,
headers=self.alice_headers,
status=403,
)
def test_creation_is_forbidden_is_no_write_on_bucket(self):
self.app.post_json(
"/buckets/beer/groups",
MINIMALIST_GROUP,
headers=self.alice_headers,
status=403,
)
class RecordPermissionsTest(PermissionsTest):
def setUp(self):
bucket = {**MINIMALIST_BUCKET, "permissions": {"write": [self.alice_principal]}}
self.app.put_json("/buckets/beer", bucket, headers=self.headers)
collection = {
**MINIMALIST_COLLECTION,
"permissions": {"write": [self.bob_principal]},
}
self.app.put_json("/buckets/beer/collections/barley", collection, headers=self.headers)
def test_creation_is_allowed_if_write_on_bucket(self):
self.app.post_json(
"/buckets/beer/collections/barley/records",
MINIMALIST_RECORD,
headers=self.alice_headers,
)
def test_creation_is_allowed_if_write_on_collection(self):
self.app.post_json(
"/buckets/beer/collections/barley/records",
MINIMALIST_RECORD,
headers=self.bob_headers,
)
def test_creation_is_forbidden_is_no_write_on_bucket_nor_collection(self):
headers = {**self.headers, **get_user_headers("jean-louis")}
self.app.post_json(
"/buckets/beer/collections/barley/records",
MINIMALIST_RECORD,
headers=headers,
status=403,
)
def test_record_permissions_are_modified_by_patch(self):
collection_url = "/buckets/beer/collections/barley/records"
resp = self.app.post_json(collection_url, MINIMALIST_RECORD, headers=self.headers)
record = resp.json["data"]
resp = self.app.patch_json(
"{}/{}".format(collection_url, record["id"]),
{"permissions": {"read": ["fxa:user"]}},
headers=self.headers,
)
self.assertIn("fxa:user", resp.json["permissions"]["read"])
class ChildrenCreationTest(PermissionsTest):
def setUp(self):
self.app.put_json(
"/buckets/create",
{"permissions": {"group:create": ["system.Authenticated"]}},
headers=self.alice_headers,
)
self.app.put_json(
"/buckets/write",
{"permissions": {"write": ["system.Authenticated"]}},
headers=self.alice_headers,
)
self.app.put_json(
"/buckets/read",
{"permissions": {"read": ["system.Authenticated"]}},
headers=self.alice_headers,
)
for parent in ("create", "write", "read"):
self.app.put_json(
"/buckets/{}/groups/child".format(parent),
MINIMALIST_GROUP,
headers=self.alice_headers,
)
self.bob_headers_safe_creation = dict({"If-None-Match": "*"}, **self.bob_headers)
def test_cannot_read_others_objects_if_only_allowed_to_create(self):
self.app.get("/buckets/create/groups/child", headers=self.bob_headers, status=403)
def test_safe_creation_with_put_returns_412_if_allowed_to_create(self):
self.app.put_json(
"/buckets/create/groups/child",
MINIMALIST_GROUP,
headers=self.bob_headers_safe_creation,
status=412,
)
def test_safe_creation_with_post_returns_412_if_allowed_to_create_and_read(self):
self.app.patch_json(
"/buckets/create/groups/child",
{"permissions": {"read": ["system.Authenticated"]}},
headers=self.alice_headers,
)
self.app.post_json(
"/buckets/create/groups",
{"data": {"id": "child", "members": []}},
headers=self.bob_headers_safe_creation,
status=412,
)
def test_safe_creation_with_put_returns_412_if_allowed_to_write(self):
self.app.put_json(
"/buckets/write/groups/child",
MINIMALIST_GROUP,
headers=self.bob_headers_safe_creation,
status=412,
)
def test_safe_creation_with_post_returns_412_if_allowed_to_write(self):
self.app.post_json(
"/buckets/write/groups",
{"data": {"id": "child", "members": []}},
headers=self.bob_headers_safe_creation,
status=412,
)
def test_safe_creation_with_put_returns_403_if_only_allowed_to_read(self):
self.app.put_json(
"/buckets/read/groups/child",
MINIMALIST_GROUP,
headers=self.bob_headers_safe_creation,
status=403,
)
def test_safe_creation_with_post_returns_403_if_not_allowed_to_read(self):
self.app.post_json(
"/buckets/create/groups",
{"data": {"id": "child", "members": []}},
headers=self.bob_headers_safe_creation,
status=403,
)
def test_safe_creation_with_post_returns_412_if_only_allowed_to_read(self):
self.app.post_json(
"/buckets/read/groups",
{"data": {"id": "child", "members": []}},
headers=self.bob_headers_safe_creation,
status=412,
)
def test_delete_returns_404_on_unknown_if_only_allowed_to_read(self):
self.app.delete("/buckets/read/groups/g1", headers=self.bob_headers, status=404)
def test_patch_returns_404_on_unknown_if_only_allowed_to_read(self):
self.app.patch_json(
"/buckets/read/groups/g1",
{"data": {"members": []}},
headers=self.bob_headers,
status=404,
)
class ParentMetadataTest(PermissionsTest):
def setUp(self):
self.app.put_json(
"/buckets/beer",
{"permissions": {"collection:create": [self.bob_principal]}},
headers=self.headers,
)
self.app.put_json("/buckets/beer/collections/wheat", headers=self.headers)
self.app.put_json("/buckets/beer/collections/root", headers=self.headers)
self.app.put_json(
"/buckets/beer/collections/barley",
{"permissions": {"record:create": [self.alice_principal]}},
headers=self.bob_headers,
)
def test_parent_metadata_can_be_read_if_allowed_to_create_child(self):
self.app.get("/buckets/beer", headers=self.bob_headers)
self.app.get("/buckets/beer/collections/barley", headers=self.alice_headers)
def test_parent_metadata_cannot_be_read_if_not_allowed_to_create_child(self):
self.app.get("/buckets/beer", headers=get_user_headers("jean:paul"), status=403)
self.app.get(
"/buckets/beer/collections/barley",
headers=get_user_headers("mahmud:hatim"),
status=403,
)
def test_list_can_be_obtained_if_allowed_to_create(self):
resp = self.app.get("/buckets/beer/collections", headers=self.bob_headers)
self.assertEqual(len(resp.json["data"]), 1)
self.assertEqual(resp.json["data"][0]["id"], "barley")
resp = self.app.get("/buckets/beer/collections/barley/records", headers=self.alice_headers)
self.assertEqual(resp.json["data"], [])
def test_list_is_denied_if_not_allowed_to_create(self):
self.app.get(
"/buckets/beer/collections",
headers=get_user_headers("jean:paul"),
status=403,
)
self.app.get(
"/buckets/beer/collections/barley/records",
headers=get_user_headers("mahmud:hatim"),
status=403,
)
class DisabledExplicitPermissionsTest(BaseWebTest, unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.alice_headers = {**cls.headers, **get_user_headers("alice")}
cls.alice_principal = (
"basicauth:d5b0026601f1b251974e09548d44155e16812e3c64ff7ae053fe3542e2ca1570"
)
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
settings["explicit_permissions"] = "false"
settings["experimental_permissions_endpoint"] = "true"
return settings
def setUp(self):
self.app.put_json(
"/buckets/write",
{"permissions": {"write": ["system.Authenticated"]}},
headers=self.headers,
)
self.app.put_json(
"/buckets/write/collections/test",
{"permissions": {"write": ["system.Authenticated"]}},
headers=self.alice_headers,
)
def test_can_create_and_access_child_object(self):
self.app.put(
"/buckets/write/collections/test/records/1",
headers=self.alice_headers,
)
self.app.get(
"/buckets/write/collections/test/records/1",
headers=self.alice_headers,
)
def test_current_user_is_not_added_to_object_permissions(self):
resp = self.app.put_json(
"/buckets/write/collections/test/records/1",
{"permissions": {"write": ["system.Authenticated"], "read": ["ldap:chantal"]}},
headers=self.alice_headers,
)
self.assertEqual(
resp.json["permissions"],
{"write": ["system.Authenticated"], "read": ["ldap:chantal"]},
)
def test_child_objects_are_not_listed_in_permission_endpoint(self):
self.app.put(
"/buckets/write/collections/test/records/1",
headers=self.alice_headers,
)
resp = self.app.get("/permissions", headers=self.alice_headers)
perms = resp.json["data"]
self.assertEqual(
sorted(p["uri"] for p in perms),
["/", "/buckets/write", "/buckets/write/collections/test"],
)
def test_write_via_groups(self):
self.app.put_json(
"/buckets/viagroup",
{"permissions": {"write": [self.principal]}},
headers=self.headers,
)
self.app.put_json(
"/buckets/viagroup/collections/c",
{"permissions": {"write": ["/buckets/viagroup/groups/editors"]}},
headers=self.headers,
)
self.app.put_json(
"/buckets/viagroup/groups/editors",
{"data": {"members": [self.alice_principal]}},
headers=self.headers,
)
self.app.post_json(
"/buckets/viagroup/collections/c/records",
{},
headers=self.alice_headers,
)
|
sources/PacketStorm.py | eacg-gmbh/VIA4CVE | 109 | 12738380 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PacketStorm information
# Based on Vulners
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2017 <NAME> - <EMAIL>
# Sources
SOURCE_NAME = 'packetstorm'
SOURCE_FILE = "https://vulners.com/api/v3/archive/collection/?type=packetstorm&api_key={}"
# Imports
import json
from collections import defaultdict
from lib.Config import Configuration as conf
from lib.Source import Source
def add_if(_, entry, item, name=None):
if not name: name=item
if entry.get(item): _[name] = entry[item]
def clean_date(_, item):
if _.get(item): _[item] = _[item].split('T')[0]
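# e.g. clean_date({'published': '2017-01-02T03:04:05'}, 'published') keeps only the
# '2017-01-02' date portion of the value.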
class PacketStorm(Source):
def __init__(self):
self.name = SOURCE_NAME
self.cves = defaultdict(list)
source_file = SOURCE_FILE.format(conf.readSetting("vulners", "api_key", ""))
_file, r = conf.getFeedData(SOURCE_NAME, source_file)
data = json.loads(str(_file.read(), 'utf-8'))
for entry in data:
ps = {}
source = entry['_source']
add_if(ps, source, 'published')
add_if(ps, source, 'lastseen', 'last seen')
add_if(ps, source, 'id')
add_if(ps, source, 'title')
add_if(ps, source, 'description')
add_if(ps, source, 'references')
add_if(ps, source, 'reporter')
add_if(ps, source, 'href', 'source')
add_if(ps, source, 'sourceHref', 'data source')
for date in ['published', 'last seen']: clean_date(ps, date)
if ps:
for CVE in source['cvelist']: self.cves[CVE].append(ps)
def getSearchables(self):
return ['id', 'reporter']
|
causalml/__init__.py | zazrivec/causalml | 2,919 | 12738413 | <filename>causalml/__init__.py
name = 'causalml'
__version__ = '0.11.1'
__all__ = ['dataset',
'features',
'feature_selection',
'inference',
'match',
'metrics',
'optimize',
'propensity']
|
scripts/generate_aligned_wave.py | m95music/yukarin | 139 | 12738415 | <filename>scripts/generate_aligned_wave.py
"""
extract indexes for alignment.
"""
import argparse
import glob
import multiprocessing
from functools import partial
from pathlib import Path
from pprint import pprint
from typing import Tuple
import librosa
import numpy
import tqdm
from yukarin.acoustic_feature import AcousticFeature
from yukarin.align_indexes import AlignIndexes
from yukarin.param import AcousticParam
from yukarin.utility.json_utility import save_arguments
base_acoustic_param = AcousticParam()
parser = argparse.ArgumentParser()
parser.add_argument('--input_feature_glob1', '-if1')
parser.add_argument('--input_feature_glob2', '-if2')
parser.add_argument('--input_indexes', '-ii')
parser.add_argument('--output', '-o', type=Path)
parser.add_argument('--sampling_rate', type=int, default=base_acoustic_param.sampling_rate)
parser.add_argument('--frame_period', type=float, default=base_acoustic_param.frame_period)
parser.add_argument('--alpha', type=float, default=base_acoustic_param.alpha)
parser.add_argument('--disable_overwrite', action='store_true')
arguments = parser.parse_args()
def generate_aligned_wave(
pair_path: Tuple[Path, Path, Path],
sampling_rate: int,
frame_period: float,
alpha: float,
):
path_feature1, path_feature2, path_indexes = pair_path
if path_feature1.stem != path_feature2.stem:
print('warning: the file names are different', path_feature1, path_feature2)
if path_feature1.stem != path_indexes.stem:
print('warning: the file names are different', path_feature1, path_indexes)
out = Path(arguments.output, path_indexes.stem + '.wav')
if arguments.disable_overwrite:
return
feature1 = AcousticFeature.load(path=path_feature1)
feature2 = AcousticFeature.load(path=path_feature2)
feature1.sp = AcousticFeature.mc2sp(feature1.mc, sampling_rate=sampling_rate, alpha=alpha)
feature2.sp = AcousticFeature.mc2sp(feature2.mc, sampling_rate=sampling_rate, alpha=alpha)
feature1.ap = AcousticFeature.decode_ap(feature1.coded_ap, sampling_rate=sampling_rate)
feature2.ap = AcousticFeature.decode_ap(feature2.coded_ap, sampling_rate=sampling_rate)
align_indexes = AlignIndexes.load(path=path_indexes)
align_indexes.feature1 = feature1
align_indexes.feature2 = feature2
wave1 = align_indexes.get_aligned_feature1().decode(sampling_rate=sampling_rate, frame_period=frame_period)
wave2 = align_indexes.get_aligned_feature2().decode(sampling_rate=sampling_rate, frame_period=frame_period)
# save
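    # The two aligned waves are stacked as separate channels of one array so they can
    # be written to (and compared within) a single multi-channel wav file.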
y = numpy.vstack([wave1.wave, wave2.wave])
librosa.output.write_wav(str(out), y, sr=sampling_rate)
def main():
pprint(vars(arguments))
arguments.output.mkdir(exist_ok=True)
save_arguments(arguments, arguments.output / 'arguments.json')
path_feature1 = {Path(p).stem: Path(p) for p in glob.glob(arguments.input_feature_glob1)}
path_feature2 = {Path(p).stem: Path(p) for p in glob.glob(arguments.input_feature_glob2)}
path_indexes = {Path(p).stem: Path(p) for p in glob.glob(arguments.input_indexes)}
fn_both_list = set(path_feature1.keys()) & set(path_indexes.keys())
pool = multiprocessing.Pool()
generate = partial(
generate_aligned_wave,
sampling_rate=arguments.sampling_rate,
frame_period=arguments.frame_period,
alpha=arguments.alpha,
)
it = pool.imap(generate, ((path_feature1[fn], path_feature2[fn], path_indexes[fn]) for fn in fn_both_list))
list(tqdm.tqdm(it, total=len(path_feature1)))
if __name__ == '__main__':
main()
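# Illustrative invocation (a sketch; the glob patterns and output directory below are
# placeholders, while the flags match the argparse definitions above):
#
#   python scripts/generate_aligned_wave.py \
#       -if1 './features1/*.npy' -if2 './features2/*.npy' \
#       -ii './align_indexes/*.npy' -o ./aligned_wave/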
|
py/examples/graphics_primitives.py | swt2c/wave | 3,013 | 12738417 | # Graphics / Primitives
# Use the #graphics module to render and update shapes.
# ---
from h2o_wave import site, ui, graphics as g
# Create some shapes
arc = g.arc(r1=25, r2=50, a1=90, a2=180)
circle = g.circle(cx=25, cy=25, r=25)
ellipse = g.ellipse(cx=25, cy=25, rx=25, ry=20)
image = g.image(width=50, height=50, href='https://www.python.org/static/community_logos/python-powered-h-140x182.png')
line = g.line(x1=0, y1=0, x2=50, y2=50)
path = g.path(d='M 0,0 L 50,50 L 50,0 L 0,50 z')
path2 = g.path(d=g.p().M(0, 0).L(50, 50).L(50, 0).L(0, 50).z().d()) # same effect as above, but programmable.
path3 = g.p().M(0, 0).L(50, 50).L(50, 0).L(0, 50).z().path() # same effect as above, but a tad more concise.
polygon = g.polygon(points='0,0 50,50 50,0 0,50')
polyline = g.polyline(points='0,0 50,50 50,0 0,50')
rect = g.rect(x=0, y=0, width=50, height=50)
rounded_rect = g.rect(x=0, y=0, width=50, height=50, rx=10)
text = g.text(x=0, y=48, text='Z', font_size='4em')
# Collect 'em all
shapes = [arc, circle, ellipse, image, line, path, path2, path3, polygon, polyline, rect, rounded_rect, text]
# Apply fill/stroke for each shape
for shape in shapes:
shape.fill = 'none' if g.type_of(shape) == 'polyline' else 'crimson'
shape.stroke = 'darkred'
shape.stroke_width = 5
# Lay out shapes vertically
y = 10
for shape in shapes:
shape.transform = f'translate(10,{y})'
y += 60
# Add shapes to the graphics card
page = site['/demo']
page['example'] = ui.graphics_card(
box='1 1 1 10', view_box='0 0 70 800', width='100%', height='100%',
stage=g.stage(
arc=arc,
circle=circle,
ellipse=ellipse,
image=image,
line=line,
path=path,
path2=path2,
path3=path3,
polygon=polygon,
polyline=polyline,
rect=rect,
rounded_rect=rounded_rect,
text=text,
),
)
page.save()
|
Packs/CloudConvert/Integrations/CloudConvert/CloudConvert.py | diCagri/content | 799 | 12738419 | import demistomock as demisto
from CommonServerPython import *
import urllib3
import traceback
from typing import Any, Dict
# Disable insecure warnings
urllib3.disable_warnings()
class Client(BaseClient):
@logger
def __init__(self, headers, verify=False, proxy=False):
url = 'https://api.cloudconvert.com/v2'
super().__init__(url, headers=headers, verify=verify, proxy=proxy)
@logger
def upload_url(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
"""
Upload the file given as url to the API's server, for later conversion.
Note - this operation is called 'import' by the API.
Args:
arguments: dict containing the request arguments, should contain the field 'url'
Returns:
dict containing the results of the upload action as returned from the API (status, task ID, etc.)
``Dict[str, Any]``
"""
return self._http_request(
method='POST',
url_suffix='import/url',
data=arguments,
ok_codes=(200, 201, 422),
)
@logger
def upload_entry_id(self, file_path: str, file_name: str) -> Dict[str, Any]:
"""
Upload the file given as a war room entry id to the API's server, for later conversion
Note - this operation is called 'import' by the API.
Args:
file_path: path to given file, derived from the entry id
file_name: name of file, including format suffix
Returns:
dict containing the results of the upload action as returned from the API (status, task ID, etc.)
``Dict[str, Any]``
"""
response_get_form = self._http_request(
method='POST',
url_suffix='import/upload'
)
form = dict_safe_get(response_get_form, ('data', 'result', 'form'), default_return_value={})
port_url = form.get('url')
params = form.get('parameters')
if port_url is None or params is None:
raise ValueError('Failed to initiate an upload operation')
file_dict = {'file': (file_name, open(file_path, 'rb'))}
self._http_request(
method='POST',
url_suffix=None,
full_url=port_url,
files=file_dict,
empty_valid_codes=[201, 204],
return_empty_response=True,
data=params
)
        # As shown, this operation consists of two requests.
        # The data about the operation is within the first request's response,
        # so in order to keep the operation's data we return the first request's response,
        # but first we remove fields that are no longer true, such as the ones indicating
        # that the second request has not been done yet.
if response_get_form.get('data'):
response_get_form.get('data').pop('message', None)
response_get_form.get('data').pop('result', None)
return response_get_form
@logger
def convert(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
"""
Convert a file to desired format, given the file was priorly uploaded to the API's server
Args:
arguments: dict containing the request arguments, should contain the fields 'task_id' and 'output_format'
Returns:
dict containing the results of the convert action as returned from the API (status, task ID, etc.)
``Dict[str, Any]``
"""
arguments['input'] = arguments.pop('task_id')
return self._http_request(
method='POST',
url_suffix='convert',
data=arguments,
ok_codes=(200, 201, 422),
)
def check_status(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
"""
Check the status of a request sent to the API's server
Args:
arguments: dict containing the request arguments, should contain the field 'task_id'
Returns:
dict containing the results of the check status action as returned from the API (status, task ID, etc.)
``Dict[str, Any]``
"""
task_id = arguments.get('task_id')
return self._http_request(
method='GET',
url_suffix=f'/tasks/{task_id}',
ok_codes=(200, 201, 422),
)
@logger
def download_url(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
"""
Download a converted file to a url
Note - this operation is called 'export' by the API.
Args:
arguments:
dict containing the request arguments, should contain the field 'task_id' of the desired file
Returns:
dict containing the results of the download action as returned from the API (status, task ID, etc.)
if the action was complete, the result url will be a part of this dict. If the request is pending,
one should retrieve the url via the 'check_status' command
``Dict[str, Any]``
"""
arguments['input'] = arguments.pop('task_id')
return self._http_request(
method='POST',
url_suffix='/export/url',
data=arguments,
ok_codes=(200, 201, 422),
)
@logger
def get_file_from_url(self, url: str):
"""
Call a GET http request in order to get the file data given as url
Args:
url: url containing a file
Returns:
request response, containing the data of the file
"""
# Saving the headers of this client instance
# The HTTP request that gets the file data needs to have no headers
# Passing an empty dictionary to _http_request cause it to use this client's headers by default
session_headers = self._headers
self._headers = {}
try:
results = self._http_request(
method='GET',
url_suffix=None,
full_url=url,
headers={},
resp_type='response',
)
return results.content
finally:
self._headers = session_headers
@logger
def raise_error_if_no_data(results: Dict[str, Any]):
"""
    This function checks whether no 'data' field was returned from the request, meaning the input was invalid
Args:
results: a dict containing the request's results
Returns:
        raises an error if there is no 'data' field, with the matching error message returned from the server;
        if no error message was given by the server, a generic message describing the likely causes is raised
"""
if results.get('data') is None:
if results.get('message'):
raise ValueError(results.get('message'))
else:
        raise ValueError('No response from server, the server could be temporarily unavailable or it is handling too '
'many requests. Please try again later.')
@logger
def upload_command(client: Client, arguments: Dict[str, Any]):
"""
Upload a file to the API for later conversion
Args:
client: CloudConvert client to use
arguments: All command arguments - either 'url' or 'entry_id'.
Returns:
CommandResults object containing the results of the upload action as returned from the API and its
readable output
"""
if arguments.get('url'):
if arguments.get('entry_id'):
raise ValueError('Both url and entry id were inserted - please insert only one.')
results = client.upload_url(arguments)
elif arguments.get('entry_id'):
demisto.debug('getting the path of the file from its entry id')
result = demisto.getFilePath(arguments.get('entry_id'))
if not result:
raise ValueError('No file was found for given entry id')
file_path, file_name = result['path'], result['name']
results = client.upload_entry_id(file_path, file_name)
else:
raise ValueError('No url or entry id specified.')
raise_error_if_no_data(results)
format_operation_title(results)
results_data = results.get('data')
readable_output = tableToMarkdown(
'Upload Results',
remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status'),
headerTransform=string_to_table_header,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
outputs=remove_empty_elements(results_data),
)
@logger
def convert_command(client: Client, arguments: Dict[str, Any]):
"""
Convert a file that was priorly uploaded
Args:
client: CloudConvert client to use
arguments: All command arguments, the fields 'task_id' and 'output_format'
Returns:
CommandResults object containing the results of the convert action as returned from the API and its readable output
"""
results = client.convert(arguments)
raise_error_if_no_data(results)
results_data = results.get('data')
readable_output = tableToMarkdown(
'Convert Results',
remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status', 'depends_on_task_ids'),
headerTransform=string_to_table_header,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
outputs=remove_empty_elements(results_data),
)
@logger
def check_status_command(client: Client, arguments: Dict[str, Any]):
"""
Check status of an existing operation using it's task id
Args:
client: CloudConvert client to use
arguments: All command arguments, the field 'task_id'
Note: When the checked operation is 'download', the field 'create_war_room_entry' should be set according
to the chosen download method, true if downloading as war room entry and false if not.
This way a war room entry containing the file will be created if needed.
Returns:
CommandResults object containing the results of the check status action as returned from the API
and its readable output OR if the argument create_war_room_entry is set to True, then a war room entry is also
being created.
"""
results = client.check_status(arguments)
raise_error_if_no_data(results)
format_operation_title(results)
results_data = results.get('data', {})
    # If checking on a download to entry operation, manually change the operation name
# This is because the 'download as entry' operation is our variation on the export to url operation,
# hence not distinguished as a different operation by the API
if argToBoolean(arguments.get('create_war_room_entry', False)) \
and results_data.get('operation') == 'download/url':
results['data']['operation'] = 'download/entry'
    # Check if a download to war room entry operation has finished
    # If it has - create the entry
if results_data.get('status') == 'finished' \
and argToBoolean(arguments.get('create_war_room_entry', 'False'))\
and results_data.get('operation') == 'download/entry':
modify_results_dict(results_data)
url = results_data.get('url')
file_name = results_data.get('file_name')
file_data = client.get_file_from_url(url)
war_room_file = fileResult(filename=file_name, data=file_data, file_type=entryTypes['entryInfoFile'])
readable_output = tableToMarkdown('Check Status Results', remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status', 'depends_on_task_ids',
'file_name', 'url'),
headerTransform=string_to_table_header,)
return_results(CommandResults(
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
readable_output=readable_output,
outputs=remove_empty_elements(results_data)
))
return war_room_file
else:
modify_results_dict(results_data)
readable_output = tableToMarkdown(
'Check Status Results',
remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status', 'depends_on_task_ids', 'file_name', 'url'),
headerTransform=string_to_table_header,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
outputs=remove_empty_elements(results_data),
)
def modify_results_dict(results_data: Dict[str, Any]):
"""
The results of the specific file converted/uploaded/downloaded are sub-values of some keys,
so parse the results field to the outer scope of the dict
Args:
results_data: the dict under the 'data' field in the response's results
"""
if results_data.get('result'):
results_info = results_data.get('result', {}).get('files')
if results_info:
results_data['file_name'] = results_info[0].get('filename')
results_data['url'] = results_info[0].get('url')
results_data['size'] = results_info[0].get('size')
@logger
def download_command(client: Client, arguments: Dict[str, Any]):
"""
Download a converted file back to the user, either as a url or directly as a war room entry
    Note: in order to get the resulting url/entry of the file you need to use a check-status command as well,
    since the response to the download command usually arrives before the file is fully downloaded (hence the
    'status' field is 'waiting', and not 'finished')
Args:
client: CloudConvert client to use
arguments: All command arguments, the fields 'task_id', and 'download_as' (url/war_room_entry)
Returns:
        CommandResults object containing the results of the download action as returned from the API, and its readable output
"""
# Call download as url request
# In both url and war room entry we still first get a url
results = client.download_url(arguments)
raise_error_if_no_data(results)
# If downloading as war room entry, manually change the operation name
# This is because the 'download as entry' operation is our variation on the export to url operation,
# hence not distinguished as a different operation by the API
if arguments['download_as'] == 'war_room_entry':
results['data']['operation'] = 'download/entry'
else:
format_operation_title(results)
results_data = results.get('data')
readable_output = tableToMarkdown(
'Download Results',
remove_empty_elements(results_data),
headers=('id', 'operation', 'created_at', 'status', 'depends_on_task_ids'),
headerTransform=string_to_table_header,
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CloudConvert.Task',
outputs_key_field='id',
raw_response=results,
outputs=remove_empty_elements(results_data),
)
def test_module(client: Client):
"""
    Returning 'ok' indicates that the integration works as it is supposed to and that the connection to the service is successful.
Args:
client: CloudConvert client
Returns:
'ok' if test passed, anything else will fail the test
"""
dummy_url = 'https://raw.githubusercontent.com/demisto/content/master/TestData/pdfworking.pdf'
result = client.upload_url({'url': dummy_url})
if result.get('data'):
return 'ok'
elif result.get('message') == "Unauthenticated.":
return 'Authorization Error: make sure API Key is correctly set'
elif result.get('message'):
return result.get('message')
else:
        return 'No response from server, the server could be temporarily unavailable or it is handling too ' \
'many requests. Please try again later.'
def format_operation_title(results: Dict[str, Any]):
"""
    This function changes the operation titles returned by the API in its responses to titles that make more
    sense for the user's actions and match the way the API is used in our system.
Args:
results: The response from the http request
"""
title_exchange_dict = {
'import/url': 'upload/url',
'import/upload': 'upload/entry',
'export/url': 'download/url'}
operation = results['data']['operation']
results['data']['operation'] = title_exchange_dict[operation] if operation in title_exchange_dict.keys() \
else operation
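# Typical flow from the war room (a sketch; the command names below are the ones
# registered in main() and the task ids are placeholders returned by the previous step):
#
#   !cloudconvert-upload url="https://example.com/report.pdf"
#   !cloudconvert-convert task_id=<upload task id> output_format=png
#   !cloudconvert-check-status task_id=<convert task id>
#   !cloudconvert-download task_id=<convert task id> download_as=url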
def main() -> None:
try:
command = demisto.command()
params = demisto.params()
api_key = params.get('apikey')
verify = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(headers, verify, proxy)
if command == 'cloudconvert-upload':
return_results(upload_command(client, demisto.args()))
elif command == 'cloudconvert-convert':
return_results(convert_command(client, demisto.args()))
elif command == 'cloudconvert-check-status':
return_results(check_status_command(client, demisto.args()))
elif command == 'cloudconvert-download':
return_results(download_command(client, demisto.args()))
elif command == 'test-module':
return_results(test_module(client))
except Exception as e:
err_msg = 'Task id not found or expired' if 'No query results for model' in str(e) else \
('No more conversion minutes for today for this user' if 'Payment Required' in str(e) else str(e))
return_error(f'Failed to execute {command} command. Error: {err_msg}', error=traceback.format_exc())
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
server/migrations/0070_remove_machine_install_log_hash.py | nathandarnell/sal | 215 | 12738435 | # Generated by Django 1.11 on 2018-04-25 18:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('server', '0069_remove_machine_install_log'),
]
operations = [
migrations.RemoveField(
model_name='machine',
name='install_log_hash',
),
]
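# Applying this migration uses Django's standard management command (the app label
# 'server' comes from the dependency above; this is an illustrative sketch):
#
#   python manage.py migrate server 0070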
|
tests/unit/lib/utils/test_hash.py | zhuhaow/aws-sam-cli | 2,959 | 12738468 | import hashlib
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
from samcli.lib.utils.hash import dir_checksum, str_checksum
class TestHash(TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_dir, ignore_errors=True)
def test_dir_hash_independent_of_location(self):
temp_dir1 = os.path.join(self.temp_dir, "temp-dir-1")
os.mkdir(temp_dir1)
with open(os.path.join(temp_dir1, "test-file"), "w+") as f:
f.write("Testfile")
checksum1 = dir_checksum(temp_dir1)
temp_dir2 = shutil.move(temp_dir1, os.path.join(self.temp_dir, "temp-dir-2"))
checksum2 = dir_checksum(temp_dir2)
self.assertEqual(checksum1, checksum2)
def test_dir_hash_independent_of_file_order(self):
file1 = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
file1.write(b"Testfile")
file1.close()
file2 = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
file2.write(b"Testfile")
file2.close()
dir_checksums = {}
with patch("os.walk") as mockwalk:
mockwalk.return_value = [
(
self.temp_dir,
[],
[
file1.name,
file2.name,
],
),
]
dir_checksums["first"] = dir_checksum(self.temp_dir)
with patch("os.walk") as mockwalk:
mockwalk.return_value = [
(
self.temp_dir,
[],
[
file2.name,
file1.name,
],
),
]
dir_checksums["second"] = dir_checksum(self.temp_dir)
self.assertEqual(dir_checksums["first"], dir_checksums["second"])
def test_dir_hash_same_contents_diff_file_per_directory(self):
_file = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
_file.write(b"Testfile")
_file.close()
checksum_before = dir_checksum(os.path.dirname(_file.name))
shutil.move(os.path.abspath(_file.name), os.path.join(os.path.dirname(_file.name), "different_name"))
checksum_after = dir_checksum(os.path.dirname(_file.name))
self.assertNotEqual(checksum_before, checksum_after)
def test_dir_hash_with_ignore_list(self):
_file = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
_file.write(b"Testfile")
_file.close()
dir_path = os.path.dirname(_file.name)
checksum_before = dir_checksum(dir_path)
# add a file to .aws-sam/
aws_sam_dir_path = os.path.join(dir_path, ".aws-sam")
os.mkdir(aws_sam_dir_path)
_new_file = tempfile.NamedTemporaryFile(delete=False, dir=aws_sam_dir_path)
_new_file.write(b"dummy")
_new_file.close()
checksum_after = dir_checksum(os.path.dirname(_file.name))
self.assertNotEqual(checksum_before, checksum_after)
checksum_after_with_ignore_list = dir_checksum(os.path.dirname(_file.name), ignore_list=[".aws-sam"])
self.assertEqual(checksum_before, checksum_after_with_ignore_list)
def test_hashing_method(self):
_file = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
_file.write(b"Testfile")
_file.close()
checksum_sha256 = dir_checksum(os.path.dirname(_file.name), hash_generator=hashlib.sha256())
checksum_md5 = dir_checksum(os.path.dirname(_file.name), hashlib.md5())
checksum_default = dir_checksum(os.path.dirname(_file.name))
self.assertEqual(checksum_default, checksum_md5)
self.assertNotEqual(checksum_md5, checksum_sha256)
def test_dir_cyclic_links(self):
_file = tempfile.NamedTemporaryFile(delete=False, dir=self.temp_dir)
_file.write(b"Testfile")
_file.close()
os.symlink(os.path.abspath(_file.name), os.path.join(os.path.dirname(_file.name), "symlink"))
os.symlink(
os.path.join(os.path.dirname(_file.name), "symlink"), os.path.join(os.path.dirname(_file.name), "symlink2")
)
os.unlink(os.path.abspath(_file.name))
os.symlink(os.path.join(os.path.dirname(_file.name), "symlink2"), os.path.abspath(_file.name))
with self.assertRaises(OSError) as ex:
dir_checksum(os.path.dirname(_file.name))
self.assertIn("Too many levels of symbolic links", ex.message)
def test_str_checksum(self):
checksum = str_checksum("Hello, World!")
self.assertEqual(checksum, "65a8e27d8879283831b664bd8b7f0ad4")
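# Minimal usage sketch of the helpers exercised above (assumes samcli is importable;
# the directory path is a placeholder):
#
#   from samcli.lib.utils.hash import dir_checksum, str_checksum
#
#   str_checksum("Hello, World!")    # md5 hex digest of the string
#   dir_checksum("/tmp/my-project")  # md5 digest over the directory's file contents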
|
modules/pymol/wizard/filter.py | dualword/pymol-open-source | 636 | 12738487 | # filter wizard
# no-frills tool for quickly filtering docked compounds, etc.
import os,sys
from pymol.wizard import Wizard
from pymol import cmd
import traceback
# global dictionary for saving result on a per-object basis
static_dict = {}
# last/current object being filtered
default_object = None
# browing mode
default_browse = 1
accept_str = "Accept"
defer_str = "Defer"
reject_str = "Reject"
# class definition (class name must match wizard name with cap)
class Filter(Wizard):
def migrate_session(self, version):
if version >= self.cmd.get_version()[2]:
return
# remap old "title" state identifiers to new identifiers
# (changed in 1.7.0.0)
for object, sdo in self.dict.items():
if not sdo:
continue
# build title -> ident mapping
tota = self.cmd.count_states(object)
title2ident = dict(
(self.cmd.get_title(object, state),
self.get_ident(object, state))
for state in range(1, tota + 1))
# build remapped sdo
try:
new_sdo = {}
for title, value in sdo.items():
new_sdo[title2ident[title]] = value
sdo.clear()
sdo.update(new_sdo)
except KeyError:
# lookup by title failed, we can assume that this instance
# already uses new identifiers
return
self.load_state_dict()
def __init__(self,_self=cmd):
# initialize parent class
Wizard.__init__(self,_self)
self.update_object_menu()
# restore previous state from global storage
self.dict = static_dict
self.object = default_object if default_object in self.avail_objects else None
self.browse = default_browse
self.state_dict = {}
# if we don't have a current object, choose the first multi-state object
if not self.object and self.avail_objects:
self.object = self.avail_objects[0]
        # menu for browse mode
self.menu['browse'] = [
[2, 'Browse Mode',''],
[1, 'Browse All','cmd.get_wizard().set_browse(1)'],
[1, 'Browse Accepted','cmd.get_wizard().set_browse(2)'],
[1, 'Browse Rejected','cmd.get_wizard().set_browse(3)'],
[1, 'Browse Deferred','cmd.get_wizard().set_browse(4)'],
[1, 'Browse Remaining','cmd.get_wizard().set_browse(5)'],
]
self.menu['create'] = [
[2, 'Create Filtered Object', ''],
[1, 'Accepted','cmd.get_wizard().create_object("Accept")'],
[1, 'Rejected','cmd.get_wizard().create_object("Reject")'],
[1, 'Deferred','cmd.get_wizard().create_object("Defer")'],
]
self.count_object()
self.load_state_dict()
self.update_object_menu()
cmd.set_key('F1',lambda s=self:s.accept())
cmd.set_key('F2',lambda s=self:s.reject())
cmd.set_key('F3',lambda s=self:s.defer())
cmd.set_key('right',lambda s=self:s.forward())
cmd.set_key('left',lambda s=self:s.backward())
def do_select(self,name):
try:
obj_name = cmd.index('first ?' + name)[0][0]
self.set_object(obj_name)
except:
pass
self.cmd.deselect()
def do_pick(self,bondFlag):
self.do_select('pk1')
self.cmd.unpick()
def do_state(self, state):
cmd.refresh_wizard()
def get_event_mask(self):
return Wizard.event_mask_pick + Wizard.event_mask_select + Wizard.event_mask_state
def update_object_menu(self):
# find objects with > 1 state
self.avail_objects = []
for a in cmd.get_names('objects'):
if cmd.get_type(a)=='object:molecule':
if cmd.count_states(a)>1:
self.avail_objects.append(a)
# now create the object menu
self.menu['object'] = [[2,'Select Object','']]
for a in self.avail_objects:
self.menu['object'].append([ 1,a,'cmd.get_wizard().set_object("%s")'%(a) ])
self.menu['object'].append([ 1,'None','cmd.get_wizard().set_object(None)'])
def set_browse(self,browse):
# allow user to focus on only a subset of the compounds
self.browse = browse
if self.browse == 1:
print(" Filter: Browsing all compounds.")
cmd.mset() # all states visible
elif self.object is None:
print(" Filter-Error: please choose an object first")
else:
self.check_object_dict()
if self.browse == 2:
print(" Filter: Browsing accepted compounds.")
target = accept_str
elif self.browse == 3:
print(" Filter: Browsing rejected compounds.")
target = reject_str
elif self.browse == 4:
print(" Filter: Browsing deferred compounds.")
target = defer_str
lst = []
sd = self.state_dict
sdo = self.dict[self.object]
if self.browse<5:
for a in list(sdo.keys()):
if sdo[a]==target:
lst.append(sd[a])
else:
print(" Filter: Browsing remaining compounds")
for a in sd.keys():
if a not in sdo:
lst.append(sd[a])
lst.sort()
if len(lst)==0:
print(" Filter-Error: No matching compounds.")
cmd.mset(' '.join(map(str,lst)))
cmd.rewind()
cmd.refresh_wizard()
def check_object_dict(self):
# make sure we have a valid entry for this object in our dictionary
if self.object not in self.dict:
self.dict[self.object]={} # create dictionary to store results
def adjust(self,decision,inc):
# utility routine to increment/decrement counters
if decision == accept_str:
self.acce = self.acce + inc
elif decision == reject_str:
self.reje = self.reje + inc
elif decision == defer_str:
self.defe = self.defe + inc
def load_state_dict(self):
# establish relationship between names and states
# ASSUMPTION: identifiers will be unique
self.state_dict = {}
sd = self.state_dict
so = self.object
if so is not None:
cnt = cmd.count_states(so)
for a in range(1,cnt+1):
sd[self.get_ident(so,a)] = a
def count_object(self):
        # record how many molecules are in an object, etc.
self.check_object_dict()
if self.object is not None:
self.acce = 0
self.reje = 0
self.defe = 0
self.togo = 0
self.tota = cmd.count_states(self.object)
sdo=self.dict[self.object]
self.togo = self.tota-len(sdo)
for a in list(sdo.keys()):
dec = sdo[a]
self.adjust(dec,1)
def set_object(self,obj_name):
self.object = obj_name
self.count_object()
self.load_state_dict()
cmd.refresh_wizard()
def get_panel(self):
# returns Wizard panel for PyMOL to display
# 1 = title/text
# 2 = button
# 3 = pop-up menu
self.update_object_menu()
if self.object is not None:
save_str = 'Save %s.txt'%self.object
else:
save_str = ""
return [
[ 1, 'Filtering Wizard',''],
[ 3, self.menu['browse'][self.browse][1], 'browse' ],
[ 3, 'Object: %s' % (self.object), 'object' ],
[ 2, 'Accept (F1)','cmd.get_wizard().accept()'],
[ 2, 'Reject (F2)','cmd.get_wizard().reject()'],
[ 2, 'Defer (F3)','cmd.get_wizard().defer()'],
[ 2, 'Forward (->)','cmd.get_wizard().forward()'],
[ 2, 'Back (<-)','cmd.get_wizard().backward()'],
[ 3, 'Create Filtered Object', 'create'],
[ 2, save_str,'cmd.get_wizard().save()'],
[ 2, 'Refresh','cmd.refresh_wizard()'],
[ 2, 'Done','cmd.set_wizard()'],
]
def get_ident(self, object, state):
return '%d/%d %s' % (state, self.tota,
self.cmd.get_title(self.object, state))
def get_prompt(self):
# returns text prompt
self.prompt = None
if self.object is None:
self.prompt = [ 'Please select a multi-state object...' ]
else:
self.prompt = [ '%s: %d accepted, %d rejected, %d deferred, %d remaining'%(
self.object,self.acce,self.reje,self.defe,self.togo) ]
state = cmd.get_object_state(self.object)
ident = self.get_ident(self.object,state)
sdo=self.dict[self.object]
if ident in sdo:
self.prompt.append('%s: %s'%(ident,sdo[ident]))
else:
self.prompt.append('%s?'%(ident))
return self.prompt
def count(self,entry,str):
# keep track of how many compounds are in which category
self.check_object_dict()
sdo = self.dict[self.object]
if entry in sdo:
self.adjust(sdo[entry],-1)
else:
self.togo = self.togo - 1
sdo[entry] = str
self.adjust(sdo[entry],1)
def accept(self):
# accept compound and advance
if self.object is None:
print(" Filter-Error: Please choose an object first")
else:
state = cmd.get_object_state(self.object)
ident = self.get_ident(self.object,state)
print(" Filter: Accepting '%s'"%ident)
self.count(ident,accept_str)
cmd.forward()
cmd.refresh_wizard()
def reject(self):
# reject compound and advance
if self.object is None:
print(" Filter-Error: Please choose an object first")
else:
state = cmd.get_object_state(self.object)
ident = self.get_ident(self.object,state)
print(" Filter: Rejecting '%s'"%ident)
self.check_object_dict()
self.count(ident,reject_str)
cmd.forward()
cmd.refresh_wizard()
def defer(self):
# defer compound and advance
if self.object is None:
print(" Filter-Error: Please choose an object first")
else:
state = cmd.get_object_state(self.object)
ident = self.get_ident(self.object,state)
print(" Filter: Deferring '%s'"%ident)
self.check_object_dict()
self.count(ident,defer_str)
cmd.forward()
cmd.refresh_wizard()
def forward(self):
# go forward and update information
cmd.forward()
cmd.refresh_wizard()
def backward(self):
# go backward and update information
cmd.backward()
cmd.refresh_wizard()
def create_object(self, what='Accept'):
if not self.object:
print(" Filter-Error: Please choose an object first")
return
name = self.cmd.get_unused_name(self.object + '_' + what, 0)
sdo = self.dict[self.object]
lst = [self.state_dict[ident] for (ident, w) in sdo.items() if w == what]
for state in sorted(lst):
self.cmd.create(name, self.object, state, -1)
def save(self):
# write compounds to a file
if self.object is None:
print(" Filter-Error: please choose an object first")
else:
self.check_object_dict()
fname = self.object+".txt"
try:
f=open(fname,'w')
f.close()
except:
print(" Filter-Warning: '"+fname+"' in current directory is not writable.")
print(" Filter-Warning: attempting to write in home directory.")
fname = cmd.exp_path(os.path.join('~', fname))
try:
f=open(fname,'w')
sd = self.state_dict
sdo = self.dict[self.object]
f.write('Object\t"%s"\n'%(self.object))
f.write('Total\t%d\nAccepted\t%d\nRejected\t%d\nDeferred\t%d\nRemaining\t%d\n\n'%(
self.tota,
self.acce,
self.reje,
self.defe,
self.togo))
# sort output in order of states
lst = []
for a in sd.keys():
lst.append((sd[a],a))
lst.sort()
# write list with decisions
for a in lst:
if a[1] in sdo:
f.write('%d\t"%s"\t"%s"\n'%(a[0],a[1],sdo[a[1]]))
else:
f.write('%d\t"%s"\t"?"\n'%(a[0],a[1]))
f.close()
print(" Filter: Wrote '%s'."%fname)
except:
traceback.print_exc()
print(" Filter-Error: Unable to write '%s'."%fname)
def cleanup(self):
# save current state in global vars...
global default_object,default_browse,static_dict
default_object = self.object
default_browse = self.browse
static_dict = self.dict
# restore key actions
cmd.set_key('F1',None)
cmd.set_key('F2',None)
cmd.set_key('F3',None)
cmd.set_key('right',cmd.forward)
cmd.set_key('left',cmd.backward)
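# Launching this wizard from the PyMOL command line (a sketch; 'filter' is the wizard
# name corresponding to this module):
#
#   wizard filter
#
# F1/F2/F3 then accept/reject/defer the currently displayed state, and the left/right
# arrow keys step through states, as bound in __init__ above.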
|
haven/haven_jupyter/images_tab.py | haven-ai/haven-ai | 145 | 12738505 | from .. import haven_utils
from .. import haven_results as hr
from .. import haven_utils as hu
from .. import haven_share as hd
import os
import pprint
import json
import copy
import pprint
import pandas as pd
from . import widgets as wdg
try:
import ast
from ipywidgets import Button, HBox, VBox
from ipywidgets import widgets
from IPython.display import display
from IPython.core.display import Javascript, display, HTML
from IPython.display import FileLink, FileLinks
from ipywidgets.widgets.interaction import show_inline_matplotlib_plots
except Exception:
print("widgets not available...")
def images_tab(self, output):
db = self
if db.vars.get("legend_list") is None:
db.vars["legend_list"] = hu.get_diff_hparam(db.rm.exp_list)
w_legend = wdg.SelectMultiple(header="Legend:", options=db.rm.exp_params, db_vars=db.vars, var="legend_list")
w_n_exps = wdg.Text("n_exps:", default="3", type="int", db_vars=db.vars, var="n_exps")
w_n_images = wdg.Text("n_images:", default="5", type="int", db_vars=db.vars, var="n_images")
w_figsize = wdg.Text("figsize:", default="(10,5)", type="tuple", db_vars=db.vars, var="figsize")
w_dirname = wdg.Text("dirname:", default="images", type="str", db_vars=db.vars, var="dirname")
bdownload = widgets.Button(description="Download Images", layout=self.layout_button)
bdownload_out = widgets.Output(layout=self.layout_button)
bdownload_zip = widgets.Button(description="Download Images zipped", layout=self.layout_button)
bdownload_zip_out = widgets.Output(layout=self.layout_button)
brefresh = widgets.Button(description="Display Images")
button = widgets.VBox(
[
widgets.HBox(
[
w_legend.get_widget(),
w_n_exps.get_widget(),
w_n_images.get_widget(),
w_figsize.get_widget(),
w_dirname.get_widget(),
]
),
widgets.HBox([brefresh, bdownload, bdownload_out, bdownload_zip, bdownload_zip_out]),
]
)
output_plot = widgets.Output()
with output:
display(button)
display(output_plot)
def on_clicked(b):
output_plot.clear_output()
with output_plot:
self.update_rm()
self.rm_original.fig_image_list = self.rm.get_images(
legend_list=w_legend.update(),
n_images=w_n_images.update(),
n_exps=w_n_exps.update(),
figsize=w_figsize.update(),
dirname=w_dirname.update(),
)
show_inline_matplotlib_plots()
brefresh.on_click(on_clicked)
def on_download_clicked(b):
fname = "images"
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
pp = PdfPages(fname)
for fig in self.rm_original.fig_image_list:
fig.savefig(pp, format="pdf")
pp.close()
bdownload_out.clear_output()
with bdownload_out:
display(FileLink(fname, result_html_prefix="Download: "))
def on_download_clicked_zip(b):
fname = "results.zip"
bdownload_zip_out.clear_output()
with bdownload_zip_out:
import zipfile, glob
exp_id_list = [hu.hash_dict(exp_dict) for exp_dict in self.rm.exp_list]
zipf = zipfile.ZipFile(fname, "w", zipfile.ZIP_DEFLATED)
for exp_id in exp_id_list:
abs_path_list = glob.glob(os.path.join(self.rm.savedir_base, exp_id, "images", "*"))
for abs_path in abs_path_list:
                    # name each file "<exp_id>_<image name>" so entries stay unique in the flat zip
iname = os.path.split(abs_path)[-1]
rel_path = f"{exp_id}_{iname}"
zipf.write(abs_path, rel_path)
zipf.close()
# self.rm.to_zip(savedir_base="", fname=fname, fname_list=self.vars["fname_list"])
bdownload_zip_out.clear_output()
with bdownload_zip_out:
display("%d exps zipped." % len(self.rm.exp_list))
display(FileLink(fname, result_html_prefix="Download: "))
bdownload.on_click(on_download_clicked)
bdownload_zip.on_click(on_download_clicked_zip)
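# Illustrative call site (a sketch; in the dashboard this function is wired up
# automatically, so the widget and 'db' names below are assumptions):
#
#   output = widgets.Output()
#   images_tab(db, output)  # 'db' is the dashboard object holding rm, rm_original and vars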
|
moviepy/audio/tools/cuts.py | odidev/moviepy | 8,558 | 12738510 | """Cutting utilities working with audio."""
import numpy as np
def find_audio_period(clip, min_time=0.1, max_time=2, time_resolution=0.01):
"""Finds the period, in seconds of an audioclip.
Parameters
----------
min_time : float, optional
Minimum bound for the returned value.
max_time : float, optional
Maximum bound for the returned value.
time_resolution : float, optional
Numerical precision.
"""
chunksize = int(time_resolution * clip.fps)
chunk_duration = 1.0 * chunksize / clip.fps
# v denotes the list of volumes
v = np.array([(chunk ** 2).sum() for chunk in clip.iter_chunks(chunksize)])
v = v - v.mean()
corrs = np.correlate(v, v, mode="full")[-len(v) :]
corrs[: int(min_time / chunk_duration)] = 0
corrs[int(max_time / chunk_duration) :] = 0
return chunk_duration * np.argmax(corrs)
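# A minimal usage sketch (the file name below is a placeholder; AudioFileClip is
# moviepy's audio reader):
#
#   from moviepy.audio.io.AudioFileClip import AudioFileClip
#
#   clip = AudioFileClip("drum_loop.wav")
#   period = find_audio_period(clip, min_time=0.1, max_time=2)
#   print(f"estimated period: {period:.3f} s")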
|
var/spack/repos/builtin/packages/kadath/package.py | LiamBindle/spack | 2,360 | 12738511 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Kadath(CMakePackage):
"""KADATH SPECTRAL SOLVER.
The Frankfurt University/Kadath (FUKA) Initial Data solver branch is
a collection of ID solvers aimed at delivering consistent initial
data (ID) solutions to the eXtended Conformal Thin-Sandwich (XCTS)
formulation of Einstein's field equations for a variety of compact
object configurations to include extremely compact, asymmetric, and
mixed spin binaries.
"""
homepage = "https://kadath.obspm.fr/fuka/"
git = "https://gitlab.obspm.fr/grandcle/Kadath.git"
maintainers = ['eschnett']
version('fuka', branch='fuka')
variant('mpi', default=True, description='Enable MPI support')
variant('codes', multi=True,
description="Codes to enable",
values=('none', 'BBH', 'BH', 'BHNS', 'BNS', 'NS'),
default='none')
depends_on('blas')
depends_on('boost cxxstd=17') # kadath uses std=C++17
depends_on('cmake @2.8:', type='build')
depends_on('fftw-api @3:')
depends_on('gsl')
depends_on('lapack')
depends_on('mpi', when='+mpi')
depends_on('pgplot')
depends_on('scalapack')
root_cmakelists_dir = 'build_release'
def patch(self):
for code in self.spec.variants['codes'].value:
if code != 'none':
# Disable unwanted explicit include directory settings
filter_file(r"include_directories\(/usr",
"# include_directories(/usr",
join_path("codes", code, "CMakeLists.txt"))
def setup_build_environment(self, env):
env.set('HOME_KADATH', self.stage.source_path)
def cmake_args(self):
return [
# kadath uses a non-standard option to enable MPI
self.define_from_variant('PAR_VERSION', 'mpi'),
]
def cmake(self, spec, prefix):
options = self.std_cmake_args
options += self.cmake_args()
options.append(os.path.abspath(self.root_cmakelists_dir))
with working_dir(self.build_directory, create=True):
cmake(*options)
for code in self.spec.variants['codes'].value:
if code != 'none':
with working_dir(join_path("codes", code)):
cmake(*options)
def build(self, spec, prefix):
with working_dir(self.build_directory):
make(*self.build_targets)
for code in self.spec.variants['codes'].value:
if code != 'none':
with working_dir(join_path("codes", code)):
make(*self.build_targets)
def install(self, spec, prefix):
mkdirp(prefix.include)
install_tree('include', prefix.include)
mkdirp(prefix.lib)
install_tree('lib', prefix.lib)
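# Illustrative Spack usage for this package (standard Spack CLI; the code list is one
# example of the multi-valued 'codes' variant defined above):
#
#   spack install kadath +mpi codes=BNS,BH
#   spack load kadath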
|
dulwich/tests/test_refs.py | jessecureton-aurora/dulwich | 460 | 12738543 | # test_refs.py -- tests for refs.py
# encoding: utf-8
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for dulwich.refs."""
from io import BytesIO
import os
import sys
import tempfile
from dulwich import errors
from dulwich.file import (
GitFile,
)
from dulwich.objects import ZERO_SHA
from dulwich.refs import (
DictRefsContainer,
InfoRefsContainer,
check_ref_format,
_split_ref_line,
parse_symref_value,
read_packed_refs_with_peeled,
read_packed_refs,
strip_peeled_refs,
write_packed_refs,
)
from dulwich.repo import Repo
from dulwich.tests import (
SkipTest,
TestCase,
)
from dulwich.tests.utils import (
open_repo,
tear_down_repo,
)
class CheckRefFormatTests(TestCase):
"""Tests for the check_ref_format function.
These are the same tests as in the git test suite.
"""
def test_valid(self):
self.assertTrue(check_ref_format(b"heads/foo"))
self.assertTrue(check_ref_format(b"foo/bar/baz"))
self.assertTrue(check_ref_format(b"refs///heads/foo"))
self.assertTrue(check_ref_format(b"foo./bar"))
self.assertTrue(check_ref_format(b"heads/foo@bar"))
self.assertTrue(check_ref_format(b"heads/fix.lock.error"))
def test_invalid(self):
self.assertFalse(check_ref_format(b"foo"))
self.assertFalse(check_ref_format(b"heads/foo/"))
self.assertFalse(check_ref_format(b"./foo"))
self.assertFalse(check_ref_format(b".refs/foo"))
self.assertFalse(check_ref_format(b"heads/foo..bar"))
self.assertFalse(check_ref_format(b"heads/foo?bar"))
self.assertFalse(check_ref_format(b"heads/foo.lock"))
self.assertFalse(check_ref_format(b"heads/v@{ation"))
self.assertFalse(check_ref_format(b"heads/foo\bar"))
ONES = b"1" * 40
TWOS = b"2" * 40
THREES = b"3" * 40
FOURS = b"4" * 40
class PackedRefsFileTests(TestCase):
def test_split_ref_line_errors(self):
self.assertRaises(errors.PackedRefsException, _split_ref_line, b"singlefield")
self.assertRaises(errors.PackedRefsException, _split_ref_line, b"badsha name")
self.assertRaises(
errors.PackedRefsException,
_split_ref_line,
ONES + b" bad/../refname",
)
def test_read_without_peeled(self):
f = BytesIO(b"\n".join([b"# comment", ONES + b" ref/1", TWOS + b" ref/2"]))
self.assertEqual(
[(ONES, b"ref/1"), (TWOS, b"ref/2")], list(read_packed_refs(f))
)
def test_read_without_peeled_errors(self):
f = BytesIO(b"\n".join([ONES + b" ref/1", b"^" + TWOS]))
self.assertRaises(errors.PackedRefsException, list, read_packed_refs(f))
def test_read_with_peeled(self):
f = BytesIO(
b"\n".join(
[
ONES + b" ref/1",
TWOS + b" ref/2",
b"^" + THREES,
FOURS + b" ref/4",
]
)
)
self.assertEqual(
[
(ONES, b"ref/1", None),
(TWOS, b"ref/2", THREES),
(FOURS, b"ref/4", None),
],
list(read_packed_refs_with_peeled(f)),
)
def test_read_with_peeled_errors(self):
f = BytesIO(b"\n".join([b"^" + TWOS, ONES + b" ref/1"]))
self.assertRaises(errors.PackedRefsException, list, read_packed_refs(f))
f = BytesIO(b"\n".join([ONES + b" ref/1", b"^" + TWOS, b"^" + THREES]))
self.assertRaises(errors.PackedRefsException, list, read_packed_refs(f))
def test_write_with_peeled(self):
f = BytesIO()
write_packed_refs(f, {b"ref/1": ONES, b"ref/2": TWOS}, {b"ref/1": THREES})
self.assertEqual(
b"\n".join(
[
b"# pack-refs with: peeled",
ONES + b" ref/1",
b"^" + THREES,
TWOS + b" ref/2",
]
)
+ b"\n",
f.getvalue(),
)
def test_write_without_peeled(self):
f = BytesIO()
write_packed_refs(f, {b"ref/1": ONES, b"ref/2": TWOS})
self.assertEqual(
b"\n".join([ONES + b" ref/1", TWOS + b" ref/2"]) + b"\n",
f.getvalue(),
)
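# A minimal sketch of the round-trip exercised above (in-memory file; the ref name and
# sha are placeholders):
#
#   buf = BytesIO()
#   write_packed_refs(buf, {b"refs/heads/main": b"1" * 40})
#   buf.seek(0)
#   list(read_packed_refs(buf))  # -> [(b"1" * 40, b"refs/heads/main")]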
# Dict of refs that we expect all RefsContainerTests subclasses to define.
_TEST_REFS = {
b"HEAD": b"<PASSWORD>493e<PASSWORD>ec",
b"refs/heads/40-char-ref-aaaaaaaaaaaaaaaaaa": b"<PASSWORD>f02ec",
b"refs/heads/master": b"<PASSWORD>",
b"refs/heads/packed": b"<PASSWORD>",
b"refs/tags/refs-0.1": b"df6800012397fb85c56e7418dd4eb9405dee075c",
b"refs/tags/refs-0.2": b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8",
b"refs/heads/loop": b"ref: refs/heads/loop",
}
class RefsContainerTests(object):
def test_keys(self):
actual_keys = set(self._refs.keys())
self.assertEqual(set(self._refs.allkeys()), actual_keys)
self.assertEqual(set(_TEST_REFS.keys()), actual_keys)
actual_keys = self._refs.keys(b"refs/heads")
actual_keys.discard(b"loop")
self.assertEqual(
[b"40-char-ref-aaaaaaaaaaaaaaaaaa", b"master", b"packed"],
sorted(actual_keys),
)
self.assertEqual(
[b"refs-0.1", b"refs-0.2"], sorted(self._refs.keys(b"refs/tags"))
)
def test_iter(self):
actual_keys = set(self._refs.keys())
self.assertEqual(set(self._refs), actual_keys)
self.assertEqual(set(_TEST_REFS.keys()), actual_keys)
def test_as_dict(self):
# refs/heads/loop does not show up even if it exists
expected_refs = dict(_TEST_REFS)
del expected_refs[b"refs/heads/loop"]
self.assertEqual(expected_refs, self._refs.as_dict())
def test_get_symrefs(self):
self._refs.set_symbolic_ref(b"refs/heads/src", b"refs/heads/dst")
symrefs = self._refs.get_symrefs()
if b"HEAD" in symrefs:
symrefs.pop(b"HEAD")
self.assertEqual(
{
b"refs/heads/src": b"refs/heads/dst",
b"refs/heads/loop": b"refs/heads/loop",
},
symrefs,
)
def test_setitem(self):
self._refs[b"refs/some/ref"] = b"42d06bd4b77fed026b154d16493e5deab78f02ec"
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs[b"refs/some/ref"],
)
self.assertRaises(
errors.RefFormatError,
self._refs.__setitem__,
b"notrefs/foo",
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
)
def test_set_if_equals(self):
nines = b"9" * 40
self.assertFalse(self._refs.set_if_equals(b"HEAD", b"c0ffee", nines))
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec", self._refs[b"HEAD"]
)
self.assertTrue(
self._refs.set_if_equals(
b"HEAD", b"42d06bd4b77fed026b154d16493e5deab78f02ec", nines
)
)
self.assertEqual(nines, self._refs[b"HEAD"])
# Setting the ref again is a no-op, but will return True.
self.assertTrue(self._refs.set_if_equals(b"HEAD", nines, nines))
self.assertEqual(nines, self._refs[b"HEAD"])
self.assertTrue(self._refs.set_if_equals(b"refs/heads/master", None, nines))
self.assertEqual(nines, self._refs[b"refs/heads/master"])
self.assertTrue(
self._refs.set_if_equals(b"refs/heads/nonexistant", ZERO_SHA, nines)
)
self.assertEqual(nines, self._refs[b"refs/heads/nonexistant"])
def test_add_if_new(self):
nines = b"9" * 40
self.assertFalse(self._refs.add_if_new(b"refs/heads/master", nines))
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs[b"refs/heads/master"],
)
self.assertTrue(self._refs.add_if_new(b"refs/some/ref", nines))
self.assertEqual(nines, self._refs[b"refs/some/ref"])
def test_set_symbolic_ref(self):
self._refs.set_symbolic_ref(b"refs/heads/symbolic", b"refs/heads/master")
self.assertEqual(
b"ref: refs/heads/master",
self._refs.read_loose_ref(b"refs/heads/symbolic"),
)
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs[b"refs/heads/symbolic"],
)
def test_set_symbolic_ref_overwrite(self):
nines = b"9" * 40
self.assertNotIn(b"refs/heads/symbolic", self._refs)
self._refs[b"refs/heads/symbolic"] = nines
self.assertEqual(nines, self._refs.read_loose_ref(b"refs/heads/symbolic"))
self._refs.set_symbolic_ref(b"refs/heads/symbolic", b"refs/heads/master")
self.assertEqual(
b"ref: refs/heads/master",
self._refs.read_loose_ref(b"refs/heads/symbolic"),
)
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs[b"refs/heads/symbolic"],
)
def test_check_refname(self):
self._refs._check_refname(b"HEAD")
self._refs._check_refname(b"refs/stash")
self._refs._check_refname(b"refs/heads/foo")
self.assertRaises(errors.RefFormatError, self._refs._check_refname, b"refs")
self.assertRaises(
errors.RefFormatError, self._refs._check_refname, b"notrefs/foo"
)
def test_contains(self):
self.assertIn(b"refs/heads/master", self._refs)
self.assertNotIn(b"refs/heads/bar", self._refs)
def test_delitem(self):
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs[b"refs/heads/master"],
)
del self._refs[b"refs/heads/master"]
self.assertRaises(KeyError, lambda: self._refs[b"refs/heads/master"])
def test_remove_if_equals(self):
self.assertFalse(self._refs.remove_if_equals(b"HEAD", b"c0ffee"))
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec", self._refs[b"HEAD"]
)
self.assertTrue(
self._refs.remove_if_equals(
b"refs/tags/refs-0.2",
b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8",
)
)
self.assertTrue(self._refs.remove_if_equals(b"refs/tags/refs-0.2", ZERO_SHA))
self.assertNotIn(b"refs/tags/refs-0.2", self._refs)
def test_import_refs_name(self):
self._refs[
b"refs/remotes/origin/other"
] = b"48d01bd4b77fed026b154d16493e5deab78f02ec"
self._refs.import_refs(
b"refs/remotes/origin",
{b"master": b"4<PASSWORD>6b154d16493e5deab78f02ec"},
)
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs[b"refs/remotes/origin/master"],
)
self.assertEqual(
b"48d01bd4b77fed026b154d16493e5deab78f02ec",
self._refs[b"refs/remotes/origin/other"],
)
def test_import_refs_name_prune(self):
self._refs[
b"refs/remotes/origin/other"
] = b"48d01bd4b77fed026b154d16493e5deab78f02ec"
self._refs.import_refs(
b"refs/remotes/origin",
{b"master": b"<PASSWORD>1<PASSWORD>3e5deab78f02ec"},
prune=True,
)
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs[b"refs/remotes/origin/master"],
)
self.assertNotIn(b"refs/remotes/origin/other", self._refs)
def test_watch(self):
try:
watcher = self._refs.watch()
except (NotImplementedError, ImportError):
self.skipTest("watching not supported")
with watcher:
self._refs[
b"refs/remotes/origin/other"
] = b"48d01bd4b77fed026b154d16493e5deab78f02ec"
change = next(watcher)
self.assertEqual(
(
b"refs/remotes/origin/other",
b"48d01bd4b77fed026b154d16493e5deab78f02ec",
),
change,
)
self._refs[
b"refs/remotes/origin/other"
] = b"48d01bd4b77fed026b154d16493e5deab78f02ed"
change = next(watcher)
self.assertEqual(
(
b"refs/remotes/origin/other",
b"48d01bd4b77fed026b154d16493e5deab78f02ed",
),
change,
)
del self._refs[b"refs/remotes/origin/other"]
change = next(watcher)
self.assertEqual((b"refs/remotes/origin/other", None), change)
class DictRefsContainerTests(RefsContainerTests, TestCase):
def setUp(self):
TestCase.setUp(self)
self._refs = DictRefsContainer(dict(_TEST_REFS))
def test_invalid_refname(self):
# FIXME: Move this test into RefsContainerTests, but requires
# some way of injecting invalid refs.
self._refs._refs[b"refs/stash"] = b"00" * 20
expected_refs = dict(_TEST_REFS)
del expected_refs[b"refs/heads/loop"]
expected_refs[b"refs/stash"] = b"00" * 20
self.assertEqual(expected_refs, self._refs.as_dict())
class DiskRefsContainerTests(RefsContainerTests, TestCase):
def setUp(self):
TestCase.setUp(self)
self._repo = open_repo("refs.git")
self.addCleanup(tear_down_repo, self._repo)
self._refs = self._repo.refs
def test_get_packed_refs(self):
self.assertEqual(
{
b"refs/heads/packed": b"42d06bd4b77fed026b154d16493e5deab78f02ec",
b"refs/tags/refs-0.1": b"df6800012397fb85c56e7418dd4eb9405dee075c",
},
self._refs.get_packed_refs(),
)
def test_get_peeled_not_packed(self):
# not packed
self.assertEqual(None, self._refs.get_peeled(b"refs/tags/refs-0.2"))
self.assertEqual(
b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8",
self._refs[b"refs/tags/refs-0.2"],
)
# packed, known not peelable
self.assertEqual(
self._refs[b"refs/heads/packed"],
self._refs.get_peeled(b"refs/heads/packed"),
)
# packed, peeled
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs.get_peeled(b"refs/tags/refs-0.1"),
)
def test_setitem(self):
RefsContainerTests.test_setitem(self)
path = os.path.join(self._refs.path, b"refs", b"some", b"ref")
with open(path, "rb") as f:
self.assertEqual(b"42d06bd4b77fed026b154d16493e5deab78f02ec", f.read()[:40])
self.assertRaises(
OSError,
self._refs.__setitem__,
b"refs/some/ref/sub",
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
)
def test_delete_refs_container(self):
# We shouldn't delete the refs directory
self._refs[b'refs/heads/blah'] = b"42d06bd4b77fed026b154d16493e5deab78f02ec"
for ref in self._refs.allkeys():
del self._refs[ref]
self.assertTrue(os.path.exists(os.path.join(self._refs.path, b'refs')))
def test_setitem_packed(self):
with open(os.path.join(self._refs.path, b"packed-refs"), "w") as f:
f.write("# pack-refs with: peeled fully-peeled sorted \n")
f.write("42d06bd4b77fed026b154d16493e5deab78f02ec refs/heads/packed\n")
# It's allowed to set a new ref on a packed ref, the new ref will be
# placed outside on refs/
self._refs[b"refs/heads/packed"] = b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8"
packed_ref_path = os.path.join(self._refs.path, b"refs", b"heads", b"packed")
with open(packed_ref_path, "rb") as f:
self.assertEqual(b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8", f.read()[:40])
self.assertRaises(
OSError,
self._refs.__setitem__,
b"refs/heads/packed/sub",
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
)
def test_setitem_symbolic(self):
ones = b"1" * 40
self._refs[b"HEAD"] = ones
self.assertEqual(ones, self._refs[b"HEAD"])
# ensure HEAD was not modified
f = open(os.path.join(self._refs.path, b"HEAD"), "rb")
v = next(iter(f)).rstrip(b"\n\r")
f.close()
self.assertEqual(b"ref: refs/heads/master", v)
# ensure the symbolic link was written through
f = open(os.path.join(self._refs.path, b"refs", b"heads", b"master"), "rb")
self.assertEqual(ones, f.read()[:40])
f.close()
def test_set_if_equals(self):
RefsContainerTests.test_set_if_equals(self)
# ensure symref was followed
self.assertEqual(b"9" * 40, self._refs[b"refs/heads/master"])
# ensure lockfile was deleted
self.assertFalse(
os.path.exists(
os.path.join(self._refs.path, b"refs", b"heads", b"master.lock")
)
)
self.assertFalse(os.path.exists(os.path.join(self._refs.path, b"HEAD.lock")))
def test_add_if_new_packed(self):
# don't overwrite packed ref
self.assertFalse(self._refs.add_if_new(b"refs/tags/refs-0.1", b"9" * 40))
self.assertEqual(
b"df6800012397fb85c56e7418dd4eb9405dee075c",
self._refs[b"refs/tags/refs-0.1"],
)
def test_add_if_new_symbolic(self):
# Use an empty repo instead of the default.
repo_dir = os.path.join(tempfile.mkdtemp(), "test")
os.makedirs(repo_dir)
repo = Repo.init(repo_dir)
self.addCleanup(tear_down_repo, repo)
refs = repo.refs
nines = b"9" * 40
self.assertEqual(b"ref: refs/heads/master", refs.read_ref(b"HEAD"))
self.assertNotIn(b"refs/heads/master", refs)
self.assertTrue(refs.add_if_new(b"HEAD", nines))
self.assertEqual(b"ref: refs/heads/master", refs.read_ref(b"HEAD"))
self.assertEqual(nines, refs[b"HEAD"])
self.assertEqual(nines, refs[b"refs/heads/master"])
self.assertFalse(refs.add_if_new(b"HEAD", b"1" * 40))
self.assertEqual(nines, refs[b"HEAD"])
self.assertEqual(nines, refs[b"refs/heads/master"])
def test_follow(self):
self.assertEqual(
(
[b"HEAD", b"refs/heads/master"],
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
),
self._refs.follow(b"HEAD"),
)
self.assertEqual(
(
[b"refs/heads/master"],
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
),
self._refs.follow(b"refs/heads/master"),
)
self.assertRaises(KeyError, self._refs.follow, b"refs/heads/loop")
def test_delitem(self):
RefsContainerTests.test_delitem(self)
ref_file = os.path.join(self._refs.path, b"refs", b"heads", b"master")
self.assertFalse(os.path.exists(ref_file))
self.assertNotIn(b"refs/heads/master", self._refs.get_packed_refs())
def test_delitem_symbolic(self):
self.assertEqual(b"ref: refs/heads/master", self._refs.read_loose_ref(b"HEAD"))
del self._refs[b"HEAD"]
self.assertRaises(KeyError, lambda: self._refs[b"HEAD"])
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs[b"refs/heads/master"],
)
self.assertFalse(os.path.exists(os.path.join(self._refs.path, b"HEAD")))
def test_remove_if_equals_symref(self):
# HEAD is a symref, so shouldn't equal its dereferenced value
self.assertFalse(
self._refs.remove_if_equals(
b"HEAD", b"42d06bd4b77fed026b154d16493e5deab78f02ec"
)
)
self.assertTrue(
self._refs.remove_if_equals(
b"refs/heads/master",
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
)
)
self.assertRaises(KeyError, lambda: self._refs[b"refs/heads/master"])
# HEAD is now a broken symref
self.assertRaises(KeyError, lambda: self._refs[b"HEAD"])
self.assertEqual(b"ref: refs/heads/master", self._refs.read_loose_ref(b"HEAD"))
self.assertFalse(
os.path.exists(
os.path.join(self._refs.path, b"refs", b"heads", b"master.lock")
)
)
self.assertFalse(os.path.exists(os.path.join(self._refs.path, b"HEAD.lock")))
def test_remove_packed_without_peeled(self):
refs_file = os.path.join(self._repo.path, "packed-refs")
f = GitFile(refs_file)
refs_data = f.read()
f.close()
f = GitFile(refs_file, "wb")
f.write(
b"\n".join(
line
for line in refs_data.split(b"\n")
if not line or line[0] not in b"#^"
)
)
f.close()
self._repo = Repo(self._repo.path)
refs = self._repo.refs
self.assertTrue(
refs.remove_if_equals(
b"refs/heads/packed",
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
)
)
def test_remove_if_equals_packed(self):
# test removing ref that is only packed
self.assertEqual(
b"df6800012397fb85c56e7418dd4eb9405dee075c",
self._refs[b"refs/tags/refs-0.1"],
)
self.assertTrue(
self._refs.remove_if_equals(
b"refs/tags/refs-0.1",
b"df6800012397fb85c56e7418dd4eb9405dee075c",
)
)
self.assertRaises(KeyError, lambda: self._refs[b"refs/tags/refs-0.1"])
def test_remove_parent(self):
self._refs[b"refs/heads/foo/bar"] = b"df6800012397fb85c56e7418dd4eb9405dee075c"
del self._refs[b"refs/heads/foo/bar"]
ref_file = os.path.join(
self._refs.path,
b"refs",
b"heads",
b"foo",
b"bar",
)
self.assertFalse(os.path.exists(ref_file))
ref_file = os.path.join(self._refs.path, b"refs", b"heads", b"foo")
self.assertFalse(os.path.exists(ref_file))
ref_file = os.path.join(self._refs.path, b"refs", b"heads")
self.assertTrue(os.path.exists(ref_file))
self._refs[b"refs/heads/foo"] = b"df6800012397fb85c56e7418dd4eb9405dee075c"
def test_read_ref(self):
self.assertEqual(b"ref: refs/heads/master", self._refs.read_ref(b"HEAD"))
self.assertEqual(
b"42d06bd4b77fed026b154d16493e5deab78f02ec",
self._refs.read_ref(b"refs/heads/packed"),
)
        self.assertEqual(None, self._refs.read_ref(b"nonexistent"))
def test_read_loose_ref(self):
self._refs[b"refs/heads/foo"] = b"df6800012397fb85c56e7418dd4eb9405dee075c"
self.assertEqual(None, self._refs.read_ref(b"refs/heads/foo/bar"))
def test_non_ascii(self):
try:
encoded_ref = os.fsencode(u"refs/tags/schön")
except UnicodeEncodeError:
raise SkipTest("filesystem encoding doesn't support special character")
p = os.path.join(os.fsencode(self._repo.path), encoded_ref)
with open(p, "w") as f:
f.write("00" * 20)
expected_refs = dict(_TEST_REFS)
expected_refs[encoded_ref] = b"00" * 20
del expected_refs[b"refs/heads/loop"]
self.assertEqual(expected_refs, self._repo.get_refs())
def test_cyrillic(self):
if sys.platform in ("darwin", "win32"):
raise SkipTest("filesystem encoding doesn't support arbitrary bytes")
# reported in https://github.com/dulwich/dulwich/issues/608
name = b"\xcd\xee\xe2\xe0\xff\xe2\xe5\xf2\xea\xe01"
encoded_ref = b"refs/heads/" + name
with open(os.path.join(os.fsencode(self._repo.path), encoded_ref), "w") as f:
f.write("00" * 20)
expected_refs = set(_TEST_REFS.keys())
expected_refs.add(encoded_ref)
self.assertEqual(expected_refs, set(self._repo.refs.allkeys()))
self.assertEqual(
{r[len(b"refs/") :] for r in expected_refs if r.startswith(b"refs/")},
set(self._repo.refs.subkeys(b"refs/")),
)
expected_refs.remove(b"refs/heads/loop")
expected_refs.add(b"HEAD")
self.assertEqual(expected_refs, set(self._repo.get_refs().keys()))
_TEST_REFS_SERIALIZED = (
b"42d06bd4b77fed026b154d16493e5deab78f02ec\t"
b"refs/heads/40-char-ref-aaaaaaaaaaaaaaaaaa\n"
b"42d06bd4b77fed026b154d16493e5deab78f02ec\trefs/heads/master\n"
b"42d06bd4b77fed026b154d16493e5deab78f02ec\trefs/heads/packed\n"
b"df6800012397fb85c56e7418dd4eb9405dee075c\trefs/tags/refs-0.1\n"
b"3ec9c43c84ff242e3ef4a9fc5bc111fd780a76a8\trefs/tags/refs-0.2\n"
)
class InfoRefsContainerTests(TestCase):
def test_invalid_refname(self):
text = _TEST_REFS_SERIALIZED + b"00" * 20 + b"\trefs/stash\n"
refs = InfoRefsContainer(BytesIO(text))
expected_refs = dict(_TEST_REFS)
del expected_refs[b"HEAD"]
expected_refs[b"refs/stash"] = b"00" * 20
del expected_refs[b"refs/heads/loop"]
self.assertEqual(expected_refs, refs.as_dict())
def test_keys(self):
refs = InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))
actual_keys = set(refs.keys())
self.assertEqual(set(refs.allkeys()), actual_keys)
expected_refs = dict(_TEST_REFS)
del expected_refs[b"HEAD"]
del expected_refs[b"refs/heads/loop"]
self.assertEqual(set(expected_refs.keys()), actual_keys)
actual_keys = refs.keys(b"refs/heads")
actual_keys.discard(b"loop")
self.assertEqual(
[b"40-char-ref-aaaaaaaaaaaaaaaaaa", b"master", b"packed"],
sorted(actual_keys),
)
self.assertEqual([b"refs-0.1", b"refs-0.2"], sorted(refs.keys(b"refs/tags")))
def test_as_dict(self):
refs = InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))
# refs/heads/loop does not show up even if it exists
expected_refs = dict(_TEST_REFS)
del expected_refs[b"HEAD"]
del expected_refs[b"refs/heads/loop"]
self.assertEqual(expected_refs, refs.as_dict())
def test_contains(self):
refs = InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))
self.assertIn(b"refs/heads/master", refs)
self.assertNotIn(b"refs/heads/bar", refs)
def test_get_peeled(self):
refs = InfoRefsContainer(BytesIO(_TEST_REFS_SERIALIZED))
# refs/heads/loop does not show up even if it exists
self.assertEqual(
_TEST_REFS[b"refs/heads/master"],
refs.get_peeled(b"refs/heads/master"),
)
class ParseSymrefValueTests(TestCase):
def test_valid(self):
self.assertEqual(b"refs/heads/foo", parse_symref_value(b"ref: refs/heads/foo"))
def test_invalid(self):
self.assertRaises(ValueError, parse_symref_value, b"foobar")
class StripPeeledRefsTests(TestCase):
all_refs = {
b"refs/heads/master": b"8843d7f92416211de9ebb963ff4ce28125932878",
b"refs/heads/testing": b"186a005b134d8639a58b6731c7c1ea821a6eedba",
b"refs/tags/1.0.0": b"a93db4b0360cc635a2b93675010bac8d101f73f0",
b"refs/tags/1.0.0^{}": b"a93db4b0360cc635a2b93675010bac8d101f73f0",
b"refs/tags/2.0.0": b"0749936d0956c661ac8f8d3483774509c165f89e",
b"refs/tags/2.0.0^{}": b"0749936d0956c661ac8f8d3483774509c165f89e",
}
non_peeled_refs = {
b"refs/heads/master": b"8843d7f92416211de9ebb963ff4ce28125932878",
b"refs/heads/testing": b"186a005b134d8639a58b6731c7c1ea821a6eedba",
b"refs/tags/1.0.0": b"a93db4b0360cc635a2b93675010bac8d101f73f0",
b"refs/tags/2.0.0": b"0749936d0956c661ac8f8d3483774509c165f89e",
}
def test_strip_peeled_refs(self):
# Simple check of two dicts
self.assertEqual(strip_peeled_refs(self.all_refs), self.non_peeled_refs)
|
env/lib/python3.4/site-packages/bulbs/titan/index.py | mudbungie/NetExplorer | 234 | 12738545 | # -*- coding: utf-8 -*-
#
# Copyright 2012 <NAME> (http://jamesthornton.com)
# BSD License (see LICENSE for details)
#
"""
An interface for interacting with indices on Rexster.
"""
from bulbs.utils import initialize_element, initialize_elements, get_one_result
class IndexProxy(object):
"""Abstract base class the index proxies."""
def __init__(self, index_class, client):
# The index class for this proxy, e.g. ManualIndex.
self.index_class = index_class
# The Client object for the database.
self.client = client
class VertexIndexProxy(IndexProxy):
"""
Manage vertex indices on Rexster.
:param index_class: The index class for this proxy, e.g. ManualIndex.
:type index_class: Index
:param client: The Client object for the database.
:type client: bulbs.rexster.client.RexsterClient
:ivar index_class: Index class.
:ivar client: RexsterClient object.
"""
def create(self, index_name):
"""
        Creates a Vertex index and returns it.
:param index_name: Index name.
:type index_name: str
:rtype: bulbs.rexster.index.Index
"""
raise NotImplementedError
def get(self, index_name="vertex"):
"""
Returns the Index object with the specified name or None if not found.
:param index_name: Index name.
:type index_name: str
:rtype: bulbs.rexster.index.Index
"""
index = self.index_class(self.client, None)
index.base_type = "vertex"
index._index_name = index_name
self.client.registry.add_index(index_name, index)
return index
def get_or_create(self, index_name="vertex", index_params=None):
"""
Get a Vertex Index or create it if it doesn't exist.
:param index_name: Index name.
:type index_name: str
:rtype: bulbs.rexster.index.Index
"""
return self.get(index_name)
def delete(self, index_name):
"""
Deletes an index and returns the Response.
:param index_name: Index name.
:type index_name: str
:rtype: bulbs.rexster.client.RexsterResponse
"""
raise NotImplementedError
class EdgeIndexProxy(IndexProxy):
"""
Manage edge indices on Rexster.
:param index_class: The index class for this proxy, e.g. ManualIndex.
:type index_class: Index
:param client: The Client object for the database.
:type client: bulbs.rexster.client.RexsterClient
:ivar index_class: Index class.
:ivar client: RexsterClient object.
"""
def create(self,index_name,*args,**kwds):
"""
Adds an index to the database and returns it.
        index_keys must be a string in this format: '[k1,k2]'.
        Don't pass an actual list because the keys get double quoted.
:param index_name: The name of the index to create.
:param index_class: The class of the elements stored in the index.
Either vertex or edge.
"""
raise NotImplementedError
def get(self, index_name="edge"):
"""
Returns the Index object with the specified name or None if not found.
:param index_name: Index name.
:type index_name: str
:rtype: bulbs.rexster.index.Index
"""
index = self.index_class(self.client, None)
index.base_type = "edge"
index._index_name = index_name
self.client.registry.add_index(index_name, index)
return index
def get_or_create(self, index_name="edge", index_params=None):
"""
Get an Edge Index or create it if it doesn't exist.
:param index_name: Index name.
:type index_name: str
:rtype: bulbs.rexster.index.Index
"""
return self.get(index_name)
def delete(self,index_name):
"""
Deletes an index and returns the Response.
:param index_name: Index name.
:type index_name: str
:rtype: bulbs.rexster.client.RexsterResponse
"""
raise NotImplementedError
#
# Index Containers (Titan only supports KeyIndex so far)
#
class Index(object):
"""Abstract base class for an index."""
def __init__(self, client, result):
self.client = client
self.result = result
self.base_type = None # set by Factory.get_index
self._index_name = None # ditto
# the index_name is actually ignored with Titan,
# but setting it like normal to make tests pass
@classmethod
def get_proxy_class(cls, base_type):
"""
Returns the IndexProxy class.
:param base_type: Index base type, either vertex or edge.
:type base_type: str
:rtype: class
"""
class_map = dict(vertex=VertexIndexProxy, edge=EdgeIndexProxy)
return class_map[base_type]
@property
def index_name(self):
"""
Returns the index name.
:rtype: str
"""
# faking the index name as "vertex"
return self._index_name
@property
def index_class(self):
"""
Returns the index class, either vertex or edge.
:rtype: class
"""
return self.base_type
@property
def index_type(self):
"""
Returns the index type, which will either be automatic or manual.
:rtype: str
"""
return "automatic"
def count(self,key=None,value=None,**pair):
"""
Return a count of all elements with 'key' equal to 'value' in the index.
:param key: The index key. This is optional because you can instead
supply a key/value pair such as name="James".
:param value: The index key's value. This is optional because you can
instead supply a key/value pair such as name="James".
:param pair: Optional keyword param. Instead of supplying key=name
and value = 'James', you can supply a key/value pair in
the form of name='James'.
"""
raise NotImplementedError
def _get_key_value(self, key, value, pair):
"""Return the key and value, regardless of how it was entered."""
if pair:
key, value = pair.popitem()
return key, value
def _get_method(self, **method_map):
method_name = method_map[self.index_class]
method = getattr(self.client, method_name)
return method
def lookup(self, key=None, value=None, **pair):
"""
Return a generator containing all the elements with key property equal
to value in the index.
:param key: The index key. This is optional because you can instead
supply a key/value pair such as name="James".
:param value: The index key's value. This is optional because you can
instead supply a key/value pair such as name="James".
:param raw: Optional keyword param. If set to True, it won't try to
initialize the results. Defaults to False.
:param pair: Optional keyword param. Instead of supplying key=name
and value = 'James', you can supply a key/value pair in
the form of name='James'.
"""
key, value = self._get_key_value(key, value, pair)
resp = self.client.lookup_vertex(self.index_name,key,value)
return initialize_elements(self.client,resp)
def get_unique(self,key=None,value=None,**pair):
"""
Returns a max of 1 elements matching the key/value pair in the index.
:param key: The index key. This is optional because you can instead
supply a key/value pair such as name="James".
:param value: The index key's value. This is optional because you can
instead supply a key/value pair such as name="James".
:param pair: Optional keyword param. Instead of supplying key=name
and value = 'James', you can supply a key/value pair in
the form of name='James'.
"""
key, value = self._get_key_value(key,value,pair)
resp = self.client.lookup_vertex(self.index_name,key,value)
if resp.total_size > 0:
result = get_one_result(resp)
return initialize_element(self.client, result)
class KeyIndex(Index):
def keys(self):
"""Return the index's keys."""
# Titan does not support edge indices.
resp = self.client.get_vertex_keys()
return [result.raw for result in resp.results]
def create_key(self, key):
# TODO: You can't create a key if prop already exists - workaround?
if self.base_type is "edge":
return self.create_edge_key(key)
return self.create_vertex_key(key)
def create_vertex_key(self, key):
return self.client.create_vertex_key_index(key)
def create_edge_key(self, key):
return self.client.create_vertex_key_index(key)
def rebuild(self):
raise NotImplementedError # (for now)
# need class_map b/c the Blueprints need capitalized class names,
# but Rexster returns lower-case class names for index_class
method_map = dict(vertex=self.client.rebuild_vertex_index,
edge=self.client.rebuild_edge_index)
rebuild_method = method_map.get(self.index_class)
resp = rebuild_method(self.index_name)
return list(resp.results)
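# --- Illustrative usage sketch (editor addition, not part of the original bulbs module) ---
# The proxies above are normally obtained through a configured bulbs Graph object.
# The only piece that can run without a live Titan/Rexster backend is the
# base_type -> proxy-class mapping, shown under the __main__ guard; the commented
# lines sketch the usual flow and assume a hypothetical `client` connection.
if __name__ == "__main__":
    print(Index.get_proxy_class("vertex").__name__)   # VertexIndexProxy
    print(KeyIndex.get_proxy_class("edge").__name__)  # EdgeIndexProxy
    # With a real client, a typical flow might look like:
    #   proxy = VertexIndexProxy(KeyIndex, client)
    #   index = proxy.get("vertex")
    #   index.create_key("name")
    #   people = list(index.lookup(name="James"))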
|
test/regression/features/assignment/assign_multi.py | ppelleti/berp | 137 | 12738583 | <filename>test/regression/features/assignment/assign_multi.py
x = y = 1
print(x,y)
x = y = z = 1
print(x,y,z)
|
zoopt/objective.py | HowardHu97/ZOOpt | 403 | 12738646 | <reponame>HowardHu97/ZOOpt<filename>zoopt/objective.py<gh_stars>100-1000
"""
This module contains the class Objective
Author:
<NAME>, <NAME>
"""
from zoopt.solution import Solution
from zoopt.utils.zoo_global import pos_inf
from zoopt.utils.tool_function import ToolFunction
import numpy as np
class Objective:
"""
This class represents the objective function and its associated variables
"""
def __init__(self, func=None, dim=None, constraint=None, resample_func=None):
"""
Initialization.
:param func: objective function defined by the user
:param dim: a Dimension object, which describes the search space.
:param constraint: constraint function for POSS
:param resample_func: resample function for SSRacos
        (Note: sequential random embedding (reducedim) is enabled via parameter_set(), not as an __init__ argument.)
"""
self.__func = func
self.__dim = dim
# the function for inheriting solution attachment
self.__inherit = self.default_inherit
self.__post_inherit = self.default_post_inherit
# the constraint function
self.__constraint = constraint
# the history of optimization
self.__history = []
self.__resample_times = 1
self.__resample_func = self.resample_func if resample_func is None else resample_func
self.__balance_rate = 1
# for sequential random embedding
self.__reducedim = False
self.__A = None
self.__last_x = None
def parameter_set(self, parameter):
"""
Use a Parameter object to set attributes in Objective object.
:param parameter: a Parameter object
:return: no return
"""
if parameter.get_noise_handling() is True and parameter.get_suppression() is True:
self.__balance_rate = parameter.get_balance_rate()
if parameter.get_noise_handling() is True and parameter.get_resampling() is True:
self.__resample_times = parameter.get_resample_times()
if parameter.get_high_dim_handling() is True and parameter.get_reducedim() is True:
self.__reducedim = True
def construct_solution(self, x, parent=None):
"""
Construct a solution from x
:param x: a list
:param parent: the attached structure
:return: solution
"""
new_solution = Solution()
new_solution.set_x(x)
new_solution.set_attach(self.__inherit(parent))
return new_solution
def eval(self, solution):
"""
Use the objective function to evaluate a solution.
:param solution:
:return: value of fx(evaluation result) will be returned
"""
res = []
for i in range(self.__resample_times):
if self.__reducedim is False:
val = self.__func(solution)
else:
x = solution.get_x()
x_origin = x[0] * np.array(self.__last_x.get_x()) + np.dot(self.__A, np.array(x[1:]))
val = self.__func(Solution(x=x_origin))
res.append(val)
self.__history.append(val)
value = sum(res) / float(len(res))
solution.set_value(value)
solution.set_post_attach(self.__post_inherit())
return value
def resample(self, solution, repeat_times):
"""
Resample function for value suppression.
:param solution: a Solution object
:param repeat_times: repeat times
:return: repeat times
"""
if solution.get_resample_value() is None:
solution.set_resample_value(self.__resample_func(solution, repeat_times))
solution.set_value((1 - self.__balance_rate) * solution.get_value() +
self.__balance_rate * solution.get_resample_value())
solution.set_post_attach(self.__post_inherit())
return repeat_times
else:
return 0
def resample_func(self, solution, iteration_num):
result = []
for i in range(iteration_num):
result.append(self.eval(solution))
return sum(result) * 1.0 / len(result)
def eval_constraint(self, solution):
solution.set_value(
[self.eval(solution), self.__constraint(solution)])
solution.set_post_attach(self.__post_inherit())
def set_func(self, func):
"""
Set the objective function
:param func: the objective function
:return: no return value
"""
self.__func = func
def get_func(self):
return self.__func
def set_dim(self, dim):
self.__dim = dim
def get_dim(self):
return self.__dim
def set_inherit_func(self, inherit_func):
self.__inherit = inherit_func
def set_post_inherit_func(self, inherit_func):
self.__post_inherit = inherit_func
def get_post_inherit_func(self):
return self.__post_inherit
def get_inherit_func(self):
return self.__inherit
def set_constraint(self, constraint):
self.__constraint = constraint
return
def get_constraint(self):
return self.__constraint
def set_history(self, history):
self.__history = history
def get_history(self):
return self.__history
def get_history_bestsofar(self):
"""
Get the best-so-far history.
"""
history_bestsofar = []
bestsofar = pos_inf
for i in range(len(self.__history)):
if self.__history[i] < bestsofar:
bestsofar = self.__history[i]
history_bestsofar.append(bestsofar)
return history_bestsofar
def get_reducedim(self):
return self.__reducedim
def get_last_x(self):
return self.__last_x
def get_A(self):
return self.__A
def set_A(self, A):
self.__A = A
def set_last_x(self, x):
self.__last_x = x
def clean_history(self):
"""
clean the optimization history
"""
self.__history = []
@staticmethod
def default_inherit(parent=None):
"""
Default inherited function.
:param parent: the parent structure
:return: None
"""
return None
@staticmethod
def default_post_inherit(parent=None):
"""
Default post inherited function.
:param parent: the parent structure
:return: None
"""
return None
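# --- Illustrative usage sketch (editor addition, not part of the original zoopt module) ---
# A minimal example of the Objective API above. It assumes zoopt's Dimension class
# with its usual (size, regions, types) constructor; the block only runs when the
# module is executed directly, never on import.
if __name__ == "__main__":
    from zoopt.dimension import Dimension

    def sphere(solution):
        # toy objective: sum of squared coordinates
        return sum(v ** 2 for v in solution.get_x())

    dim = Dimension(2, [[-1, 1]] * 2, [True] * 2)  # 2-D continuous search space
    obj = Objective(sphere, dim)
    sol = obj.construct_solution([0.5, -0.2])      # wrap a raw x list into a Solution
    print(obj.eval(sol))                           # 0.29; also appended to the history
    print(obj.get_history_bestsofar())             # best-so-far curve, here [0.29]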
|
scripts/test_zmq/test_zmq/server.py | IDunion/indy-plenum | 148 | 12738649 | <filename>scripts/test_zmq/test_zmq/server.py<gh_stars>100-1000
#! /usr/bin/env python3
import argparse
import shutil
import socket
from concurrent.futures import ThreadPoolExecutor
from tempfile import TemporaryDirectory
from typing import NamedTuple
import zmq
import zmq.asyncio
from test_zmq.zstack import ZStack
SEED = b'E21bEA7DeaE981cBabCECd9FAeF4e340'
EXPECTED_ZMQ_REPLY = '{"type": "DIAGNOSE", "text": "ZMQ connection is possible"}'
EXPECTED_TCP_REPLY = '{"type": "DIAGNOSE", "text": "TCP connection is possible"}'
QUIT = False
HA = NamedTuple("HA", [
("host", str),
("port", int)])
def msg_handler(zstack, msg):
_, frm = msg
print(msg)
zstack.send(EXPECTED_ZMQ_REPLY, frm)
def loop():
loop = zmq.asyncio.ZMQEventLoop()
return loop
class SafeTemporaryDirectory(TemporaryDirectory):
@classmethod
def _cleanup(cls, name, warn_message):
shutil.rmtree(name, ignore_errors=True)
def cleanup(self):
if self._finalizer.detach():
shutil.rmtree(self.name, ignore_errors=True)
def up_zmq_server(server_ha):
print("ZMQ_SERVER: Seed is {}".format(SEED))
server = ZStack(name='Test_zmq',
ha=server_ha,
basedirpath=base_dir,
msgHandler=msg_handler,
seed=SEED,
onlyListener=True)
server.start()
return server
def up_tcp_server(server_ha):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(server_ha)
s.listen()
print("TCP_SERVER: Listen clients on {}".format(server_ha))
while True:
conn, addr = s.accept()
with conn:
print('TCP_SERVER: Connected by', addr)
while True:
data = conn.recv(1024)
if data:
print("TCP_SERVER: Received {} from client through tcp".format(data))
conn.sendall(EXPECTED_TCP_REPLY.encode())
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--zmq_port', help="Port which will be used for ZMQ client's connections")
parser.add_argument('--tcp_port', help="Port which will be used for TCP client's connections")
parser.add_argument('--addr', help="Address which will used for incoming client's connection. 0.0.0.0 by default",
default='0.0.0.0', required=False)
args = parser.parse_args()
zmq_server_ha = HA(args.addr,
                       int(args.zmq_port) if args.zmq_port else 9999)
tcp_server_ha = HA(args.addr,
int(args.tcp_port) if args.tcp_port else 10000)
with SafeTemporaryDirectory() as base_dir:
zmq_server = up_zmq_server(zmq_server_ha)
tpe = ThreadPoolExecutor(max_workers=4)
tpe.submit(up_tcp_server, tcp_server_ha)
async def wrapper():
while True:
await zmq_server.service()
looper = loop()
try:
looper.run_until_complete(wrapper())
except KeyboardInterrupt:
zmq_server.stop()
tpe.shutdown(wait=False)
print("Server was stopped")
exit(0)
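# --- Illustrative client sketch (editor addition, not part of the original script) ---
# A matching TCP probe for the server above could look like the commented snippet
# below; it assumes the server is already running locally with --tcp_port 10000.
#
#   import socket
#   with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
#       s.connect(("127.0.0.1", 10000))
#       s.sendall(b'{"type": "DIAGNOSE"}')
#       print(s.recv(1024))   # expected to match EXPECTED_TCP_REPLY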
|
SuperClippy/superclippy.py | zatherz/reddit | 444 | 12738656 | #/u/GoldenSights
import sys
import traceback
import time
import datetime
import sqlite3
import json
import praw
'''USER CONFIGURATION'''
"""GENERAL"""
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = "/r/Excel Clippy Office Assistant all-in-one moderator."
# This is a short description of what the bot does.
# For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "Goldtesting"
# This is the sub or list of subs to scan for new posts.
# For a single sub, use "sub1".
# For multiple subreddits, use "sub1+sub2+sub3+..."
PLAY_BOOT_SOUND = True
#Play boot.wav
MAXPOSTS = 100
# How many posts to get from the /new queue at once
WAIT = 30
# The number of seconds between cycles. The bot is completely inactive during
# this time
"""**************"""
"""CLIPPYPOINTS™ """
"""**************"""
POINT_STRING_USR = ["Solution Verified"]
# OP can use this string to award points in his thread.
POINT_STRING_MOD = ["+1 Point"]
# Moderators can use this to give points at any time.
POINT_FLAIR_CSS = "points"
# The CSS class associated with point flair
# Set to "" for none
POINT_REPLY = "You have awarded one point to _parent_"
# This is the phrase that User will receive
# _parent_ will be replaced by the username of the Parent.
POINT_EXEMPT = []
# Any usernames in this list will not receive points.
# Perhaps they have special flair.
POINT_OP_ONLY = True
# Is OP the only person who can give points?
# I recommend setting this to False. Other users might have the same question
# and would like to reward a good answer.
POINT_PER_THREAD = 200
# How many points can be distributed in a single thread?
POINT_DO_EXPLAIN = True
# If the max-per-thread is reached and someone tries to give a point, reply to
# them saying that the max has already been reached
POINT_EXPLAIN = """
Sorry, but %d point(s) have already been distributed in this thread.
This is the maximum allowed at this time.
"""%POINT_PER_THREAD
# If POINT_DO_EXPLAIN is True, this will be said to someone who tries to give a
# point after the max is reached
POINT_EXPLAIN_OP_ONLY = """
Hi!
It looks like you are trying to award a point and you are not the OP!
I am here to assist you!
What would you like help with?
[ClippyPoints^(TM)?](/r/excel/wiki/clippy)
[Flair Descriptions](http://www.reddit.com/r/excel/wiki/index)
"""
"""**************"""
"""FLAIR REMINDER"""
"""**************"""
FLAIR_WARN_DELAY = 86400
# This is the time, IN SECONDS, the user has to reply to the first comment.
# If he does not respond by this time, post is removed
NCDELAY = 172800
FLAIR_WARN_MESSAGE = """
Hi!
It looks like you are trying to ask a question!
Since you have not responded in the last 24 hours, I am here to assist you!
If your question has been solved, please be sure to update the flair.
Would you like help?
[Help Changing Your
Flair?](https://www.reddit.com/r/excel/wiki/flair)
[Asking Question and Sharing
Data](https://www.reddit.com/r/excel/wiki/sharingquestions)
"""
# This is what the bot tells you when you don't respond within FLAIR_WARN_DELAY.
# Uses reddit's usual Markdown formatting
FLAIR_IGNORE_MODS = False
# Do you want the bot to ignore posts made by moderators?
# Use True or False (With capitals! No quotations!)
FLAIR_IGNORE_SELF = False
#Do you want the bot to ignore selfposts?
FLAIR_SOLVED = "solved"
FLAIR_UNSOLVED = "unsolved"
FLAIR_CHAL = "challenge"
FLAIR_MANN = "Mod Announcement"
FLAIR_MNEWS = "Mod News"
FLAIR_WAITING = "Waiting on OP"
FLAIR_DISCUSS = "discussion"
FLAIR_ADVERTISEMENT = "advertisement"
FLAIR_TEMPLATE = "User Template"
FLAIR_PROTIP = "pro tip"
FLAIR_TRIGGERS = ["that works", "perfect", "thank you so much", "huge help",
"figured it out", "got it", "thanks for your help"]
#These encourage OP to change flair / award point
FLAIR_REMINDER = """
Hi!
It looks like you received an answer to your question! Since the post is
still marked as unsolved, I am here to assist you!
If your question has been solved, please be sure to update the flair.
Would you like help?
[Help Changing Your Flair?](http://www.reddit.com/r/excel/wiki/index)
[Flair Descriptions](http://www.reddit.com/r/excel/wiki/index)
"""
"""******************"""
"""FUNCTION REFERENCE"""
"""******************"""
DICT_TRIGGER = "clippy: "
# The trigger phrase for perfoming a lookup
DICT_FILE = 'reference.txt'
# The file with the Keys/Values
DICT_RESULT_FORM = "_value_"
# This is the form that the result will take
# You may use _key_ and _value_ to inject the key/value from the dict.
# You may delete one or both of these injectors.
DICT_LEVENSHTEIN = False
# If this is True it will use a function that is slow but can find
# misspelled keys
# If this is False it will use a simple function that is very fast but can
# only find keys which are spelled exactly
DICT_FAIL = """
Hi! It looks like you're looking for help with an Excel function!
Unfortunately I have not learned that function yet. If you'd like to
change that, [message the
moderators](http://www.reddit.com/message/compose?to=%2Fr%2Fexcel)!
"""
# The comment which is created when a function is requested
# but not in the file
"""***************"""
"""WELCOME MESSAGE"""
"""***************"""
WELCOME_SUBJECT = """Welcome to /r/Excel, I am here to help!"""
WELCOME_MESSAGE = """
Hi %s!
It looks like you are new to posting in /r/Excel.
Did you know we have a few ways to help you receive better help?
How can I help you?
[How to Share Your Questions](/r/excel/wiki/sharingquestions)
[Changing Link Flair](/r/excel/wiki/flair)
[ClippyPoints^TM](/r/excel/wiki/clippy)
^This ^message ^is ^auto-generated ^and ^is ^not ^monitored ^on ^a
^regular ^basis, ^replies ^to ^this ^message ^may ^not ^go ^answered.
^Remember ^to [^contact ^the
^moderators](http://www.reddit.com/message/compose?to=%2Fr%2Fexcel)
^to ^guarantee ^a ^response
"""
# Sent to the user if he has created his first post in the subreddit
'''All done!'''
class ClippyPoints:
def incrementflair(self, subreddit, username):
#Returns True if the operation was successful
if isinstance(subreddit, str):
subreddit = r.get_subreddit(subreddit)
success = False
print('\t\tChecking flair for ' + username)
flairs = subreddit.get_flair(username)
flairs = flairs['flair_text']
if flairs is not None and flairs != '':
print('\t\t:' + flairs)
try:
flairs = int(flairs)
flairs += 1
flairs = str(flairs)
success = True
except ValueError:
print('\t\tCould not convert flair to a number.')
else:
print('\t\tNo current flair. 1 point')
flairs = '1'
success = True
if success:
print('\t\tAssigning Flair: ' + flairs)
subreddit.set_flair(username, flair_text=flairs,
flair_css_class=POINT_FLAIR_CSS)
return success
def receive(self, comments):
print('\tClippyPoints received comments.')
subreddit = r.get_subreddit(SUBREDDIT)
for comment in comments:
cid = comment.id
cur.execute('SELECT * FROM clippy_points WHERE ID=?', [cid])
if not cur.fetchone():
print(cid)
cbody = comment.body.lower()
try:
if not comment.is_root:
cauthor = comment.author.name
print('\tChecking subreddit moderators')
moderators = [user.name for user in subreddit.get_moderators()]
byuser = False
if cauthor not in moderators and any(flag.lower() in cbody for flag in POINT_STRING_USR):
byuser = True
if byuser or (
(cauthor in moderators and any(flag.lower() in cbody for flag in POINT_STRING_MOD))):
print('\tFlagged %s.' % cid)
print('\t\tFetching parent and Submission data.')
parentcom = r.get_info(thing_id=comment.parent_id)
pauthor = parentcom.author.name
op = comment.submission.author.name
opid = comment.submission.id
if pauthor != cauthor:
if not any(exempt.lower() == pauthor.lower() for exempt in POINT_EXEMPT):
if POINT_OP_ONLY is False or cauthor == op or cauthor in moderators:
cur.execute('SELECT * FROM clippy_points_s WHERE ID=?', [opid])
fetched = cur.fetchone()
if not fetched:
cur.execute('INSERT INTO clippy_points_s VALUES(?, ?)', [opid, 0])
fetched = 0
else:
fetched = fetched[1]
if fetched < POINT_PER_THREAD:
if self.incrementflair(subreddit, pauthor):
print('\t\tWriting reply')
comment_confirm = comment.reply(POINT_REPLY.replace('_parent_', pauthor))
comment_confirm.distinguish()
cur.execute('UPDATE clippy_points_s SET count=? WHERE ID=?', [fetched+1, opid])
if byuser:
comment.submission.set_flair(flair_text=FLAIR_SOLVED, flair_css_class="solvedcase")
else:
print('\t\tMaxPerThread has been reached')
                                            if POINT_DO_EXPLAIN is True:
print('\t\tWriting reply')
comment.reply(POINT_EXPLAIN)
else:
print('\tOther users cannot give points.')
#comment_confirm = comment.reply(EXPLAINOPONLY)
#comment_confirm.distinguish()
else:
print('\t\tParent is on the exempt list.')
else:
print('\t\tCannot give points to self.')
else:
print('\t\tRoot comment. Ignoring.')
except AttributeError:
print('\t\tCould not fetch usernames. Cannot proceed.')
cur.execute('INSERT INTO clippy_points VALUES(?)', [cid])
sql.commit()
print('\tClippyPoints finished')
class ClippyFlairReminder:
def receive(self, posts):
print('\tClippyFlair received submissions')
now = datetime.datetime.now()
subreddit = r.get_subreddit(SUBREDDIT)
print('\tChecking subreddit moderators')
moderators = [user.name for user in subreddit.get_moderators()]
for post in posts:
found = False
ctimes = []
pid = post.id
try:
pauthor = post.author.name
except AttributeError:
pauthor = '[deleted]'
ptime = post.created_utc
curtime = getTime(True)
ctime = curtime
cur.execute('SELECT * FROM clippy_flair WHERE id=?', [pid])
if not cur.fetchone():
if post.is_self is False or FLAIR_IGNORE_SELF is False:
if pauthor not in moderators or FLAIR_IGNORE_MODS is False:
comments = praw.helpers.flatten_tree(post.comments)
try:
flair = post.link_flair_text.lower()
except AttributeError:
flair = ''
if flair == FLAIR_UNSOLVED.lower():
print(pid + ': Unsolved')
for comment in comments:
try:
cauthor = comment.author.name
except AttributeError:
cauthor = '[deleted]'
if cauthor != pauthor:
found = True
break
if not found:
print('\tNo comments by another user. No action taken.')
else:
print('\tFound comment by other user. Marking as Waiting.')
post.set_flair(flair_text=FLAIR_WAITING, flair_css_class="waitingonop")
elif flair == FLAIR_WAITING.lower():
print(pid + ': Waiting')
for comment in comments:
try:
cauthor = comment.author.name
except AttributeError:
cauthor = '[deleted]'
if cauthor == pauthor:
found = True
pbody = comment.body.lower()
else:
ctimes.append(comment.created_utc)
if found is True:
                                if not any(trigger.lower() in pbody for trigger in POINT_STRING_USR):
print('\tFound comment by OP. All clear, changing flair back to unsolved.')
post.set_flair(flair_text=FLAIR_UNSOLVED, flair_css_class="notsolvedcase")
#print('\tUpvoting comment..')
#post.upvote()
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if any(key.lower() in pbody for key in FLAIR_TRIGGERS):
                                        print('Replying to ' + pid + ' by ' + pauthor)
                                        newcomment = comment.reply(FLAIR_REMINDER)
                                        newcomment.distinguish()
elif found is False and len(ctimes) > 0:
print('\tNo comments by OP. Checking time limit.')
ctime = min(ctimes)
difference = curtime - ctime
if difference > FLAIR_WARN_DELAY:
print('\tTime is up.')
print('\tLeaving Comment')
newcomment = post.add_comment(FLAIR_WARN_MESSAGE)
print('\tDistinguishing Comment')
newcomment.distinguish()
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
else:
differences = str('%.0f' % (FLAIR_WARN_DELAY - difference))
print('\tStill has ' + differences + 's.')
elif found is False and len(ctimes) == 0:
print('\tNo comments by OP, but no other comments are available.')
else:
print(pid + ': Neither flair')
if flair == FLAIR_DISCUSS.lower():
print(pid + ': is a discussion post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_ADVERTISEMENT.lower():
print(pid + ': is an advertisement post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_TEMPLATE.lower():
print(pid + ': is a User Template post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_PROTIP.lower():
print(pid + ': is a ProTip post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_SOLVED.lower():
print(pid + ': is a SOLVED post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_MANN.lower():
print(pid + ': is a Mod Annoucement post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if flair == FLAIR_MNEWS.lower():
print(pid + ': is a Mod News post, adding to ignore list...')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
else:
cur.execute('SELECT * FROM clippy_flair WHERE id=?', [pid])
if not cur.fetchone():
print('\tAssigning Flair')
post.set_flair(flair_text=FLAIR_UNSOLVED, flair_css_class="notsolvedcase")
else:
#cur.execute('INSERT INTO flair VALUES("%s")' % pid)
if pauthor in moderators and FLAIR_IGNORE_MODS is True:
print(pid + ', ' + pauthor + ': Ignoring Moderator')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
if post.is_self is True and FLAIR_IGNORE_SELF is True:
print(pid + ', ' + pauthor + ': Ignoring Selfpost')
cur.execute('INSERT INTO clippy_flair VALUES(?)', [pid])
sql.commit()
print('\tClippyFlair finished')
class ClippyReference:
def __init__(self):
with open(DICT_FILE, 'r') as f:
self.DICT = json.loads(f.read())
def levenshtein(self, s1, s2):
#Levenshtein algorithm to figure out how close two strings are two each other
#Courtesy http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
if len(s1) < len(s2):
return self.levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
def findsuper(self, comment, tolerance= 1):
results = []
used = []
for itemname in self.DICT:
itemlength = len(itemname.split())
pos = 0
commentsplit = comment.split()
end = False
while not end:
try:
gram = commentsplit[pos:pos+itemlength]
gramjoin = ' '.join(gram)
lev = self.levenshtein(itemname, gramjoin)
if lev <= tolerance:
if itemname not in used:
used.append(itemname)
result = DICT_RESULT_FORM
result = result.replace('_key_', itemname)
result = result.replace('_value_', self.DICT[itemname])
results.append(result)
pos += 1
if pos > len(commentsplit):
end = True
except IndexError:
end = True
return results
def findsimple(self, comment):
results = []
for itemname in self.DICT:
if itemname.lower() in comment.lower():
result = DICT_RESULT_FORM
result = result.replace('_key_', itemname)
result = result.replace('_value_', self.DICT[itemname])
results.append(result)
return results
def receive(self, comments):
lev = "True" if DICT_LEVENSHTEIN else "False"
print('\tClippyReference received comments (Lev: %s)'%lev)
for comment in comments:
results = []
cid = comment.id
try:
cauthor = comment.author.name
cur.execute('SELECT * FROM clippy_reference WHERE ID=?',[cid])
if not cur.fetchone():
print('\t' + cid)
if cauthor.lower() != r.user.name.lower():
cbody = comment.body.lower()
if DICT_LEVENSHTEIN is True:
results = self.findsuper(cbody)
else:
results = self.findsimple(cbody)
if DICT_TRIGGER.lower() in cbody.lower() and (
len(results) == 0):
#They made a request, but we didn't find anything
results.append(DICT_FAIL)
if len(results) > 0:
newcomment = '\n\n'.join(results)
print('\t\tReplying to %s with %d items...'%
(cauthor, len(results)), end="")
sys.stdout.flush()
comment.reply(newcomment)
print('done.')
else:
#Will not reply to self
pass
cur.execute('INSERT INTO clippy_reference VALUES(?)',[cid])
sql.commit()
except AttributeError:
# Comment Author is deleted
pass
print('\tClippyReference finished')
class ClippyWelcome:
def receive(self, posts):
print('\tClippyWelcome received submissions')
for post in posts:
try:
pauthor = post.author.name
pid = post.id
cur.execute('SELECT * FROM clippy_welcome WHERE NAME=?', [pauthor])
if not cur.fetchone():
print('\t' + pid)
print('\t\tFound new user: ' + pauthor)
print('\t\tSending message...', end="")
sys.stdout.flush()
#r.send_message(pauthor, WELCOME_SUBJECT, WELCOME_MESSAGE%pauthor, captcha=None)
cur.execute('INSERT INTO clippy_welcome VALUES(?, ?)', (pauthor, pid))
print('done.')
sql.commit()
except AttributeError:
#Post author is deleted
pass
print('\tClippyWelcome finished')
def getTime(bool):
timeNow = datetime.datetime.now(datetime.timezone.utc)
timeUnix = timeNow.timestamp()
if bool is False:
return timeNow
else:
return timeUnix
def clippy_manager():
try:
subreddit = r.get_subreddit(SUBREDDIT)
print('Getting new comments')
newcomments =list( subreddit.get_comments(limit=MAXPOSTS))
clippyreference.receive(newcomments)
clippypoints.receive(newcomments)
print('Getting new submissions')
newposts = list(subreddit.get_new(limit=MAXPOSTS))
clippywelcome.receive(newposts)
clippyflair.receive(newposts)
except Exception:
traceback.print_exc()
if __name__ == "__main__":
sql = sqlite3.connect('superclippy.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS clippy_welcome(NAME TEXT, ID TEXT)')
cur.execute('CREATE TABLE IF NOT EXISTS clippy_reference(ID TEXT)')
cur.execute('CREATE TABLE IF NOT EXISTS clippy_points(ID TEXT)')
cur.execute('CREATE TABLE IF NOT EXISTS clippy_points_s(ID TEXT, count INT)')
cur.execute('CREATE TABLE IF NOT EXISTS clippy_flair(id TEXT)')
print('Loaded SQL Database')
sql.commit()
if PLAY_BOOT_SOUND:
try:
import winsound
import threading
def bootsound():
winsound.PlaySound('boot.wav', winsound.SND_FILENAME)
soundthread = threading.Thread(target=bootsound)
soundthread.daemon = True
soundthread.start()
except Exception:
pass
print('Logging in...', end="")
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
sys.stdout.flush()
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
print('done.')
print('Starting Points...', end="")
clippypoints = ClippyPoints()
print('done.')
print('Starting Welcome...', end="")
clippywelcome = ClippyWelcome()
print('done.')
print('Starting Flair...', end="")
clippyflair = ClippyFlairReminder()
print('done.')
print('Starting Reference...', end="")
clippyreference = ClippyReference()
print('done.')
while True:
clippy_manager()
print('Sleeping %d seconds.\n\n'%WAIT)
time.sleep(WAIT)
|
tests/file_tests.py | nbanmp/seninja | 109 | 12738661 | from ..expr import BVS, BVV
from ..memory.sym_file import SymFile
from ..os_models.os_file import OsFileHandler
def test_1(): # read unconstrained
f = SymFile("a")
res = f.read(1)
assert len(res) == 1
assert isinstance(res[0], BVS)
assert res[0].name == "unconstrained_a_0"
def test_2(): # read concrete
f = SymFile("a")
f.write([BVV(0xff, 8)])
f.seek(0)
res = f.read(1)
assert len(res) == 1
assert isinstance(res[0], BVV)
assert res[0].value == 255
def test_3():
os = OsFileHandler()
fd = os.open("stdin", "r--")
assert fd == 0
def test_4():
os = OsFileHandler()
fd = os.open("stdin", "r--")
assert os.is_open(fd)
os.close(fd)
assert not os.is_open(fd)
def test_5():
os = OsFileHandler()
fd1 = os.open("A", "-w-")
fd2 = os.open("A", "r--")
os.write(fd1, [BVV(0xff, 8)])
res = os.read(fd2, 1)
assert len(res) == 1
assert isinstance(res[0], BVV)
assert res[0].value == 255
def test_6():
os1 = OsFileHandler()
os2 = OsFileHandler()
fd1 = os1.open("A", "-w-")
os1.write(fd1, [BVV(0xff, 8)])
fd2 = os1.open("A", "r--")
os1.copy_to(os2)
res = os2.read(fd2, 1)
assert len(res) == 1
assert isinstance(res[0], BVV)
assert res[0].value == 255
|
aw_nas/weights_manager/diff_super_net.py | Harald-R/aw_nas | 195 | 12738723 | """
Supernet for differentiable rollouts.
"""
import contextlib
import torch
from torch.nn import functional as F
from aw_nas import assert_rollout_type, utils
from aw_nas.rollout.base import DartsArch, DifferentiableRollout, BaseRollout
from aw_nas.utils import data_parallel, use_params
from aw_nas.weights_manager.base import CandidateNet
from aw_nas.weights_manager.shared import SharedNet, SharedCell, SharedOp
__all__ = ["DiffSubCandidateNet", "DiffSuperNet"]
class DiffSubCandidateNet(CandidateNet):
def __init__(self, super_net, rollout: DifferentiableRollout, gpus=tuple(),
virtual_parameter_only=True, eval_no_grad=True):
super(DiffSubCandidateNet, self).__init__(eval_no_grad=eval_no_grad)
self.super_net = super_net
self._device = super_net.device
self.gpus = gpus
self.arch = rollout.arch
self.virtual_parameter_only = virtual_parameter_only
def get_device(self):
return self._device
@contextlib.contextmanager
def begin_virtual(self):
w_clone = {k: v.clone() for k, v in self.named_parameters()}
if not self.virtual_parameter_only:
buffer_clone = {k: v.clone() for k, v in self.named_buffers()}
yield
for n, v in self.named_parameters():
v.data.copy_(w_clone[n])
del w_clone
if not self.virtual_parameter_only:
for n, v in self.named_buffers():
v.data.copy_(buffer_clone[n])
del buffer_clone
def forward(self, inputs, detach_arch=True): #pylint: disable=arguments-differ
if detach_arch:
arch = [
DartsArch(
op_weights=op_weights.detach(),
edge_norms=edge_norms.detach() if edge_norms is not None else None
) for op_weights, edge_norms in self.arch
]
else:
arch = self.arch
if not self.gpus or len(self.gpus) == 1:
return self.super_net.forward(inputs, arch, detach_arch=detach_arch)
if arch[0].op_weights.ndimension() == 2:
arch = [
DartsArch(
op_weights=a.op_weights.repeat(len(self.gpus), 1),
edge_norms=(a.edge_norms.repeat(len(self.gpus)) \
if a.edge_norms is not None else None))
for a in arch
]
else:
# Ugly fix for rollout_size > 1
# call scatter here and stack...
# split along dimension 1,
# then concatenate along dimension 0 for `data_parallel` to scatter it again
num_split = len(self.gpus)
rollout_batch_size = arch[0].op_weights.shape[1]
assert rollout_batch_size % num_split == 0
split_size = rollout_batch_size // num_split
# arch = [torch.cat(torch.split(a, split_size, dim=1), dim=0) for a in arch]
# Note: edge_norms (1-dim) do not support batch_size, just repeat
arch = [DartsArch(
op_weights=torch.cat(torch.split(a.op_weights, split_size, dim=1), dim=0),
edge_norms=(a.edge_norms.repeat(len(self.gpus)) \
if a.edge_norms is not None else None))
for a in arch]
return data_parallel(self.super_net, (inputs, arch), self.gpus,
module_kwargs={"detach_arch": detach_arch})
def _forward_with_params(self, inputs, params, **kwargs): #pylint: disable=arguments-differ
with use_params(self.super_net, params):
return self.forward(inputs, **kwargs)
def named_parameters(self, *args, **kwargs): #pylint: disable=arguments-differ
return self.super_net.named_parameters(*args, **kwargs)
def named_buffers(self, *args, **kwargs): #pylint: disable=arguments-differ
return self.super_net.named_buffers(*args, **kwargs)
def eval_data(self, data, criterions, mode="eval", **kwargs): #pylint: disable=arguments-differ
"""
Override eval_data, to enable gradient.
Returns:
results (list of results return by criterions)
"""
self._set_mode(mode)
outputs = self.forward_data(data[0], **kwargs)
return utils.flatten_list([c(data[0], outputs, data[1]) for c in criterions])
class DiffSuperNet(SharedNet):
NAME = "diff_supernet"
def __init__(self, search_space, device, rollout_type="differentiable",
gpus=tuple(),
num_classes=10, init_channels=16, stem_multiplier=3,
max_grad_norm=5.0, dropout_rate=0.1,
use_stem="conv_bn_3x3", stem_stride=1, stem_affine=True,
preprocess_op_type=None,
cell_use_preprocess=True,
cell_use_shortcut=False,
cell_shortcut_op_type="skip_connect",
cell_group_kwargs=None,
candidate_virtual_parameter_only=False,
candidate_eval_no_grad=True):
super(DiffSuperNet, self).__init__(
search_space, device, rollout_type,
cell_cls=DiffSharedCell, op_cls=DiffSharedOp,
gpus=gpus,
num_classes=num_classes, init_channels=init_channels,
stem_multiplier=stem_multiplier,
max_grad_norm=max_grad_norm, dropout_rate=dropout_rate,
use_stem=use_stem, stem_stride=stem_stride, stem_affine=stem_affine,
preprocess_op_type=preprocess_op_type,
cell_use_preprocess=cell_use_preprocess,
cell_group_kwargs=cell_group_kwargs,
cell_use_shortcut=cell_use_shortcut,
cell_shortcut_op_type=cell_shortcut_op_type)
self.candidate_virtual_parameter_only = candidate_virtual_parameter_only
self.candidate_eval_no_grad = candidate_eval_no_grad
# ---- APIs ----
def extract_features(self, inputs, rollout_or_arch, **kwargs):
if isinstance(rollout_or_arch, BaseRollout):
# from extract_features (wrapper wm)
arch = rollout_or_arch.arch
else:
# from candidate net
arch = rollout_or_arch
return super().extract_features(inputs, arch, **kwargs)
def assemble_candidate(self, rollout):
return DiffSubCandidateNet(self, rollout, gpus=self.gpus,
virtual_parameter_only=self.candidate_virtual_parameter_only,
eval_no_grad=self.candidate_eval_no_grad)
@classmethod
def supported_rollout_types(cls):
return [assert_rollout_type("differentiable")]
class DiffSharedCell(SharedCell):
def num_out_channel(self):
return self.num_out_channels * self._steps
def forward(self, inputs, arch, detach_arch=True): # pylint: disable=arguments-differ
assert self._num_init == len(inputs)
states = [op(_input) for op, _input in zip(self.preprocess_ops, inputs)]
offset = 0
# in parallel forward, after scatter, a namedtuple will be come a normal tuple
arch = DartsArch(*arch)
use_edge_normalization = arch.edge_norms is not None
for i_step in range(self._steps):
to_ = i_step + self._num_init
if use_edge_normalization:
act_lst = [
arch.edge_norms[offset + from_] * # edge norm factor scalar on this edge
self.edges[from_][to_](
state,
arch.op_weights[offset + from_], # op weights vector on this edge
detach_arch=detach_arch
)
for from_, state in enumerate(states)
]
else:
act_lst = [
self.edges[from_][to_](
state, arch.op_weights[offset + from_], detach_arch=detach_arch
)
for from_, state in enumerate(states)
]
new_state = sum(act_lst)
offset += len(states)
states.append(new_state)
out = torch.cat(states[-self._steps:], dim=1)
if self.use_shortcut and self.layer_index != 0:
out = out + self.shortcut_reduction_op(inputs[-1])
return out
class DiffSharedOp(SharedOp):
def forward(self, x, weights, detach_arch=True): # pylint: disable=arguments-differ
if weights.ndimension() == 2:
# weights: (batch_size, num_op)
if not weights.shape[0] == x.shape[0]:
# every `x.shape[0] % weights.shape[0]` data use the same sampled arch weights
assert x.shape[0] % weights.shape[0] == 0
weights = weights.repeat(x.shape[0] // weights.shape[0], 1)
return sum(
[
weights[:, i].reshape(-1, 1, 1, 1) * op(x)
for i, op in enumerate(self.p_ops)
]
)
out_act: torch.Tensor = 0.0
# weights: (num_op)
if self.partial_channel_proportion is None:
for w, op in zip(weights, self.p_ops):
if detach_arch and w.item() == 0:
continue
act = op(x).detach_() if w.item() == 0 else op(x)
out_act += w * act
else:
op_channels = x.shape[1] // self.partial_channel_proportion
x_1 = x[:, :op_channels, :, :] # these channels goes through op
x_2 = x[:, op_channels:, :, :] # these channels skips op
# apply pooling if the ops have stride=2
if self.stride == 2:
x_2 = F.max_pool2d(x_2, 2, 2)
for w, op in zip(weights, self.p_ops):
# if detach_arch and w.item() == 0:
# continue # not really sure about this
act = op(x_1)
# if w.item() == 0:
# act.detach_() # not really sure about this either
out_act += w * act
out_act = torch.cat((out_act, x_2), dim=1)
# PC-DARTS implements a deterministic channel_shuffle() (not what they said in the paper)
# ref: https://github.com/yuhuixu1993/PC-DARTS/blob/b74702f86c70e330ce0db35762cfade9df026bb7/model_search.py#L9
out_act = self._channel_shuffle(out_act, self.partial_channel_proportion)
# this is the random channel shuffle
# channel_perm = torch.randperm(out_act.shape[1])
# out_act = out_act[:, channel_perm, :, :]
return out_act
@staticmethod
def _channel_shuffle(x: torch.Tensor, groups: int):
"""channel shuffle for PC-DARTS"""
n, c, h, w = x.shape
x = x.view(n, groups, -1, h, w).transpose(1, 2).contiguous()
x = x.view(n, c, h, w).contiguous()
return x
|
salem/__init__.py | sunt05/salem | 147 | 12738724 | <gh_stars>100-1000
"""
Salem package
"""
from __future__ import division
from os import path
from os import makedirs
import sys
from functools import wraps
import pyproj
try:
from .version import version as __version__
except ImportError: # pragma: no cover
raise ImportError('Salem is not properly installed. If you are running '
'from the source directory, please instead create a '
'new virtual environment (using conda or virtualenv) '
'and then install it in-place by running: '
'pip install -e .')
def lazy_property(fn):
"""Decorator that makes a property lazy-evaluated."""
attr_name = '_lazy_' + fn.__name__
@property
@wraps(fn)
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
# Default proj
wgs84 = pyproj.Proj(proj='latlong', datum='WGS84')
# Path to the cache directory
cache_dir = path.join(path.expanduser('~'), '.salem_cache')
if not path.exists(cache_dir):
makedirs(cache_dir)
download_dir = path.join(cache_dir, 'downloads')
if not path.exists(download_dir):
makedirs(download_dir)
sample_data_gh_commit = '758f7ddd0fa6b5b1bd4c63b6dcfe8d5eec0f4c59'
sample_data_dir = path.join(cache_dir, 'salem-sample-data-' +
sample_data_gh_commit)
# python version
python_version = 'py3'
if sys.version_info.major == 2:
python_version = 'py2'
# API
from salem.gis import *
from salem.datasets import *
from salem.sio import read_shapefile, read_shapefile_to_grid, grid_from_dataset
from salem.sio import (open_xr_dataset, open_metum_dataset,
open_wrf_dataset, open_mf_wrf_dataset)
from salem.sio import DataArrayAccessor, DatasetAccessor
from salem.utils import get_demo_file, reduce
try:
from salem.graphics import get_cmap, DataLevels, Map
except ImportError as err:
if 'matplotlib' not in str(err):
raise
def get_cmap():
raise ImportError('requires matplotlib')
def DataLevels():
raise ImportError('requires matplotlib')
def Map():
raise ImportError('requires matplotlib')
from salem.wrftools import geogrid_simulator
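# --- Illustrative sketch (editor addition, not part of the original salem package) ---
# The lazy_property decorator defined above computes a property once and caches it
# on the instance under a "_lazy_" attribute. A standalone use might look like:
#
#   class Circle:
#       def __init__(self, radius):
#           self.radius = radius
#
#       @lazy_property
#       def area(self):
#           print("computed once")
#           return 3.141592653589793 * self.radius ** 2
#
#   c = Circle(2.0)
#   c.area   # runs the method, caches the result as c._lazy_area
#   c.area   # served from the cache, no recomputation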
|
scripts/27_send_sms.py | Kirklin12/python-scripts | 2,076 | 12738730 | <reponame>Kirklin12/python-scripts
import requests
message = raw_input('Enter a Message: ')
number = raw_input('Enter the phone number: ')
payload = {'number': number, 'message': message}
r = requests.post("http://textbelt.com/text", data=payload)
if r.json()['success']:
print('Success!')
else:
print('Error!')
|
keras2onnx/ke2onnx/layer_spec.py | TomWildenhain-Microsoft/keras-onnx | 362 | 12738734 | # SPDX-License-Identifier: Apache-2.0
import tensorflow as tf
from ..proto import keras, is_tf_keras, is_keras_older_than
from ..proto.tfcompat import is_tf2
_layer = keras.layers
_adv_activations = keras.layers.advanced_activations
def _default_layer_name_extractor(fstr_list, node_name):
for fstr in fstr_list:
idx = fstr.rfind('{}/')
if node_name.endswith(fstr[idx + 3:]):
klen = len(fstr) + idx - 2 # 2 = len('{}')
return node_name[:len(node_name) - klen]
return None
def _simple_layer_name_extractor(fstr_list, node_name):
ri = node_name.rindex('/')
return node_name[:ri]
def _conv_layer_spec_outputs(layer, node):
if type(layer) == _layer.Conv1D:
return node.name + '/Squeeze'
activation_map = {
keras.activations.linear: '',
tf.nn.sigmoid: 'Sigmoid',
tf.nn.softmax: 'Softmax',
tf.nn.relu: 'Relu',
tf.nn.elu: 'Elu',
tf.nn.tanh: 'Tanh',
tf.nn.swish: 'mul'}
node_act = activation_map.get(layer.activation, None)
if node_act is None:
actname_map = {a_.__name__: a_ for a_ in activation_map if hasattr(a_, "__name__")}
act_trans = actname_map.get(layer.activation.__name__, None)
if act_trans is not None:
node_act = activation_map.get(act_trans)
assert node_act is not None, "Unsupported activation in the layer({})".format(layer.activation)
if node_act:
ri = node.name.rindex('/')
return node.name[:ri + 1] + node_act
else:
if not layer.use_bias:
if node.inputs[0].op.type == 'SpaceToBatchND':
return node.name + '/BatchToSpaceND'
else:
return node.name
else:
ri = node.name.rindex('/')
return node.name[:ri + 1] + 'BiasAdd'
def _relu_like_spec_outputs(layer, node):
if isinstance(layer, _adv_activations.PReLU):
ri = node.name.rindex('/')
return node.name[:ri + 1] + 'add'
return node.name
_keras_layer_spec = {
# layer-type: ([pattern-list], [extract-layer-name, output-name-generator(optional)]
_layer.AveragePooling1D: (["{}/AvgPool"], [_default_layer_name_extractor]),
_layer.AveragePooling2D: (["{}/AvgPool"], [_default_layer_name_extractor]),
_layer.AveragePooling3D: (["{}/AvgPool"], [_default_layer_name_extractor]),
_layer.MaxPooling1D: (["{}/MaxPool"], [_default_layer_name_extractor]),
_layer.MaxPooling2D: (["{}/MaxPool"], [_default_layer_name_extractor]),
_layer.MaxPooling3D: (["{}/MaxPool"], [_default_layer_name_extractor]),
_layer.Conv1D: (["{}/conv1d"], [_simple_layer_name_extractor, _conv_layer_spec_outputs]),
_layer.Conv2D: (["{}/Conv2D"], [_simple_layer_name_extractor, _conv_layer_spec_outputs]),
_layer.Conv2DTranspose: (["{}/conv2d_transpose"], [_simple_layer_name_extractor, _conv_layer_spec_outputs]),
_layer.DepthwiseConv2D: (["{}/depthwise"], [_simple_layer_name_extractor, _conv_layer_spec_outputs]),
_layer.LeakyReLU: (["{}/LeakyRelu"], [_default_layer_name_extractor]),
_adv_activations.PReLU: (["{}/Relu"], [_simple_layer_name_extractor, _relu_like_spec_outputs]),
_layer.Reshape: (["{}/Reshape"], [_default_layer_name_extractor])
}
if not is_keras_older_than('2.2.0'):
_keras_layer_spec.update({
_adv_activations.ReLU: (["{}/Relu"], [_simple_layer_name_extractor, _relu_like_spec_outputs]),
})
if is_tf_keras and is_tf2 and hasattr(_layer, 'normalization_v2'):
_keras_layer_spec.update({
_layer.normalization_v2.BatchNormalization: (
["{}/FusedBatchNormV3", "{}/batchnorm/add_1"], [_default_layer_name_extractor])
})
def keras_layer_spec(layer_type):
return _keras_layer_spec.get(layer_type, (None, []))
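# --- Illustrative sketch (editor addition, not part of the original keras2onnx module) ---
# keras_layer_spec() returns (pattern_list, [name_extractor, output_fn, ...]) for a
# layer type registered above, and (None, []) for anything else. For example:
#
#   patterns, extractors = keras_layer_spec(_layer.Conv2D)
#   # patterns   -> ["{}/Conv2D"]
#   # extractors -> [_simple_layer_name_extractor, _conv_layer_spec_outputs]
#   keras_layer_spec(_layer.Dense)   # -> (None, []) : Dense needs no special handling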
|
superset/migrations/versions/31bb738bd1d2_move_pivot_table_v2_legacy_order_by_to_.py | delorenzosoftware/superset | 18,621 | 12738744 | <reponame>delorenzosoftware/superset<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""move_pivot_table_v2_legacy_order_by_to_timeseries_limit_metric
Revision ID: <KEY>
Revises: fe23025b9441
Create Date: 2021-12-17 16:56:55.186285
"""
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "fe23025b9441"
import json
import logging
from alembic import op
from sqlalchemy import Column, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
from superset import db
Base = declarative_base()
logger = logging.getLogger("alembic")
class Slice(Base):
__tablename__ = "slices"
id = Column(Integer, primary_key=True)
params = Column(Text)
viz_type = Column(String(250))
def upgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
slices = session.query(Slice).filter(Slice.viz_type == "pivot_table_v2").all()
for slc in slices:
try:
params = json.loads(slc.params)
legacy_order_by = params.pop("legacy_order_by", None)
if legacy_order_by:
params["series_limit_metric"] = legacy_order_by
slc.params = json.dumps(params, sort_keys=True)
except Exception as e:
logger.exception(
f"An error occurred: parsing params for slice {slc.id} failed."
f"You need to fix it before upgrading your DB."
)
raise e
session.commit()
session.close()
def downgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
slices = session.query(Slice).filter(Slice.viz_type == "pivot_table_v2").all()
for slc in slices:
try:
params = json.loads(slc.params)
series_limit_metric = params.pop("series_limit_metric", None)
if series_limit_metric:
params["legacy_order_by"] = series_limit_metric
slc.params = json.dumps(params, sort_keys=True)
except Exception as e:
logger.exception(
f"An error occurred: parsing params for slice {slc.id} failed. "
"You need to fix it before downgrading your DB."
)
raise e
session.commit()
session.close()
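# --- Illustrative sketch (editor addition, not part of the original migration) ---
# The core of upgrade()/downgrade() is a pure JSON key rename on each slice's
# params blob; isolated from the DB session it boils down to the round-trip below
# (only reachable when the file is run directly, never during an alembic run).
if __name__ == "__main__":
    sample = json.loads('{"viz_type": "pivot_table_v2", "legacy_order_by": "count"}')
    sample["series_limit_metric"] = sample.pop("legacy_order_by")  # upgrade direction
    print(json.dumps(sample, sort_keys=True))
    sample["legacy_order_by"] = sample.pop("series_limit_metric")  # downgrade direction
    print(json.dumps(sample, sort_keys=True))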
|