max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
example_nodes/__init__.py | 996268132/NodeGraphQt | 582 | 40556 |
#!/usr/bin/python
import os
import sys
import ast
VALID_NODE_TYPE = ['BaseNode', 'AutoNode']
def detectNodesFromText(filepath):
"""returns Node names from a python script"""
froms = []
with open(filepath, "r") as source:
tree = ast.parse(source.read())
for node in tree.body:
if isinstance(node, ast.ClassDef):
for base in node.bases:
if base.id in VALID_NODE_TYPE:
for indef in node.body:
if isinstance(indef, ast.Assign):
for target in indef.targets:
if target.id == '__identifier__':
froms.append(node.name)
return froms
def getNodesRecursively(path=__file__):
""" Returns imported nodes. """
Nodes = []
basedir, filename = os.path.split(path)
rootModule = os.path.basename(basedir)
for root, dirs, files in os.walk(basedir, topdown=False):
if root not in sys.path:
sys.path.append(root)
for name in files:
if name.endswith('.py') and not name.startswith('_'):
module_name = root.split(rootModule)[1].replace('\\', '.') + name[:-3]
modulePath = os.path.join(root, name)
froms = detectNodesFromText(modulePath)
if not froms:
continue
try:
mod = __import__(module_name, globals(), locals(), froms, 0)
for node in froms:
Nodes.append(getattr(mod, node))
except ImportError as e:
print ('Error in importing class: %s' % (e))
continue
return Nodes
Nodes = getNodesRecursively()
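# Hedged usage sketch (added for illustration, not part of the original module).
# Assuming NodeGraphQt exposes a NodeGraph class with a register_node() method,
# the node classes detected above could be registered like this:
#
#     from NodeGraphQt import NodeGraph
#     graph = NodeGraph()
#     for node_cls in Nodes:
#         graph.register_node(node_cls)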
|
33. Python Programs/FactorialOfNumbers.py | Ujjawalgupta42/Hacktoberfest2021-DSA | 225 | 40580 |
for i in range(int(input())):
fact=1
a=int(input())
for j in range(1,a+1,1):
fact=fact*j
print(fact)
def factorial(n):
return 1 if (n == 1 or n == 0) else n * factorial(n - 1)
num = int(input('Enter number'))
print("Factorial of",num,"is",
factorial(num))
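# Hedged cross-check (illustrative addition): the standard library already ships
# math.factorial, which can be used to sanity-check the versions above, e.g.
#
#     import math
#     assert factorial(5) == math.factorial(5) == 120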
|
legacy/models/resnet/tensorflow2/train_tf2_resnet.py | kevinyang8/deep-learning-models | 129 | 40624 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
mpirun -np 8 --H localhost:8 \
-bind-to none -map-by slot -mca pml ob1 -mca -x TF_CUDNN_USE_AUTOTUNE=0 \
-x TF_ENABLE_NHWC=1 -x FI_OFI_RXR_INLINE_MR_ENABLE=1 -x NCCL_TREE_THRESHOLD=4294967296 \
-x PATH -x NCCL_SOCKET_IFNAME=^docker0,lo -x NCCL_MIN_NRINGS=13 -x NCCL_DEBUG=INFO \
-x HOROVOD_CYCLE_TIME=0.5 -x HOROVOD_FUSION_THRESHOLD=67108864 python new_resnet.py --synthetic
source activate tensorflow2_p36 && \
mpirun -np 8 --H localhost:8 -mca plm_rsh_no_tree_spawn 1 \
-bind-to socket -map-by slot \
-x HOROVOD_HIERARCHICAL_ALLREDUCE=1 -x HOROVOD_FUSION_THRESHOLD=16777216 \
-x NCCL_MIN_NRINGS=4 -x LD_LIBRARY_PATH -x PATH -mca pml ob1 -mca btl ^openib \
-x NCCL_SOCKET_IFNAME=$INTERFACE -mca btl_tcp_if_exclude lo,docker0 \
-x TF_CPP_MIN_LOG_LEVEL=0 \
python -W ignore ~/new_resnet.py \
--synthetic --batch_size 128 --num_batches 100 --clear_log 2 > train.log
'''
import os
import numpy as np
import getpass
import tensorflow as tf
import horovod.tensorflow as hvd
from tensorflow.python.util import nest
import argparse
from time import time, sleep
@tf.function
def parse(record):
features = {'image/encoded': tf.io.FixedLenFeature((), tf.string),
'image/class/label': tf.io.FixedLenFeature((), tf.int64)}
parsed = tf.io.parse_single_example(record, features)
image = tf.image.decode_jpeg(parsed['image/encoded'])
image = tf.image.resize(image, (224, 224))
image = tf.image.random_brightness(image, .1)
image = tf.image.random_jpeg_quality(image, 70, 100)
image = tf.image.random_flip_left_right(image)
image = tf.cast(image, tf.float32)
label = tf.cast(parsed['image/class/label'] - 1, tf.int32)
return image, label
def data_gen():
input_shape = [224, 224, 3]
while True:
image = tf.random.uniform(input_shape)
label = tf.random.uniform(minval=0, maxval=999, shape=[1], dtype=tf.int32)
yield image, label
def create_data(data_dir = None, synthetic=False, batch_size=256):
if synthetic:
ds = tf.data.Dataset.from_generator(data_gen, output_types=(tf.float32, tf.int32))
else:
filenames = [os.path.join(data_dir, i) for i in os.listdir(data_dir)]
ds = tf.data.Dataset.from_tensor_slices(filenames).shard(hvd.size(), hvd.rank())
ds = ds.shuffle(1000, seed=7 * (1 + hvd.rank()))
ds = ds.interleave(
tf.data.TFRecordDataset, cycle_length=1, block_length=1)
ds = ds.map(parse, num_parallel_calls=10)
ds = ds.apply(tf.data.experimental.shuffle_and_repeat(10000, seed=5 * (1 + hvd.rank())))
ds = ds.batch(batch_size)
return ds
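# Hedged note (illustrative, not in the original script): input pipelines like the one
# built above are commonly finished with an explicit prefetch so data loading overlaps
# with the training step, e.g.
#
#     ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
#
# tf.data.experimental.AUTOTUNE is assumed to be available in the TF2 version targeted here.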
@tf.function
def train_step(model, opt, loss_func, images, labels, first_batch):
with tf.GradientTape() as tape:
probs = model(images, training=True)
loss_value = loss_func(labels, probs)
tape = hvd.DistributedGradientTape(tape, compression=hvd.Compression.fp16)
grads = tape.gradient(loss_value, model.trainable_variables)
opt.apply_gradients(zip(grads, model.trainable_variables))
if first_batch:
hvd.broadcast_variables(model.variables, root_rank=0)
hvd.broadcast_variables(opt.variables(), root_rank=0)
return loss_value
def add_bool_argument(cmdline, shortname, longname=None, default=False, help=None):
if longname is None:
shortname, longname = None, shortname
elif default == True:
raise ValueError("""Boolean arguments that are True by default should not have short names.""")
name = longname[2:]
feature_parser = cmdline.add_mutually_exclusive_group(required=False)
if shortname is not None:
feature_parser.add_argument(shortname, '--' + name, dest=name, action='store_true', help=help, default=default)
else:
feature_parser.add_argument('--' + name, dest=name, action='store_true', help=help, default=default)
feature_parser.add_argument('--no' + name, dest=name, action='store_false')
return cmdline
def add_cli_args():
cmdline = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cmdline.add_argument('--data_dir', default='',
help="""Path to dataset in TFRecord format
(aka Example protobufs). Files should be
named 'train-*' and 'validation-*'.""")
cmdline.add_argument('-b', '--batch_size', default=128, type=int,
help="""Size of each minibatch per GPU""")
cmdline.add_argument('--num_batches', default=100, type=int,
help="""Number of batches to run.
Ignored during eval or if num epochs given""")
cmdline.add_argument('-lr', '--learning_rate', default=0.01, type=float,
help="""Start learning rate""")
cmdline.add_argument('--momentum', default=0.01, type=float,
help="""Start learning rate""")
add_bool_argument(cmdline, '--synthetic', help="""Whether to use synthetic data for training""")
return cmdline
def main():
# setup horovod
start = time()
hvd.init()
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
# get command line args
cmdline = add_cli_args()
FLAGS, unknown_args = cmdline.parse_known_args()
ds = create_data(FLAGS.data_dir, FLAGS.synthetic, FLAGS.batch_size)
model = tf.keras.applications.ResNet50(weights=None, classes=1000)
opt = tf.keras.optimizers.SGD(learning_rate=FLAGS.learning_rate * hvd.size(), momentum=0.1)
loss_func = tf.keras.losses.SparseCategoricalCrossentropy()
loop_time = time()
if hvd.local_rank() == 0:
print("Step \t Throughput \t Loss")
for batch, (images, labels) in enumerate(ds):
loss = train_step(model, opt, loss_func, images, labels, batch==0)
if hvd.local_rank() == 0:
duration = time() - loop_time
loop_time = time()
throughput = (hvd.size()*FLAGS.batch_size)/duration
print("{} \t images/sec: {} \t {}".format(batch, throughput, loss))
if batch==FLAGS.num_batches:
break
if hvd.rank() == 0:
print("\nFinished in {}".format(time()-start))
if __name__=='__main__':
main() |
faketests/slowtests/test_3.py | Djailla/pytest-sugar | 418 | 40627 | import time
import pytest
@pytest.mark.parametrize("index", range(7))
def test_cat(index):
"""Perform several tests with varying execution times."""
time.sleep(0.2 + (index * 0.1))
assert True
|
recipes/Python/577462_A_Buttonbar_program_with_color_/recipe-577462.py | tdiprima/code | 2,023 | 40631 | '''
;#template` {-path} {-menu} {-s1} {-s2} {-s3}
;#option`-path`Path to controlling file`F`c:\source\python\projects\menu\buttonbar.py`
;#option`-menu`Path to menu file`F`c:\source\python\projects\menu\test.ini`
;#option`-s1`First section`X`info`
;#option`-s2`Second section`X`help`
;#option`-s3`Third section`X`data`
;#end
'''
mHelpText = '''
# ----------------------------------------------
# Name: ButtonBarV1Ex
# Description:
## D20G-76 Popup button bar using Qeditor style menus.ini file, selects sections to use (ExPopen)
#
# Author: <NAME>
# Date: 10/27/2010
# Copyright 2010 by St. Thomas Software
# ----------------------------------------------
# This program is freeware. It may be used
# for any moral purpose. You use it at your
# own risk. St. Thomas Software makes no
# guarantees to its fitness to do anything.
#
# If you feel you must pay for it. Please
# send one million dollars to
#
# The International Rescue Committee
# 122 East 42nd Street
# New York, N.Y. 10168-1289
#
# Ok, we are in a recession. So, make it a half
# million.
#
# Example of .reg file entry
# [HKEY_CLASSES_ROOT\textfile\Shell\TextTools]
# @="T&ext Tools
# "EditFlags"=hex:01,00,00,00
# [HKEY_CLASSES_ROOT\textfile\Shell\TextTools\Command]
# @="c:\\sys\\python25\\python.exe c:\\bin\\ButtonBarV1Ex.py \"%1\" {p}\\menus.ini;{o}\\menus.ini;c:\\bin\\menus.ini ide "
#
#
# Syntax:
# ButtonBarV1Ex.py [options] <file path> <menu path> <first section> [ <another section> ... ]
#
# Options:
#
# -b <color> - default button background color
# -d <directory> - set working directory
# -e <name>=<value> - set environment variable
# -h - print help text
# -l <color> - default label background color
# -o - set orientation to horizontal
#
# The command line template extracted from the menu file may contain macros of the form '{x}' which
# are replaced by parts of the file path or other string. The Do.py and Do2.py modules are used.
# %1 is replaced by the selected file path. The file path string is also used for display
# at the top of the button bar. If no expansion is performed any string can be used and displayed.
#
# The second parameter is the name of an initialization file similar to the one shown below.
# Each line contains a prompt displayed on a button and a command separated by a comma.
# Macros are replaced before execution.
#
# The remaining parameters name sections to be used to make the button bar. The order in the
# initialization file is maintained. Selection is case insensitive.
#
# Do.py and Do2.py are used to expand the command line, the menu selection and each section
# name. The command line can also contain '{pr}' which will be replaced by the prompt
# displayed on the button. Also, any string enclosed in braces not a macro will be taken
# as a command name in the registry entry for the primary files file type.
#
# The Menu File:
#
# The menu file follows the syntax used by the QEditor menus.ini file. The same syntax as an .ini
# file except a ',' is used to separate key from value. In this case the values are command
# line templates usually containing one or more macro strings.
#
# Lines beginning with ';' are comments unless followed by '#' in which case the line may contain
# a special command. Command line options are also available for the same functions
#
# ;#cd`c:\bin - Sets the working directory to c:\bin or '-d c:\bin'
# ;#set`abc=def - Sets the environment variable abc to 'def' or '-s abc=def'
# ;#button`#00CCCC - Sets the background of succeeding buttons to #00CCCC. or '-b #00CCCC'
# ;#label`light blue - Sets the background of labels created by succeeding '-' lines
# to light blue. If included before the first section the
# color will also be used for section header labels or '-l "light blue"'
# Within sections not included in the button bar special commands are ignored. The '`' usually
# appears under the '~' on the keyboard.
#
# -o option will produce a horizontal button bar
#
# Section names and command prompts can contan a file pattern enclosed within two '/' or two '!'
# to include the section or line if the file path matches or does not match the pattern. Examples
# are shown below.
#
# Example initialization file (Text Tools.ini)
;#set`test=A Short String
;#set`use=c:\sys\python27\pythonw.exe
;#set`gain=none
;#cd`\source\tcl
;#cd`c:\source\python\Projects\Program Execution
;#label`yellow
[Test1]
;#button`#00EEEE
;#label`#00CCCC
test 1,c:\bin\messagebox.exe {a} {b} {p} "{f}" {n} {e} {o} %test%
-Directory List
Dir,c:\windows\system32\cmd.exe /C dir "{p}\*.{e}"
DirX,c:\windows\system32\cmd.exe /C dir *.*
Dir2,c:\windows\system32\cmd.exe /C dir c:\source\Python\*.*
;#label`light gray
-Program execution
/*.exe/Run,"{a}" I am running "{a}"
/*.py/Run,c:\bin\messagebox.exe I am running a Python program "{a}"
/*.py/Run 2,%use% "{a}"
/*.exe/Run 2,"{a}" I am running "{a}"
[/*.py/Test2]
Edit T,{o}\qeditor.exe "{a}"
Dir *,%comspec% /C dir {.}\*.*
'''
import sys
import os
import getopt
import fnmatch
import Tkinter
from Tkconstants import *
import tkMessageBox
from _winreg import *
import Do # Recipe: 577439
from Do2 import ExpandArg # Recipe: 577440
from DoCommand import BuildCommand # Recipe: 577441
import subprocess
mCmds = {}
def Expand(pCommand, pFilePath, pPrompt, pSep=','):
'''
Expand
'''
# ---- Standard macro expansion with added {pr} macro
# Can separate arguments with ',' fo force argument by argument
# expansion
if pCommand.find(pSep) >= 0:
try:
lArgs = pCommand.split(pSep)
lCommand = ''
lDefaultPath = pFilePath
for lArg in lArgs:
lVal = ExpandArg(lArg, pFilePath, lDefaultPath)
if lVal.find('{pr}') >= 0: # insert button prompt
lVal = lVal.replace('{pr}', pPrompt)
lCommand = lCommand + lVal + " "
except Exception, e:
print 'BBV1Ex',e
else:
lCommand = Do.Expand(pCommand, pFilePath)
if lCommand.find('{pr}') >= 0:
lCommand = lCommand.replace('{pr}', pPrompt)
# ---- Remove {} for compatibility with DoM.py
lCommand = lCommand.replace('{}', ' ')
# ---- Remaining macros will be replaced by command of same name
# in registry
lPos = lCommand.find("{")
if lPos >= 0:
lEndPos = lCommand.find("}")
if lEndPos > lPos:
try:
lKey = lCommand[lPos+1:lEndPos]
lDotPos = lKey.rfind('.')
if lDotPos > 0:
lExt = lKey[lDotPos]
lKey = lKey[0:lDotPos]
else:
lDotPos = pFilePath.rfind('.')
lExt = pFilePath[lDotPos:]
lReplace = BuildCommand(pFilePath, lKey, lExt, [])
lCommand = lCommand[0:lPos] + lReplace + lCommand[lEndPos+1:]
except:
pass
# ---- replace %1 and %*
else:
lPos = lCommand.rfind("%1")
if lPos >= 0:
lCommand = lCommand.replace("%1", pFilePath)
lPos = lCommand.rfind("%*")
if lPos >= 0:
lCommand = lCommand.replace("%*", "")
# ---- Remove !! used by DoM.py
lPos = lCommand.rfind('!!')
if lPos > 0:
lCommand = lCommand[0:lPos]
return lCommand
def showit(pEvent):
'View expansion of selected command'
lPrompt = pEvent.widget.cget("text")
lCommand = mCmds[lPrompt]
lCommand = Expand(lCommand, mFileName, lPrompt)
tkMessageBox.showinfo('Expanded Command:', lPrompt + " = " + lCommand)
def submitnow(pEvent):
'Execute selected command'
lPrompt = pEvent.widget.cget("text")
lCommand = mCmds[lPrompt]
lCommand = Expand(lCommand, mFileName, lPrompt)
lCommand = '"' + lCommand + '"'
subprocess.Popen(lCommand, shell=True)
def Commands():
return mCmds
def Build(pFrame, pMenuName, pFileName, pLabelList, pSubmit=submitnow, pCallBack=None,
pDefaultLabel='yellow', pDefaultButton='light blue', pSide=TOP, pFill=X):
'''
Build menu from menu file
The menu file may contain commands to set the current directory and to
set specified environment variables. This is done while the file is
being read. They are not dependent on the selected button. The last
command encountered will be active.
set environment variable name to value
;#set`name=value
set current directory to path
;#cd`path
set label background color
;#label`color
set button background color
;#button`color
'''
#
lPos = pFileName.rfind('\\')
if lPos >= 0:
lFileName = pFileName[lPos+1:].lower()
else:
lFileName = pFileName.lower()
lExist = os.path.exists(pFileName)
lFile = open(pMenuName,'r')
lFileText = lFile.readlines()
lFile.close()
lDefaultButton = lButtonColor = pDefaultButton
lDefaultLabel = lLabelColor = pDefaultLabel
label = Tkinter.Label(pFrame, text=pFileName, bg=lLabelColor)
#label.pack(side=TOP, fill=X)
label.pack(side=pSide, fill=pFill)
lKeep = False
lSectionCount = 0
for lText in lFileText:
if len(lText) > 1:
if lText[0] != "[":
# ---- Menu splitter line
if lText[0] == "-" and lKeep:
if len(lText) > 1:
lPrompt = lText[1:-1]
if lPrompt.find('{') >= 0:
lPrompt = ExpandCmd(lPrompt, pFileName)
# ---- Display label for - line
# can be removed without affecting remainder of program
if len(lPrompt) > 0 and lKeep == True:
label= Tkinter.Label(pFrame, text=lPrompt, bg=lLabelColor)
#label.pack(side=TOP, fill=X)
label.pack(side=pSide, fill=pFill)
# ---- process special command
elif lText.startswith(";#") and (lSectionCount == 0 or lKeep == True):
lPos = lText.find("`") # <---- Fields are separated by ` (below ~)
if lPos > 0:
lCh = lText[2]
# ---- Set environment variable
if lCh == 's': # set environment variable
lEqPos = lText.find("=")
if lEqPos > lPos:
lKey = lText[lPos+1:lEqPos].strip()
lValue = lText[lEqPos+1:].strip()
if lValue.find('{') >= 0:
lValue = ExpandCmd(lValue, pFileName)
os.environ[lKey] = lValue
# ---- Change working directory
elif lCh == 'c': # set working directory
if lPos > 0 and lPos < len(lText):
lText = lText[lPos+1:].strip()
if lText.find('{') >= 0:
lText = ExpandCmd(lText, pFileName)
os.chdir(lText)
elif lCh == 'b':
lButtonColor = lText[lPos+1:].strip().lower()
if lSectionCount == 0:
lDefaultButton = lButtonColor
elif lCh == 'l':
lLabelColor = lText[lPos+1:].strip().lower()
if lSectionCount == 0:
lDefaultLabel = lLabelColor
elif pCallBack != None:
pCallBack(lText)
else: # ignore
pass
# ---- Comment
elif lText[0] == ";":
pass
# ---- Menu section is being skipped
elif lKeep == False:
pass
# ---- Command template
else:
lPos = lText.find(",")
if lPos > 0:
lPrompt = lText[:lPos]
lCommand = lText[lPos+1:]
# ---- Filter commands based on match with file name
# /*3.py/Run,c:\source\python31\pyrhonw.exe ...
# can be removed without affecting remainder of program
if lPrompt[0] == '/':
try:
(lMask, lPrompt) = lPrompt[1:].split('/')
if not fnmatch.fnmatch(lFileName, lMask.lower()):
continue
except Exception, e:
continue
# ---- Filter commands based on mismatch with file name
# !*3.py!Run,c:\source\python27\pythonw.exe ...
# can be removed without affecting remainder of program
elif lPrompt[0] == '!':
try:
(lMask, lPrompt) = lPrompt[1:].split('!')
if fnmatch.fnmatch(lFileName, lMask.lower()):
continue
except Exception, e:
continue
# ---- Create toolbar button
mCmds[lPrompt] = lCommand
try:
button = Tkinter.Button(pFrame, text=lPrompt, bg=lButtonColor)
except:
button = Tkinter.Button(pFrame, text=lPrompt)
#button.pack(side=TOP,fill=X)
button.pack(side=pSide, fill=pFill)
button.bind("<Button-1>", pSubmit) ### (1)
button.bind("<Button-3>", showit)
# ---- Section Header
else:
lSectionCount += 1
lLabelColor = lDefaultLabel
lButtonColor = lDefaultButton
if lText.find('{') >= 0:
lText = ExpandCmd(label, pFileName)
label = lText[1:-2].lower()
# ---- Filter section based on match with file name
# [/*3.py/Special]
# can be removed without affecting remainder of program
if label[0] == '/':
try:
(lMask, label) = label[1:].split('/')
if not fnmatch.fnmatch(lFileName, lMask.lower()):
lKeep = False
continue
except Exception, e:
lKeep = False
continue
# ---- Filter section based on mismatch with file name
# [!*3.py!Special]
# can be removed without affecting remainder of program
elif label[0] == '!':
try:
(lMask, label) = label[1:].split('!')
if fnmatch.fnmatch(lFileName, lMask.lower()):
lKeep = False
continue
except Exception, e:
lKeep = False
continue
# ---- label must begin with letter
elif (label[0] < 'a') or (label[0] > 'z'):
label = label[1:]
# ---- Select/Unselect section
if pLabelList == [] or label in pLabelList:
lKeep = True
label = Tkinter.Label(pFrame,text=label.capitalize(), bg=lLabelColor)
#label.pack(side=TOP, fill=X)
label.pack(side=pSide, fill=pFill)
else:
lKeep = False
label = Tkinter.Label(pFrame,text="", bg=lLabelColor)
#label.pack(side=TOP, fill=X)
label.pack(side=pSide, fill=pFill)
button = Tkinter.Button(pFrame,text='Exit', command=pFrame.quit, bg=lButtonColor)
#button.pack(side=TOP, fill=X)
button.pack(side=pSide, fill=pFill)
if __name__ == '__main__':
(mOptions, mArgs) = getopt.getopt(sys.argv[1:], 'b:d:e:hl:o')
# ---- Set run options
mDefaultLabel = 'light green'
mDefaultButton = 'light blue'
mOrient = VERTICAL
for (mKey, mValue) in mOptions:
if mKey == '-b':
mDefaultButton = mValue
elif mKey == '-d': # Set current directory
if mValue.find('}') >= 0:
mValue = Expand(mValue, mFileName)
os.chdir(mValue)
elif mKey == '-e': # Set environment variable
Do.setenviron(mValue, mFileName)
elif mKey == '-h':
print mHelpText
elif mKey == '-l':
mDefaultLabel = mValue
elif mKey == '-o':
mOrient = HORIZONTAL
tk = Tkinter.Tk()
if len(mArgs) > 0:
try:
mDefaultMenu = os.environ['MENUPATH']
if mVerbose:
print 'BB1Ex Default menu path', mDefaultMenu
except:
mDefaultMenu = ''
try:
mDefaultSection = os.environ['SECTION']
if mVerbose:
print 'BB1Ex Default section', mDefaultSection
except:
mDefaultSection = '' # <------- Default section - Change this
if mDefaultSection == '':
mDefaultSection = ''
# ---- First argument is absolute path to file to be processed
mFileName = mArgs[0]
mFileName = mFileName.replace("/","\\")
# ---- Second argument is absolute path to initialization menu
# Selected menu can depend on file path (ie. {o}\menus.ini) See do2.py
if len(mArgs) > 1:
mMenuArg = mArgs[1] # generate button bar
mMenuArg += ';' + mDefaultMenu
else:
mMenuArg = mDefaultMenu
# ---- Prevent QEditor from performing replacement, may be removed
mMenuArg = mMenuArg.replace("(","{")
mMenuArg = mMenuArg.replace(")","}")
# ---- Expand menu name and/or select from list
if mMenuArg.find('{') >= 0:
mMenuName = ExpandArg(mMenuArg, mFileName, mDefaultMenu)
elif mMenuArg.find(';') >= 0:
for mMenu in mMenuArg.split(';'):
if os.path.exists(mMenu):
mMenuName = mMenu
break
else:
mMenuName = mDefaultMenu
else:
mMenuName = mMenuArg
# ---- Remaining arguments list sections to include in menu
if len(mArgs) > 2:
mLabelList = []
for lItem in mArgs[2:]:
if lItem == '*':
break
if lItem == '-':
lItem = mDefaultSection
lItem = lItem.lower()
# ---- Prevent QEditor from performing replacement, may be removed
lItem = lItem.replace("(","{")
lItem = lItem.replace(")","}")
# ---- Selected section can be dependent on file path
if lItem.find('{') >= 0:
lItem = ExpandArg(lItem, mFileName, '')
mLabelList.append(lItem)
elif mDefaultSection != '':
mLabelList = [ mDefaultSection ]
else:
mLabelList = [ ]
# ---- Build buttonbar
mFrame = Tkinter.Frame(tk, relief=RIDGE, borderwidth=2)
mFrame.pack(fill=BOTH, expand=1)
if mOrient == VERTICAL:
Build(mFrame, mMenuName, mFileName, mLabelList, pDefaultLabel=mDefaultLabel, pDefaultButton=mDefaultButton, pSide=TOP, pFill=X)
else:
Build(mFrame, mMenuName, mFileName, mLabelList, pDefaultLabel=mDefaultLabel, pDefaultButton=mDefaultButton, pSide=LEFT, pFill=Y)
# ---- Enter event loop
tk.mainloop()
else:
tkMessageBox.showinfo('Syntax Error','File name required')
|
osp/graphs/osp_graph.py | davidmcclure/open-syllabus-project | 220 | 40657 |
import networkx as nx
import random
from osp.common.utils import query_bar
from osp.graphs.graph import Graph
from osp.citations.models import Text, Citation, Text_Index
from osp.corpus.models import Document
from itertools import combinations
from peewee import fn
from clint.textui import progress
class OSP_Graph(Graph):
def add_edges(self, max_texts=20):
"""
For each syllabus, register citation pairs as edges.
Args:
max_texts (int): Ignore docs with > than N citations.
"""
text_ids = (
fn.array_agg(Text.id)
.coerce(False)
.alias('text_ids')
)
docs = (
Citation
.select(Citation.document, text_ids)
.join(Text)
.having(fn.count(Text.id) <= max_texts)
.where(Text.display==True)
.where(Text.valid==True)
.group_by(Citation.document)
)
for row in query_bar(docs):
for tid1, tid2 in combinations(row.text_ids, 2):
# If the edge exists, increment the weight.
if self.graph.has_edge(tid1, tid2):
self.graph[tid1][tid2]['weight'] += 1
# Otherwise, initialize the edge.
else:
self.graph.add_edge(tid1, tid2, weight=1)
def add_nodes(self):
"""
Register displayed texts.
"""
for t in progress.bar(Text_Index.rank_texts()):
text = t['text']
self.graph.add_node(text.id, dict(
label = text.pretty('title'),
author = text.pretty('surname'),
count = text.count,
score = t['score'],
))
def trim_unconnected_components(self):
"""
Remove all but the largest connected component.
"""
subgraphs = sorted(
nx.connected_component_subgraphs(self.graph),
key=len, reverse=True
)
self.graph = subgraphs[0]
def trim_texts_by_count(self, min_count=100):
"""
Remove all texts with counts below a threshold.
Args:
min_count (int)
"""
for tid, text in self.graph.nodes(data=True):
if text['count'] < min_count:
self.graph.remove_node(tid)
def trim_edges(self, keep=0.5):
"""
Randomly prune a certain percentage of edges.
Args:
keep (float)
"""
for tid1, tid2 in self.graph.edges():
if random.random() > keep:
self.graph.remove_edge(tid1, tid2)
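# Hedged usage sketch (illustrative, not part of the original module). The base Graph
# class is assumed to set up self.graph; the apparent call order is nodes first, then
# edges, then the trimming passes.
#
#     g = OSP_Graph()
#     g.add_nodes()
#     g.add_edges(max_texts=20)
#     g.trim_texts_by_count(min_count=100)
#     g.trim_unconnected_components()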
|
RecoJets/JetProducers/python/ak4PFClusterJets_cfi.py | ckamtsikis/cmssw | 852 | 40706 | import FWCore.ParameterSet.Config as cms
from RecoJets.JetProducers.PFClusterJetParameters_cfi import *
from RecoJets.JetProducers.AnomalousCellParameters_cfi import *
ak4PFClusterJets = cms.EDProducer(
"FastjetJetProducer",
PFClusterJetParameters,
AnomalousCellParameters,
jetAlgorithm = cms.string("AntiKt"),
rParam = cms.double(0.4)
)
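# Hedged note (illustrative, not part of the original config): CMSSW parameter sets of
# this form are typically specialised with clone(), for example an anti-kT R=0.8 variant
# (the ak8 label below is a made-up example name):
#
#     ak8PFClusterJets = ak4PFClusterJets.clone(rParam = 0.8)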
|
template/python/base-class.py | hiroebe/sonictemplate-vim | 130 | 40710 |
"""
{{_name_}}
"""
class {{_expr_:substitute('{{_input_:name}}', '\w\+', '\u\0', '')}}(object):
def __init__(self{{_cursor_}}):
def __repr__(self):
return
|
atlas/foundations_contrib/src/foundations_contrib/models/project_listing.py | DeepLearnI/atlas | 296 | 40745 |
class ProjectListing(object):
@staticmethod
def list_projects(redis_connection):
"""Returns a list of projects store in redis with their
creation timestamps
Arguments:
redis_connection {RedisConnection} -- Redis connection to use as a provider for data
Returns:
list -- The list of project names and creation dates
"""
from foundations_contrib.utils import string_from_bytes
projects = redis_connection.zrange('projects', 0, -1, withscores=True)
return [{'name': string_from_bytes(name), 'created_at': created_at} for name, created_at in projects]
@staticmethod
def find_project(redis_connection, project_name):
"""Returns a single of projects store in redis with it's
creation timestamp
Arguments:
redis_connection {RedisConnection} -- Redis connection to use as a provider for data
project_name {str} -- Name of the project to find
Returns:
dict -- A dictionary with the 2 attributes described above, or None if the project does not exist
"""
created_at = redis_connection.zscore('projects', project_name)
if created_at is None:
return None
return {'name': project_name, 'created_at': created_at}
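# Hedged usage sketch (illustrative, not part of the original module). It assumes a
# redis-py style client whose zadd/zrange/zscore calls behave like the RedisConnection
# described in the docstrings above; 'demo-project' is a made-up project name.
#
#     import redis
#     conn = redis.Redis()
#     conn.zadd('projects', {'demo-project': 1590000000.0})
#     ProjectListing.list_projects(conn)   # [{'name': 'demo-project', 'created_at': 1590000000.0}]
#     ProjectListing.find_project(conn, 'demo-project')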
|
quantsbin/derivativepricing/namesnmapper.py | quantsbin/Quantsbin | 132 | 40746 | """
developed by Quantsbin - Jun'18
"""
from enum import Enum
class AssetClass(Enum):
EQOPTION = 'EqOption'
FXOPTION = 'FXOption'
FUTOPTION = 'FutOption'
COMOPTION = 'ComOption'
class DerivativeType(Enum):
VANILLA_OPTION = 'Vanilla Option'
class PricingModel(Enum):
BLACKSCHOLESMERTON = 'BSM'
BLACK76 = 'B76'
GK = 'GK'
MC_GBM = "MC_GBM"
MC_GBM_LSM = "MC_GBM_LSM"
BINOMIAL = "Binomial"
class UnderlyingParameters(Enum):
SPOT = "spot0"
VOLATILITY = "volatility"
PRICEDATE = "_pricing_date"
RF_RATE = "rf_rate"
CNV_YIELD = "cnv_yield"
COST_YIELD = "cost_yield"
UNEXPLAINED = "unexplained"
class RiskParameter(Enum):
DELTA = 'delta'
GAMMA = 'gamma'
THETA = 'theta'
VEGA = 'vega'
RHO = 'rho'
PHI = 'phi'
RHO_FOREIGN = 'rho_foreign'
RHO_CONV = 'rho_conv_yield'
class VanillaOptionType(Enum):
CALL = 'Call'
PUT = 'Put'
class ExpiryType(Enum):
AMERICAN = 'American'
EUROPEAN = 'European'
class UdlType(Enum):
INDEX = 'Index'
STOCK = 'Stock'
FX = 'Currency'
COMMODITY = 'Commodity'
FUTURES = 'Futures'
class DivType(Enum):
DISCRETE = 'Discrete'
YIELD = 'Yield'
OBJECT_MODEL = {
UdlType.STOCK.value: {ExpiryType.EUROPEAN.value: [PricingModel.BLACKSCHOLESMERTON.value, PricingModel.MC_GBM.value
, PricingModel.BINOMIAL.value],
ExpiryType.AMERICAN.value: [PricingModel.MC_GBM.value, PricingModel.BINOMIAL.value]}
, UdlType.FUTURES.value: {ExpiryType.EUROPEAN.value: [PricingModel.BLACK76.value, PricingModel.MC_GBM.value
, PricingModel.BINOMIAL.value],
ExpiryType.AMERICAN.value: [PricingModel.MC_GBM.value, PricingModel.BINOMIAL.value]}
, UdlType.FX.value: {ExpiryType.EUROPEAN.value: [PricingModel.GK.value, PricingModel.MC_GBM.value
, PricingModel.BINOMIAL.value],
ExpiryType.AMERICAN.value: [PricingModel.MC_GBM.value, PricingModel.BINOMIAL.value]}
, UdlType.COMMODITY.value: {ExpiryType.EUROPEAN.value: [PricingModel.GK.value, PricingModel.MC_GBM.value
, PricingModel.BINOMIAL.value],
ExpiryType.AMERICAN.value: [PricingModel.MC_GBM.value, PricingModel.BINOMIAL.value]}
}
DEFAULT_MODEL = {
UdlType.STOCK.value:
{DerivativeType.VANILLA_OPTION.value: {ExpiryType.EUROPEAN.value: PricingModel.BLACKSCHOLESMERTON.value,
ExpiryType.AMERICAN.value: PricingModel.BINOMIAL.value},
},
UdlType.FUTURES.value:
{DerivativeType.VANILLA_OPTION.value: {ExpiryType.EUROPEAN.value: PricingModel.BLACK76.value,
ExpiryType.AMERICAN.value: PricingModel.BINOMIAL.value},
},
UdlType.FX.value:
{DerivativeType.VANILLA_OPTION.value: {ExpiryType.EUROPEAN.value: PricingModel.GK.value,
ExpiryType.AMERICAN.value: PricingModel.BINOMIAL.value},
},
UdlType.COMMODITY.value:
{DerivativeType.VANILLA_OPTION.value: {ExpiryType.EUROPEAN.value: PricingModel.GK.value,
ExpiryType.AMERICAN.value: PricingModel.BINOMIAL.value},
}
}
IV_MODELS = [PricingModel.BLACKSCHOLESMERTON.value, PricingModel.BLACK76.value, PricingModel.GK.value]
ANALYTICAL_GREEKS = [PricingModel.BLACKSCHOLESMERTON.value, PricingModel.BLACK76.value, PricingModel.GK.value]
from . import pricingmodels as pm
MODEL_MAPPER = {
PricingModel.BLACKSCHOLESMERTON.value: pm.BSM,
PricingModel.BLACK76.value: pm.B76,
PricingModel.GK.value: pm.GK,
PricingModel.MC_GBM.value: pm.MonteCarloGBM,
PricingModel.BINOMIAL.value: pm.BinomialModel
}
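# Hedged lookup sketch (illustrative): how the tables above might be combined to pick a
# default pricing model class for a European vanilla option on a stock.
#
#     _model_name = DEFAULT_MODEL[UdlType.STOCK.value][DerivativeType.VANILLA_OPTION.value][ExpiryType.EUROPEAN.value]
#     _model_cls = MODEL_MAPPER[_model_name]   # expected to resolve to pm.BSM ('BSM')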
|
tests/test_jwt.py | kschu91/pyoidc | 373 | 40779 | import os
from oic.utils.jwt import JWT
from oic.utils.keyio import build_keyjar
from oic.utils.keyio import keybundle_from_local_file
__author__ = "roland"
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/keys"))
keys = [
{"type": "RSA", "key": os.path.join(BASE_PATH, "cert.key"), "use": ["enc", "sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["enc"]},
]
jwks, keyjar, kidd = build_keyjar(keys)
issuer = "https://fedop.example.org"
def _eq(l1, l2):
return set(l1) == set(l2)
def test_jwt_pack():
_jwt = JWT(keyjar, lifetime=3600, iss=issuer).pack()
assert _jwt
assert len(_jwt.split(".")) == 3
def test_jwt_pack_and_unpack():
srv = JWT(keyjar, iss=issuer)
_jwt = srv.pack(sub="sub")
info = srv.unpack(_jwt)
assert _eq(info.keys(), ["jti", "iat", "exp", "iss", "sub", "kid"])
class TestJWT(object):
"""Tests for JWT."""
def test_unpack_verify_key(self):
srv = JWT(keyjar, iss=issuer)
_jwt = srv.pack(sub="sub")
# Remove the signing key from keyjar
keyjar.remove_key("", "RSA", "")
# And add it back as verify
kb = keybundle_from_local_file(
os.path.join(BASE_PATH, "cert.key"), "RSA", ["ver"]
)
# keybundle_from_local_file doesn't assign kid, so assign manually
kb._keys[0].kid = kidd["sig"]["RSA"]
keyjar.add_kb("", kb)
info = srv.unpack(_jwt)
assert info["sub"] == "sub"
|
kili/queries/issue/queries.py | ASonay/kili-playground | 214 | 40799 |
"""
Queries of issue queries
"""
def gql_issues(fragment):
"""
Return the GraphQL issues query
"""
return f'''
query ($where: IssueWhere!, $first: PageSize!, $skip: Int!) {{
data: issues(where: $where, first: $first, skip: $skip) {{
{fragment}
}}
}}
'''
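# Hedged illustration (not part of the original module): calling
#
#     gql_issues('id createdAt')
#
# returns the query text above with 'id createdAt' spliced in as the selection set;
# the field names here are made up for the example.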
GQL_ISSUES_COUNT = '''
query($where: IssueWhere!) {
data: countIssues(where: $where)
}
'''
|
server/bench-wrk/hge_wrk_bench.py | gh-oss-contributor/graphql-engine-1 | 27,416 | 40812 | from sportsdb_setup import HGETestSetup, HGETestSetupArgs
from run_hge import HGE
import graphql
import multiprocessing
import json
import os
import docker
import ruamel.yaml as yaml
import cpuinfo
import subprocess
import threading
import time
import datetime
from colorama import Fore, Style
from plot import run_dash_server
import webbrowser
import pathlib
from urllib.parse import urlparse, urlunparse
import boto3
fileLoc = os.path.dirname(os.path.abspath(__file__))
def uri_path_join(uri, *paths):
p = urlparse(uri)
new_path = os.path.join(p.path, *paths)
return urlunparse(p._replace(path=new_path))
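# Hedged illustration (not part of the original script): with a made-up bucket name,
#
#     uri_path_join('s3://my-bucket/results', '100', 'summary.json')
#
# is expected to return 's3://my-bucket/results/100/summary.json'.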
class HGEWrkBench(HGETestSetup):
wrk_docker_image = 'hasura/wrk:v0.3'
# We'll bind mount the lua script dir to this directory within the wrk container:
lua_dir = '/tmp/bench_scripts'
rps_steps = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000]
def __init__(
self, pg_url, remote_pg_url, pg_docker_image, hge_url=None,
remote_hge_url=None, hge_docker_image=None,
hge_args=[], skip_stack_build=False,
graphql_queries_file='queries.graphql', connections=50,
duration=300, results_hge_url = None, results_hge_admin_secret = None
):
self.load_queries(graphql_queries_file)
super().__init__(
pg_url = pg_url,
remote_pg_url = remote_pg_url,
pg_docker_image = pg_docker_image,
hge_url = hge_url,
remote_hge_url = remote_hge_url,
hge_docker_image = hge_docker_image,
hge_args = hge_args,
skip_stack_build = skip_stack_build
)
self.connections = connections
self.duration = duration
self.results_hge_url = results_hge_url
self.results_hge_admin_secret = results_hge_admin_secret
self.extract_cpu_info()
# NOTE: we generally want to do this just once; otherwise if we happen
# to be editing the tree while this script is running the shasum will
# keep changing:
self.server_shasum = self.get_server_shasum()
def load_queries(self, graphql_queries_file):
self.graphql_queries_file = graphql_queries_file
with open(self.graphql_queries_file) as f:
queries = f.read()
self.query_names = []
self.queries = []
for oper in graphql.parse(queries).definitions:
self.query_names.append(oper.name.value)
self.queries.append(oper)
def get_wrk2_params(self):
cpu_count = multiprocessing.cpu_count()
return {
'threads': cpu_count,
'connections': self.connections,
'duration': self.duration
}
def get_current_user(self):
return '{}:{}'.format(os.geteuid(), os.getegid())
def wrk2_test(self, query, rps):
def upload_files(files):
if self.upload_root_uri:
p = urlparse(self.upload_root_uri)
if p.scheme == 's3':
bucket = p.netloc
key = p.path.lstrip('/')
s3_client = boto3.client('s3')
for (f, f_key) in files:
s3_client.upload_file(f, bucket, os.path.join(key, f_key))
query_str = graphql.print_ast(query)
params = self.get_wrk2_params()
print(Fore.GREEN + "Running benchmark wrk2 for at {} req/s (duration: {}) for query\n".format(rps, params['duration']), query_str + Style.RESET_ALL)
bench_script = os.path.join(self.lua_dir, 'bench-wrk2.lua')
graphql_url = self.hge.url + '/v1/graphql'
timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
results_dir = self.results_root_dir
tests_path = [str(rps), timestamp]
results_dir = os.path.join(results_dir, *tests_path)
os.makedirs(results_dir, exist_ok=True)
wrk2_command = [
'wrk2',
'-R', str(rps),
'-t', str(params['threads']),
'-c', str(params['connections']),
'-d', str(params['duration']),
'--latency',
'-s', bench_script,
graphql_url,
query_str,
results_dir
]
volumes = self.get_scripts_vol()
volumes[results_dir] = {
'bind': results_dir,
'mode': 'rw'
}
self.docker_client = docker.from_env()
result = self.docker_client.containers.run(
self.wrk_docker_image,
detach = False,
stdout = True,
stderr = False,
command = wrk2_command,
network_mode = 'host',
environment = self.get_lua_env(),
volumes = volumes,
remove = True,
user = self.get_current_user()
).decode('ascii')
histogram_file = os.path.join(results_dir, 'latencies.hgrm')
histogram = self.get_latency_histogram(result, histogram_file)
summary_file = os.path.join(results_dir, 'summary.json')
with open(summary_file) as f:
summary = json.load(f)
latencies_file = os.path.join(results_dir, 'latencies')
def extract_data(v):
return v['data'] if isinstance(v, dict) and 'data' in v else v
tests_info = { k:extract_data(v) for (k, v) in self.gen_test_info(query, rps).items() }
tests_setup_file = os.path.join(results_dir, 'test_setup.json')
with open(tests_setup_file, 'w') as f:
f.write(json.dumps(tests_info, indent=2))
upload_files([
(x, os.path.join(*tests_path,y))
for (x,y) in [
(summary_file, 'summary.json'),
(latencies_file, 'latencies'),
(histogram_file, 'latencies.hgrm'),
(tests_setup_file, 'test_setup.json')
]
])
if self.upload_root_uri:
latencies_uri = uri_path_join(self.upload_root_uri, *tests_path, 'latencies')
else:
latencies_uri = pathlib.Path(latencies_file).as_uri()
self.insert_result(query, rps, summary, histogram, latencies_uri)
return (summary, histogram)
def get_latency_histogram(self, result, write_histogram_file):
const_true = lambda l : True
state_changes = {
'start' : {
(lambda l: 'Detailed Percentile spectrum' in l) : 'histogram_start'
},
'histogram_start': {
(lambda l: 'Value' in l and 'Percentile' in l): 'histogram_headers'
},
'histogram_headers': {
const_true: 'histogram_empty_line'
},
'histogram_empty_line' : {
const_true: 'histogram_values'
},
'histogram_values': {
(lambda l: l.strip().startswith('#')): 'histogram_summary'
},
'histogram_summary': {
(lambda l: not l.strip().startswith('#')): 'histogram_end'
}
}
state = 'start'
histogram = []
print(Fore.CYAN + "Latency histogram summary" + Style.RESET_ALL)
with open(write_histogram_file, 'w') as f:
for line in result.splitlines():
# Change the state
for (check, next_state) in state_changes[state].items():
if check(line):
state = next_state
break
if state == 'start':
continue
elif state == 'histogram_end':
break
if state == 'histogram_summary':
print(Fore.CYAN + line + Style.RESET_ALL)
if state in ['histogram_headers','histogram_values','histogram_summary']:
f.write(line+'\n')
if state == 'histogram_values':
(val, percentile, total_count, _) = line.strip().split()
histogram.append({
'percentile': float(percentile),
'latency': float(val),
'total_count': float(total_count)
})
return histogram
# The appropriate Lua env vars for execution within wrk container:
def get_lua_env(self):
return {
'LUA_PATH': '/usr/share/lua/5.1/?.lua;' +
os.path.join(self.lua_dir, '?.lua') + ';;',
'LUA_CPATH': '/usr/lib/lua/5.1/?.so;/usr/lib/x86_64-linux-gnu/lua/5.1/?.so;;'
}
def get_scripts_vol(self):
return {
os.path.join(fileLoc, 'wrk-websocket-server', 'bench_scripts'): {
'bind' : self.lua_dir,
'mode' : 'ro'
}
}
def max_rps_test(self, query):
query_str = graphql.print_ast(query)
print(Fore.GREEN + "(Compute maximum Request per second) Running wrk benchmark for query\n", query_str + Style.RESET_ALL)
self.hge.graphql_q(query_str) # Test query once for errors
bench_script = os.path.join(self.lua_dir + '/bench-wrk.lua')
graphql_url = self.hge.url + '/v1/graphql'
params = self.get_wrk2_params()
duration = 30
wrk_command = [
'wrk',
'-t', str(params['threads']),
'-c', str(params['connections']),
'-d', str(duration),
'--latency',
'-s', bench_script,
graphql_url,
query_str
]
self.docker_client = docker.from_env()
result = self.docker_client.containers.run(
self.wrk_docker_image,
detach = False,
stdout = False,
stderr = True,
command = wrk_command,
network_mode = 'host',
environment = self.get_lua_env(),
volumes = self.get_scripts_vol(),
remove = True,
user = self.get_current_user()
)
summary = json.loads(result)['summary']
# TODO explain this calculation. Why aren't we using wrk's reported 'max'? Should we call this avg_sustained_rps or something?
max_rps = round(summary['requests']/float(duration))
self.insert_max_rps_result(query, max_rps)
print("Max RPS", max_rps)
return max_rps
def get_version(self):
script = os.path.join(fileLoc, 'gen-version.sh')
return subprocess.check_output([script]).decode('ascii').strip()
def get_server_shasum(self):
script = os.path.join(fileLoc, 'get-server-sha.sh')
return subprocess.check_output([script]).decode('ascii').strip()
def extract_cpu_info(self):
self.cpu_info = cpuinfo.get_cpu_info()
for k in ['flags', 'python_version', 'hz_actual', 'hz_actual_raw']:
if self.cpu_info.get(k):
del self.cpu_info[k]
def get_results(self):
query = '''
query results {
latency: hge_bench_latest_results {
query_name
requests_per_sec
docker_image
version
latencies_uri
latency_histogram {
percentile
latency
}
}
max_rps: hge_bench_avg_query_max_rps {
query_name
docker_image
version
max_rps
}
}
'''
output = self.results_hge.graphql_q(query)
return output['data']
def set_cpu_info(self, insert_var):
cpu_key = self.cpu_info['brand'] + ' vCPUs: ' + str(self.cpu_info['count'])
insert_var['cpu']= {
'data' : {
'info': self.cpu_info,
'key': cpu_key
},
"on_conflict": {
"constraint": "cpu_info_pkey",
"update_columns": "key"
}
}
def set_query_info(self, insert_var, query):
insert_var["query"] = {
"data": {
"name" : query.name.value,
"query" : graphql.print_ast(query)
},
"on_conflict" : {
"constraint": "gql_query_query_key",
"update_columns": "query"
}
}
#TODO add executable shasum also
def set_version_info(self, insert_var):
if self.hge_docker_image:
insert_var["docker_image"] = self.hge_docker_image
else:
insert_var["version"] = self.get_version()
insert_var["server_shasum"] = self.server_shasum
insert_var['postgres_version'] = self.pg.get_server_version()
if self.scenario_name:
insert_var['scenario_name'] = self.scenario_name
def set_hge_args_env_vars(self, insert_var):
to_hide_env = ['HASURA_GRAPHQL_' + env for env in
[ 'ADMIN_SECRET', 'DATABASE_URL', 'JWT_SECRET']
]
env = { k:v for (k,v) in self.hge.get_hge_env().items() if (k.startswith('HASURA_GRAPHQL') and k not in to_hide_env) or k in ['GHCRTS'] }
args = self.hge.args
insert_var['hge_conf'] = {
'env': env,
'args': args
}
def gen_max_rps_insert_var(self, query, max_rps):
insert_var = dict()
self.set_cpu_info(insert_var)
self.set_query_info(insert_var, query)
self.set_version_info(insert_var)
self.set_hge_args_env_vars(insert_var)
insert_var['max_rps'] = max_rps
insert_var['wrk_parameters'] = self.get_wrk2_params()
return insert_var
def plot_results(self):
def open_plot_in_browser():
time.sleep(1)
webbrowser.open_new_tab('http://127.0.0.1:8050/')
threading.Thread(target=open_plot_in_browser).start()
run_dash_server(self.get_results())
# Collect info about the test environment
def gen_test_info(self, query, rps):
test_info = dict()
self.set_cpu_info(test_info)
self.set_query_info(test_info, query)
self.set_version_info(test_info)
self.set_hge_args_env_vars(test_info)
test_info["requests_per_sec"] = rps
test_info['wrk2_parameters'] = self.get_wrk2_params()
return test_info
def gen_result_insert_var(self, query, rps, summary, latency_histogram, latencies_uri):
insert_var = self.gen_test_info(query, rps)
insert_var["summary"] = summary
insert_var['latency_histogram'] = {
'data' : latency_histogram
}
insert_var['latencies_uri'] = latencies_uri
return insert_var
def insert_result(self, query, rps, summary, latency_histogram, latencies_uri):
result_var = self.gen_result_insert_var(query, rps, summary, latency_histogram, latencies_uri)
insert_query = """
mutation insertResult($result: hge_bench_results_insert_input!) {
insert_hge_bench_results(objects: [$result]){
affected_rows
}
}"""
variables = {'result': result_var}
self.results_hge.graphql_q(insert_query, variables)
def insert_max_rps_result(self, query, max_rps):
result_var = self.gen_max_rps_insert_var(query, max_rps)
insert_query = """
mutation insertMaxRps($result: hge_bench_query_max_rps_insert_input!) {
insert_hge_bench_query_max_rps(objects: [$result]){
affected_rows
}
}"""
variables = {'result': result_var}
self.results_hge.graphql_q(insert_query, variables)
def setup_results_schema(self):
if not self.results_hge_url:
self.results_hge_url = self.hge.url
self.results_hge_admin_secret = self.hge.admin_secret()
if self.results_hge_admin_secret:
results_hge_args = ['--admin-secret', self.results_hge_admin_secret]
else:
results_hge_args = []
self.results_hge = HGE(None, None, args=results_hge_args, log_file=None, url=self.results_hge_url)
results_table = {
'name' : 'results',
'schema': 'hge_bench'
}
if results_table in self.results_hge.get_all_tracked_tables():
return
schema_file = os.path.join(fileLoc, 'results_schema.yaml')
with open(schema_file) as f:
queries = yaml.safe_load(f)
self.results_hge.run_bulk(queries)
def run_query_benchmarks(self):
def get_results_root_dir(query):
if self.hge_docker_image:
ver_info = 'docker-tag-' + self.hge_docker_image.split(':')[1]
else:
ver_info = self.get_version()
query_name = query.name.value
# Store versioned runs under e.g. test_output/benchmark_runs/<hge_version>/
results_root_dir = os.path.abspath(os.path.join(self.work_dir, 'benchmark_runs'))
return os.path.join(results_root_dir, ver_info, query_name)
for query in self.queries:
try:
self.results_root_dir = get_results_root_dir(query)
max_rps = self.max_rps_test(query)
# The tests should definitely not be running very close to or higher than maximum requests per second
rps_steps = [ r for r in self.rps_steps if r < 0.6*max_rps]
print("Benchmarking queries with wrk2 for the following requests/sec", rps_steps)
for rps in rps_steps:
if rps < int(0.6*max_rps):
self.wrk2_test(query, rps)
except Exception:
print(Fore.RED + "Benchmarking Graphql Query '" + query.name.value + "' failed" + Style.RESET_ALL)
raise
def run_tests(self):
with self.graphql_engines_setup():
self.setup_results_schema()
if self.run_benchmarks:
self.run_query_benchmarks()
if not self.skip_plots:
self.plot_results()
class HGEWrkBenchArgs(HGETestSetupArgs):
def __init__(self):
self.set_arg_parse_options()
self.parse_args()
def set_arg_parse_options(self):
HGETestSetupArgs.set_arg_parse_options(self)
self.set_wrk_options()
def parse_args(self):
HGETestSetupArgs.parse_args(self)
self.parse_wrk_options()
def set_wrk_options(self):
def boolean_string(s):
s = s.lower()
if s not in {'false', 'true'}:
raise ValueError('Not a valid boolean string')
return s == 'true'
wrk_opts = self.arg_parser.add_argument_group('wrk')
wrk_opts.add_argument('--queries-file', metavar='HASURA_BENCH_QUERIES_FILE', help='Queries file for benchmarks', default='queries.graphql')
wrk_opts.add_argument('--connections', metavar='HASURA_BENCH_CONNECTIONS', help='Total number of open connections', default=50)
wrk_opts.add_argument('--duration', metavar='HASURA_BENCH_DURATION', help='Duration of tests in seconds', default=300)
wrk_opts.add_argument('--upload-root-uri', metavar='HASURA_BENCH_UPLOAD_ROOT_URI', help='The URI to which the latency results should be uploaded. Currently only s3 is supported', required=False)
wrk_opts.add_argument('--set-scenario-name', metavar='HASURA_BENCH_SCENARIO_NAME', help='Set a name for the test scenario. This will be shown in logs', required=False)
wrk_opts.add_argument('--results-hge-url', metavar='HASURA_BENCH_RESULTS_HGE_URL', help='The GraphQL engine to which the results should be uploaded', required=False)
wrk_opts.add_argument('--results-hge-admin-secret', metavar='HASURA_BENCH_RESULTS_HGE_ADMIN_SECRET', help='Admin secret of the GraphQL engine to which the results should be uploaded', required=False)
wrk_opts.add_argument('--skip-plots', help='Skip plotting', action='store_true', required=False)
wrk_opts.add_argument('--run-benchmarks', metavar='HASURA_BENCH_RUN_BENCHMARKS', help='Whether benchmarks should be run or not', default=True, type=boolean_string)
def get_s3_caller_identity(self):
return boto3.client('sts').get_caller_identity()
def parse_wrk_options(self):
self.connections, self.duration, self.graphql_queries_file, self.res_hge_url, upload_root_uri, self.res_hge_admin_secret, self.run_benchmarks, self.scenario_name = \
self.get_params([
('connections', 'HASURA_BENCH_CONNECTIONS'),
('duration', 'HASURA_BENCH_DURATION'),
('queries_file', 'HASURA_BENCH_QUERIES_FILE'),
('results_hge_url', 'HASURA_BENCH_RESULTS_HGE_URL'),
('upload_root_uri', 'HASURA_BENCH_UPLOAD_ROOT_URI'),
('results_hge_admin_secret', 'HASURA_BENCH_RESULTS_HGE_ADMIN_SECRET'),
('run_benchmarks', 'HASURA_BENCH_RUN_BENCHMARKS'),
('set_scenario_name', 'HASURA_BENCH_SCENARIO_NAME'),
])
self.upload_root_uri = None
if upload_root_uri:
p = urlparse(upload_root_uri)
if p.scheme == 's3':
# Check if aws credentials are set
self.get_s3_caller_identity()
self.upload_root_uri = upload_root_uri
self.skip_plots = self.parsed_args.skip_plots
class HGEWrkBenchWithArgs(HGEWrkBenchArgs, HGEWrkBench):
def __init__(self):
HGEWrkBenchArgs.__init__(self)
HGEWrkBench.__init__(
self,
pg_url = self.pg_url,
remote_pg_url = self.remote_pg_url,
pg_docker_image = self.pg_docker_image,
hge_url = self.hge_url,
remote_hge_url = self.remote_hge_url,
hge_docker_image = self.hge_docker_image,
hge_args = self.hge_args,
skip_stack_build = self.skip_stack_build,
graphql_queries_file = self.graphql_queries_file,
connections = self.connections,
duration = self.duration
)
if __name__ == "__main__":
bench = HGEWrkBenchWithArgs()
bench.run_tests()
|
warp/tests/test_import.py | NVIDIA/warp | 306 | 40816 |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# include parent path
import numpy as np
import math
import warp as wp
from warp.tests.test_base import *
import unittest
wp.init()
#from test_func import sqr
import warp.tests.test_func as test_func
@wp.kernel
def test_import_func():
# test a cross-module function reference is resolved correctly
x = test_func.sqr(2.0)
y = test_func.cube(2.0)
wp.expect_eq(x, 4.0)
wp.expect_eq(y, 8.0)
def register(parent):
devices = wp.get_devices()
class TestImport(parent):
pass
add_kernel_test(TestImport, kernel=test_import_func, name="test_import_func", dim=1, devices=devices)
return TestImport
if __name__ == '__main__':
c = register(unittest.TestCase)
#unittest.main(verbosity=2)
wp.force_load()
loader = unittest.defaultTestLoader
testSuite = loader.loadTestsFromTestCase(c)
testSuite.debug() |
genetic_algorithm/mutation.py | mig029/SnakeAI | 181 | 40822 |
# 9.3.2
# 11.2.1
# 12.4.3
import numpy as np
from typing import List, Union, Optional
from .individual import Individual
def gaussian_mutation(chromosome: np.ndarray, prob_mutation: float,
mu: List[float] = None, sigma: List[float] = None,
scale: Optional[float] = None) -> None:
"""
Perform a gaussian mutation for each gene in an individual with probability, prob_mutation.
If mu and sigma are defined then the gaussian distribution will be drawn from that,
otherwise it will be drawn from N(0, 1) for the shape of the individual.
"""
# Determine which genes will be mutated
mutation_array = np.random.random(chromosome.shape) < prob_mutation
# If mu and sigma are defined, create gaussian distribution around each one
if mu and sigma:
gaussian_mutation = np.random.normal(mu, sigma)
# Otherwise center around N(0,1)
else:
gaussian_mutation = np.random.normal(size=chromosome.shape)
if scale:
gaussian_mutation[mutation_array] *= scale
# Update
chromosome[mutation_array] += gaussian_mutation[mutation_array]
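# Hedged usage sketch (illustrative): mutating a small chromosome in place with a 20%
# per-gene mutation rate and scaled N(0, 1) noise.
#
#     chromosome = np.random.uniform(-1.0, 1.0, size=(8,))
#     gaussian_mutation(chromosome, prob_mutation=0.2, scale=0.1)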
def random_uniform_mutation(chromosome: np.ndarray, prob_mutation: float,
low: Union[List[float], float],
high: Union[List[float], float]
) -> None:
"""
Randomly mutate each gene in an individual with probability, prob_mutation.
If a gene is selected for mutation it will be assigned a value with uniform probability
between [low, high).
@Note [low, high) is defined for each gene to help get the full range of possible values
@TODO: Eq 11.4
"""
assert type(low) == type(high), 'low and high must have the same type'
mutation_array = np.random.random(chromosome.shape) < prob_mutation
if isinstance(low, list):
uniform_mutation = np.random.uniform(low, high)
else:
uniform_mutation = np.random.uniform(low, high, size=chromosome.shape)
chromosome[mutation_array] = uniform_mutation[mutation_array]
def uniform_mutation_with_respect_to_best_individual(chromosome: np.ndarray, best_chromosome: np.ndarray, prob_mutation: float) -> None:
"""
Randomly mutate each gene in an individual with probability, prob_mutation.
If a gene is selected for mutation it will be nudged towards the gene from the best individual.
@TODO: Eq 11.6
"""
mutation_array = np.random.random(chromosome.shape) < prob_mutation
uniform_mutation = np.random.uniform(size=chromosome.shape)
chromosome[mutation_array] += uniform_mutation[mutation_array] * (best_chromosome[mutation_array] - chromosome[mutation_array])
def cauchy_mutation(individual: np.ndarray, scale: float) -> np.ndarray:
pass
def exponential_mutation(chromosome: np.ndarray, xi: Union[float, np.ndarray], prob_mutation: float) -> None:
mutation_array = np.random.random(chromosome.shape) < prob_mutation
# Fill xi if necessary
if not isinstance(xi, np.ndarray):
xi_val = xi
xi = np.empty(chromosome.shape)
xi.fill(xi_val)
# Change xi so we get E(0, 1), instead of E(0, xi)
xi_div = 1.0 / xi
xi.fill(1.0)
# Eq 11.17
y = np.random.uniform(size=chromosome.shape)
x = np.empty(chromosome.shape)
x[y <= 0.5] = (1.0 / xi[y <= 0.5]) * np.log(2 * y[y <= 0.5])
x[y > 0.5] = -(1.0 / xi[y > 0.5]) * np.log(2 * (1 - y[y > 0.5]))
# Eq 11.16
delta = np.empty(chromosome.shape)
delta[mutation_array] = (xi[mutation_array] / 2.0) * np.exp(-xi[mutation_array] * np.abs(x[mutation_array]))
# Update delta such that E(0, xi) = (1 / xi) * E(0 , 1)
delta[mutation_array] = xi_div[mutation_array] * delta[mutation_array]
# Update individual
chromosome[mutation_array] += delta[mutation_array]
def mmo_mutation(chromosome: np.ndarray, prob_mutation: float) -> None:
from scipy import stats
mutation_array = np.random.random(chromosome.shape) < prob_mutation
normal = np.random.normal(size=chromosome.shape) # Eq 11.21
cauchy = stats.cauchy.rvs(size=chromosome.shape) # Eq 11.22
# Eq 11.20
delta = np.empty(chromosome.shape)
delta[mutation_array] = normal[mutation_array] + cauchy[mutation_array]
# Update individual
chromosome[mutation_array] += delta[mutation_array] |
src/backend/common/models/tests/team_test.py | guineawheek/ftc-data-take-2 | 266 | 40828 | import pytest
from backend.common.models.team import Team
from backend.common.models.tests.util import (
CITY_STATE_COUNTRY_PARAMETERS,
LOCATION_PARAMETERS,
)
@pytest.mark.parametrize("key", ["frc177", "frc1"])
def test_valid_key_names(key: str) -> None:
assert Team.validate_key_name(key) is True
@pytest.mark.parametrize("key", ["bcr077", "frc 011", "frc711\\"])
def test_invalid_key_names(key: str) -> None:
assert Team.validate_key_name(key) is False
def test_key_name() -> None:
team = Team(id="frc254", team_number=254)
assert team.key_name == "frc254"
@pytest.mark.parametrize(LOCATION_PARAMETERS[0], LOCATION_PARAMETERS[1])
def test_location(
city: str, state: str, country: str, postalcode: str, output: str
) -> None:
team = Team(
city=city,
state_prov=state,
country=country,
postalcode=postalcode,
)
assert team.location == output
@pytest.mark.parametrize(
CITY_STATE_COUNTRY_PARAMETERS[0], CITY_STATE_COUNTRY_PARAMETERS[1]
)
def test_city_state_country(city: str, state: str, country: str, output: str) -> None:
team = Team(
city=city,
state_prov=state,
country=country,
)
assert team.city_state_country == output
def test_details_url() -> None:
team = Team(
id="frc254",
team_number=254,
)
assert team.details_url == "/team/254"
|
pymtl3/datatypes/bitstructs.py | kevinyuan/pymtl3 | 152 | 40842 | """
==========================================================================
bitstruct.py
==========================================================================
APIs to generate a bitstruct type. Using decorators and type annotations
to create bit struct is much inspired by python3 dataclass implementation.
Note that the implementation (such as the _CAPITAL constants to add some
private metadata) in this file is very similar to the **original python3
dataclass implementation**. The syntax of creating bit struct is very
similar to that of python3 dataclass.
https://github.com/python/cpython/blob/master/Lib/dataclasses.py
For example,
@bitstruct
class Point:
x : Bits4
y : Bits4
will automatically generate some methods, such as __init__, __str__,
__repr__, for the Point class.
Similar to the built-in dataclasses module, we also provide a
mk_bitstruct function for user to dynamically generate bit struct types.
For example,
mk_bitstruct( 'Pixel',{
'r' : Bits4,
'g' : Bits4,
'b' : Bits4,
},
name_space = {
'__str__' : lambda self: f'({self.r},{self.g},{self.b})'
}
)
is equivalent to:
@bitstruct
class Pixel:
r : Bits4
g : Bits4
b : Bits4
def __str__( self ):
return f'({self.r},{self.g},{self.b})'
Author : <NAME>, <NAME>
Date : Oct 19, 2019
"""
import functools
import keyword
import operator
import types
import warnings
import py
from pymtl3.extra.pypy import custom_exec
from .bits_import import *
from .helpers import concat
#-------------------------------------------------------------------------
# Constants
#-------------------------------------------------------------------------
# Object with this attribute is considered as bit struct, as we assume
# only the bitstruct decorator will stamp this attribute to a class. This
# attribute also stores the field information and can be used for
# translation.
#
# The original dataclass use hasattr( cls, _FIELDS ) to check dataclass.
# We do this here as well
_FIELDS = '__bitstruct_fields__'
def is_bitstruct_inst( obj ):
"""Returns True if obj is an instance of a dataclass."""
return hasattr(type(obj), _FIELDS)
def is_bitstruct_class(cls):
"""Returns True if obj is a dataclass ."""
return isinstance(cls, type) and hasattr(cls, _FIELDS)
def get_bitstruct_inst_all_classes( obj ):
# list: put all types together
if isinstance( obj, list ):
return functools.reduce( operator.or_, [ get_bitstruct_inst_all_classes(x) for x in obj ] )
ret = { obj.__class__ }
# BitsN or int
if isinstance( obj, (Bits, int) ):
return ret
# BitStruct
assert is_bitstruct_inst( obj ), f"{obj} is not a valid PyMTL Bitstruct!"
return ret | functools.reduce( operator.or_, [ get_bitstruct_inst_all_classes(getattr(obj, v))
for v in obj.__bitstruct_fields__.keys() ] )
_DEFAULT_SELF_NAME = 's'
_ANTI_CONFLICT_SELF_NAME = '__bitstruct_self__'
#-------------------------------------------------------------------------
# _create_fn
#-------------------------------------------------------------------------
# A helper function that creates a function based on
# - fn_name : name of the function
# - args_lst : a list of arguments in string
# - body_lst : a list of statement of the function body in string
# Also note that this whole _create_fn thing is similar to the original
# dataclass implementation!
def _create_fn( fn_name, args_lst, body_lst, _globals=None ):
# Assemble argument string and body string
args = ', '.join(args_lst)
body = '\n'.join(f' {statement}' for statement in body_lst)
# Assemble the source code and execute it
src = f'def {fn_name}({args}):\n{body}'
if _globals is None: _globals = {}
_locals = {}
custom_exec( py.code.Source(src).compile(), _globals, _locals )
return _locals[fn_name]
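# Example (illustrative, not part of the original source): calling
#   inc = _create_fn( 'inc', [ 'x' ], [ 'return x + 1' ] )
# produces a function object equivalent to `def inc(x): return x + 1`,
# so inc( 41 ) returns 42.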
#-------------------------------------------------------------------------
# _mk_init_arg
#-------------------------------------------------------------------------
# Creates a init argument string from a field.
#
# Shunning: I revamped the whole thing because they are indeed mutable
# objects.
def _mk_init_arg( name, type_ ):
# default is always None
if isinstance( type_, list ) or is_bitstruct_class( type_ ):
return f'{name} = None'
return f'{name} = 0'
#-------------------------------------------------------------------------
# _mk_init_body
#-------------------------------------------------------------------------
# Creates one line of __init__ body from a field
# to globals.
def _mk_init_body( self_name, name, type_ ):
def _recursive_generate_init( x ):
if isinstance( x, list ):
return f"[{', '.join( [ _recursive_generate_init(x[0]) ] * len(x) )}]"
return f"_type_{name}()"
if isinstance( type_, list ) or is_bitstruct_class( type_ ):
return f'{self_name}.{name} = {name} or {_recursive_generate_init(type_)}'
assert issubclass( type_, Bits )
return f'{self_name}.{name} = _type_{name}({name})'
#-------------------------------------------------------------------------
# _mk_tuple_str
#-------------------------------------------------------------------------
# Creates a tuple of string representations of each field. For example,
# if the self_name is 'self' and fields is [ 'x', 'y' ], it will return
# ('self.x', 'self.y'). This is used for creating the default __eq__ and
# __hash__ function.
def _mk_tuple_str( self_name, fields ):
return f'({",".join([f"{self_name}.{name}" for name in fields])},)'
#-------------------------------------------------------------------------
# _mk_init_fn
#-------------------------------------------------------------------------
# Creates a __init__ function based on fields. For example, if fields
# contains two field x (Bits4) and y (Bits4), _mk_init_fn will return a
# function that looks like the following:
#
# def __init__( s, x = 0, y = 0, z = None, p = None ):
# s.x = _type_x(x)
# s.y = _type_y(y)
# s.z = z or _type_z()
# s.p = p or [ _type_p(), _type_p() ]
#
# NOTE:
# _mk_init_fn also takes as argument the name of self in case there is a
# field with name 's' or 'self'.
#
# TODO: should we provide a __post_init__ function like dataclass does?
def _mk_init_fn( self_name, fields ):
# Register necessary types in _globals
_globals = {}
for name, type_ in fields.items():
if isinstance( type_, list ):
x = type_[0]
while isinstance( x, list ):
x = x[0]
_globals[ f"_type_{name}" ] = x
else:
assert issubclass( type_, Bits ) or is_bitstruct_class( type_ )
_globals[ f"_type_{name}" ] = type_
return _create_fn(
'__init__',
[ self_name ] + [ _mk_init_arg( *field ) for field in fields.items() ],
[ _mk_init_body( self_name, *field ) for field in fields.items() ],
_globals = _globals,
)
#-------------------------------------------------------------------------
# _mk_str_fn
#-------------------------------------------------------------------------
# Creates a __str__ function based on fields. For example, if fields
# contains two fields x (Bits4) and y (Bits4), _mk_str_fn will return a
# function that looks like the following:
#
# def __str__( self ):
# return f'{self.x}:{self.y}'
def _mk_str_fn( fields ):
return _create_fn(
'__str__',
[ 'self' ],
[ 'return f"' +
':'.join([ f'{{self.{name}}}' for name in fields ]) + '"']
)
#-------------------------------------------------------------------------
# _mk_repr_fn
#-------------------------------------------------------------------------
# Creates a __repr__ function based on fields. For example, if fields
# contains two fields x (Bits4) and y (Bits4), _mk_repr_fn will return a
# function that looks like the following:
#
# def __repr__( self ):
# return self.__class__.__name__ + f'(x={self.x!r}, y={self.y!r})'
def _mk_repr_fn( fields ):
return _create_fn(
'__repr__',
[ 'self' ],
[ 'return self.__class__.__name__ + f"(' +
','.join([ f'{{self.{name}!r}}' for name in fields ]) +
')"']
)
#-------------------------------------------------------------------------
# _mk_eq_fn
#-------------------------------------------------------------------------
# Creates a __eq__ function based on fields. By default it just compares
# each field. For example, if fields contains two field x (Bits4) and y
# (Bits4), _mk_eq_fn will return a function that looks like the
# following:
#
# def __eq__( self, other ):
# if other.__class__ is self.__class__:
# return (self.x,self.y,) == (other.x,other.y,)
# else:
# raise NotImplemented
def _mk_eq_fn( fields ):
self_tuple = _mk_tuple_str( 'self', fields )
other_tuple = _mk_tuple_str( 'other', fields )
return _create_fn(
'__eq__',
[ 'self', 'other' ],
[ f'return (other.__class__ is self.__class__) and {self_tuple} == {other_tuple}' ]
)
#-------------------------------------------------------------------------
# _mk_hash_fn
#-------------------------------------------------------------------------
# Creates a __hash__ function based on fields. By default it just hashes
# all fields. For example, if fields contains two field x (Bits4) and y
# (Bits4), _mk_hash_fn will return a function that looks like the
# following:
#
# def __hash__( self ):
# return hash((self.x,self.y,))
def _mk_hash_fn( fields ):
self_tuple = _mk_tuple_str( 'self', fields )
return _create_fn(
'__hash__',
[ 'self' ],
[ f'return hash({self_tuple})' ]
)
#--------------------------PyMTL3 specific--------------------------------
#-------------------------------------------------------------------------
# _mk_ff_fn
#-------------------------------------------------------------------------
# Creates __ilshift__ and _flip functions that looks like the following:
#
# def __ilshift__( self, other ):
# if self.__class__ is not other.__class__:
# other = self.__class__.from_bits( other.to_bits() )
# self.x <<= other.x
# self.y[0][0] <<= other.y[0][0]
#
# def _flip( self ):
# self.x._flip()
# self.y[i][j]._flip()
def _mk_ff_fn( fields ):
def _gen_list_ilshift_strs( type_, prefix='' ):
if isinstance( type_, list ):
ilshift_strs, flip_strs = [], []
for i in range(len(type_)):
ils, fls = _gen_list_ilshift_strs( type_[0], f"{prefix}[{i}]" )
ilshift_strs.extend( ils )
flip_strs.extend( fls )
return ilshift_strs, flip_strs
else:
return [ f"self.{prefix} <<= other.{prefix}" ], [f"self.{prefix}._flip()"]
ilshift_strs = [ 'if self.__class__ is not other.__class__:',
' other = self.__class__.from_bits( other.to_bits() )']
flip_strs = []
for name, type_ in fields.items():
ils, fls = _gen_list_ilshift_strs( type_, name )
ilshift_strs.extend( ils )
flip_strs.extend( fls )
return _create_fn(
'__ilshift__',
[ 'self', 'other' ],
ilshift_strs + [ "return self" ],
), _create_fn(
'_flip',
[ 'self' ],
flip_strs,
),
#-------------------------------------------------------------------------
# _mk_clone_fn
#-------------------------------------------------------------------------
# Creates clone function that looks like the following:
# Use this clone function in any place that you need to perform a
# deepcopy on a bitstruct.
#
# def clone( self ):
# return self.__class__( self.x.clone(), [ self.y[0].clone(), self.y[1].clone() ] )
def _gen_list_clone_strs( type_, prefix='' ):
if isinstance( type_, list ):
return "[" + ",".join( [ _gen_list_clone_strs( type_[0], f"{prefix}[{i}]" )
for i in range(len(type_)) ] ) + "]"
else:
return f"{prefix}.clone()"
def _mk_clone_fn( fields ):
clone_strs = [ 'return self.__class__(' ]
for name, type_ in fields.items():
clone_strs.append( " " + _gen_list_clone_strs( type_, f'self.{name}' ) + "," )
return _create_fn(
'clone',
[ 'self' ],
clone_strs + [ ')' ],
)
def _mk_deepcopy_fn( fields ):
clone_strs = [ 'return self.__class__(' ]
for name, type_ in fields.items():
clone_strs.append( " " + _gen_list_clone_strs( type_, f'self.{name}' ) + "," )
return _create_fn(
'__deepcopy__',
[ 'self', 'memo' ],
clone_strs + [ ')' ],
)
#-------------------------------------------------------------------------
# _mk_imatmul_fn
#-------------------------------------------------------------------------
# Creates @= function that copies the value over ...
# TODO create individual from_bits for imatmul and ilshift
# def __imatmul__( self, other ):
# if self.__class__ is not other.__class__:
# other = self.__class__.from_bits( other.to_bits() )
# self.x @= other.x
# self.y[0] @= other.y[0]
# self.y[1] @= other.y[1]
def _mk_imatmul_fn( fields ):
def _gen_list_imatmul_strs( type_, prefix='' ):
if isinstance( type_, list ):
ret = []
for i in range(len(type_)):
ret.extend( _gen_list_imatmul_strs( type_[0], f"{prefix}[{i}]" ) )
return ret
else:
return [ f"self.{prefix} @= other.{prefix}" ]
imatmul_strs = [ 'if self.__class__ is not other.__class__:',
' other = self.__class__.from_bits( other.to_bits() )']
for name, type_ in fields.items():
imatmul_strs.extend( _gen_list_imatmul_strs( type_, name ) )
return _create_fn(
'__imatmul__',
[ 'self', 'other' ],
imatmul_strs + [ "return self" ],
)
#-------------------------------------------------------------------------
# _mk_nbits_to_bits_fn
#-------------------------------------------------------------------------
# Creates nbits, to_bits function that copies the value over ...
#
# def to_bits( self ):
# return concat( self.x, self.y[0], self.y[1] )
#
# TODO packing order of array? x[0] is LSB or MSB of a list
# currently we do LSB
def _mk_nbits_to_bits_fn( fields ):
def _gen_to_bits_strs( type_, prefix, start_bit ):
if isinstance( type_, list ):
to_strs = []
# The packing order is LSB, so we need to reverse the list to make x[-1] higher bits
for i in reversed(range(len(type_))):
start_bit, tos = _gen_to_bits_strs( type_[0], f"{prefix}[{i}]", start_bit )
to_strs.extend( tos )
return start_bit, to_strs
elif is_bitstruct_class( type_ ):
to_strs = []
for name, typ in getattr(type_, _FIELDS).items():
start_bit, tos = _gen_to_bits_strs( typ, f"{prefix}.{name}", start_bit )
to_strs.extend( tos )
return start_bit, to_strs
else:
end_bit = start_bit + type_.nbits
return end_bit, [ f"self.{prefix}" ]
to_bits_strs = []
total_nbits = 0
for name, type_ in fields.items():
total_nbits, tos = _gen_to_bits_strs( type_, name, total_nbits )
to_bits_strs.extend( tos )
return total_nbits, _create_fn( 'to_bits', [ 'self' ],
[ f"return concat({', '.join(to_bits_strs)})" ],
_globals={'concat':concat} )
#-------------------------------------------------------------------------
# _mk_from_bits_fn
#-------------------------------------------------------------------------
# Creates static method from_bits that creates a new bitstruct based on Bits
# and instance method _from_bits that copies the value over
#
# @staticmethod
# def from_bits( other ):
# return self.__class__( other[16:32], other[0:16] )
def _mk_from_bits_fns( fields, total_nbits ):
def _gen_from_bits_strs( type_, end_bit ):
if isinstance( type_, list ):
from_strs = []
# Since we are doing LSB for x[0], we need to unpack from the last
# element of the list, and then reverse it again to construct a list ...
for i in range(len(type_)):
end_bit, fs = _gen_from_bits_strs( type_[0], end_bit )
from_strs.extend( fs )
return end_bit, [ f"[{','.join(reversed(from_strs))}]" ]
elif is_bitstruct_class( type_ ):
if type_ in type_name_mapping:
type_name = type_name_mapping[ type_ ]
else:
type_name = f"_type{len(type_name_mapping)}"
type_name_mapping[ type_ ] = type_name
from_strs = []
for name, typ in getattr(type_, _FIELDS).items():
end_bit, fs = _gen_from_bits_strs( typ, end_bit )
from_strs.extend( fs )
return end_bit, [ f"{type_name}({','.join(from_strs)})" ]
else:
if type_ not in type_name_mapping:
type_name_mapping[ type_ ] = type_.__name__
else:
assert type_name_mapping[ type_ ] == type_.__name__
start_bit = end_bit - type_.nbits
return start_bit, [ f"other[{start_bit}:{end_bit}]" ]
from_bits_strs = []
end_bit = total_nbits
# This is to make sure we capture two types with the same name but different
# attributes
type_name_mapping = {}
type_count = 0
for _, type_ in fields.items():
end_bit, fs = _gen_from_bits_strs( type_, end_bit )
from_bits_strs.extend( fs )
assert end_bit == 0
_globals = { y: x for x,y in type_name_mapping.items() }
assert len(_globals) == len(type_name_mapping)
# TODO add assertion in bits
return _create_fn( 'from_bits', [ 'cls', 'other' ],
[ "assert cls.nbits == other.nbits, f'LHS bitstruct {cls.nbits}-bit <> RHS other {other.nbits}-bit'",
"other = other.to_bits()",
f"return cls({','.join(from_bits_strs)})" ], _globals )
#-------------------------------------------------------------------------
# _check_valid_array
#-------------------------------------------------------------------------
def _recursive_check_array_types( current ):
x = current[0]
if isinstance( x, list ):
x_len = len(x)
x_type = _recursive_check_array_types( x )
for y in current[1:]:
assert isinstance( y, list ) and len(y) == x_len
y_type = _recursive_check_array_types( y )
assert y_type is x_type
return x_type
assert issubclass( x, Bits ) or is_bitstruct_class( x )
for y in current[1:]:
assert y is x
return x
def _check_valid_array_of_types( arr ):
# Check if the provided list is a strict multidimensional array
try:
return _recursive_check_array_types( arr )
except Exception as e:
print(e)
return None
#-------------------------------------------------------------------------
# _check_field_annotation
#-------------------------------------------------------------------------
def _check_field_annotation( cls, name, type_ ):
# Make sure not default is annotated
if hasattr( cls, name ):
default = getattr( cls, name )
raise TypeError( "We don't allow subfields to have default value:\n"
f"- Field '{name}' of BitStruct {cls.__name__} has default value {default!r}." )
# Special case if the type is an instance of list
if isinstance( type_, list ):
if _check_valid_array_of_types( type_ ) is None:
raise TypeError( "The provided list spec should be a strict multidimensional ARRAY "
"with no varying sizes or types. All non-list elements should be VALID types." )
else:
# Now we work with types
if not isinstance( type_, type ):
raise TypeError(f"{type_} is not a type\n"\
f"- Field '{name}' of BitStruct {cls.__name__} is annotated as {type_}.")
# More specifically, Bits and BitStruct
if not issubclass( type_, Bits ) and not is_bitstruct_class( type_ ):
raise TypeError( "We currently only support BitsN, list, or another BitStruct as BitStruct field:\n"
f"- Field '{name}' of BitStruct {cls.__name__} is annotated as {type_}." )
#-------------------------------------------------------------------------
# _get_self_name
#-------------------------------------------------------------------------
# Return a self name based on fields.
def _get_self_name( fields ):
return( _ANTI_CONFLICT_SELF_NAME if _DEFAULT_SELF_NAME in fields else
_DEFAULT_SELF_NAME )
#-------------------------------------------------------------------------
# _process_cls
#-------------------------------------------------------------------------
# Process the input cls and add methods to it.
_bitstruct_hash_cache = {}
def _process_class( cls, add_init=True, add_str=True, add_repr=True,
add_hash=True ):
# Get annotations of the class
cls_annotations = cls.__dict__.get('__annotations__', {})
if not cls_annotations:
raise AttributeError( "No field is declared in the bit struct definition.\n"
f"Suggestion: check the definition of {cls.__name__} to"
" make sure it only contains 'field_name(string): Type(type).'" )
# Get field information from the annotation and prepare for hashing
fields = {}
hashable_fields = {}
def _convert_list_to_tuple( x ):
if isinstance( x, list ):
return tuple( [ _convert_list_to_tuple( y ) for y in x ] )
return x
reserved_fields = ['to_bits', 'from_bits', 'nbits']
for x in reserved_fields:
assert x not in cls.__dict__, f"Currently a bitstruct cannot have {reserved_fields}, but "\
f"{x} is provided as {cls.__dict__[x]}"
for a_name, a_type in cls_annotations.items():
assert a_name not in reserved_fields, f"Currently a bitstruct cannot have {reserved_fields}, but "\
f"{a_name} is annotated as {a_type}"
_check_field_annotation( cls, a_name, a_type )
fields[ a_name ] = a_type
hashable_fields[ a_name ] = _convert_list_to_tuple( a_type )
cls._hash = _hash = hash( (cls.__name__, *tuple(hashable_fields.items()),
add_init, add_str, add_repr, add_hash) )
if _hash in _bitstruct_hash_cache:
return _bitstruct_hash_cache[ _hash ]
_bitstruct_hash_cache[ _hash ] = cls
# Stamp the special attribute so that translation pass can identify it
# as bit struct.
setattr( cls, _FIELDS, fields )
# Add methods to the class
# Create __init__. Here I follow the dataclass convention that we only
# add our generated __init__ function when add_init is true and user
# did not define their own init.
if add_init:
if not '__init__' in cls.__dict__:
cls.__init__ = _mk_init_fn( _get_self_name(fields), fields )
# Create __str__
if add_str:
if not '__str__' in cls.__dict__:
cls.__str__ = _mk_str_fn( fields )
# Create __repr__
if add_repr:
if not '__repr__' in cls.__dict__:
cls.__repr__ = _mk_repr_fn( fields )
# Create __eq__. There is no need for a __ne__ method as python will
# call __eq__ and negate it.
# NOTE: if user overwrites __eq__ it may lead to different behavior for
# the translated verilog as in the verilog world two bit structs are
# equal only if all the fields are equal. We always try to add __eq__
if not '__eq__' in cls.__dict__:
cls.__eq__ = _mk_eq_fn( fields )
else:
    w_msg = ( f'Overwriting {cls.__qualname__}\'s __eq__ may cause the '
              'translated verilog to behave differently from PyMTL '
              'simulation.')
warnings.warn( w_msg )
# Create __hash__.
if add_hash:
if not '__hash__' in cls.__dict__:
cls.__hash__ = _mk_hash_fn( fields )
# Shunning: add __ilshift__ and _flip for update_ff
assert not '__ilshift__' in cls.__dict__ and not '_flip' in cls.__dict__
cls.__ilshift__, cls._flip = _mk_ff_fn( fields )
# Shunning: add clone
assert not 'clone' in cls.__dict__ and not '__deepcopy__' in cls.__dict__
cls.clone = _mk_clone_fn( fields )
cls.__deepcopy__ = _mk_deepcopy_fn( fields )
# Shunning: add imatmul for assignment, as well as nbits/to_bits/from_bits
assert '__imatmul__' not in cls.__dict__ and 'to_bits' not in cls.__dict__ and \
'nbits' not in cls.__dict__ and 'from_bits' not in cls.__dict__
cls.__imatmul__ = _mk_imatmul_fn( fields )
cls.nbits, cls.to_bits = _mk_nbits_to_bits_fn( fields )
from_bits = _mk_from_bits_fns( fields, cls.nbits )
cls.from_bits = classmethod(from_bits)
assert not 'get_field_type' in cls.__dict__
def get_field_type( cls, name ):
if name in cls.__bitstruct_fields__:
return cls.__bitstruct_fields__[ name ]
raise AttributeError( f"{cls} has no field '{name}'" )
cls.get_field_type = classmethod(get_field_type)
# TODO: maybe add a to_bits and from bits function.
return cls
#-------------------------------------------------------------------------
# bitstruct
#-------------------------------------------------------------------------
# The actual class decorator. We add a * in the argument list so that the
# following argument can only be used as keyword arguments.
def bitstruct( _cls=None, *, add_init=True, add_str=True, add_repr=True, add_hash=True ):
def wrap( cls ):
    return _process_class( cls, add_init, add_str, add_repr, add_hash )
# Called as @bitstruct(...)
if _cls is None:
return wrap
# Called as @bitstruct without parens.
return wrap( _cls )
#-------------------------------------------------------------------------
# mk_bitstruct
#-------------------------------------------------------------------------
# Dynamically generate a bit struct class.
# TODO: should we add base parameters to support inheritance?
def mk_bitstruct( cls_name, fields, *, namespace=None, add_init=True,
add_str=True, add_repr=True, add_hash=True ):
# copy namespace since will mutate it
namespace = {} if namespace is None else namespace.copy()
# We assume fields is a dictionary and thus there won't be duplicate
# field names. So we only check if the field names are indeed strings
# and that they are not keywords.
annos = {}
for name, f in fields.items():
if not isinstance( name, str ) or not name.isidentifier():
raise TypeError( f'Field name {name!r} is not a valid identifier!' )
if keyword.iskeyword( name ):
raise TypeError( f'Field name {name!r} is a keyword!' )
annos[ name ] = f
namespace['__annotations__'] = annos
cls = types.new_class( cls_name, (), {}, lambda ns: ns.update( namespace ) )
return bitstruct( cls, add_init=add_init, add_str=add_str,
add_repr=add_repr, add_hash=add_hash )
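# Illustrative example (not from the original source), sketching the generated
# to_bits/from_bits round trip for a dynamically created bitstruct. It assumes
# Bits values behave as the generated from_bits expects (i.e. expose to_bits):
#
#   Pixel = mk_bitstruct( 'Pixel', { 'r': Bits4, 'g': Bits4, 'b': Bits4 } )
#   px    = Pixel( 1, 2, 3 )
#   bits  = px.to_bits()            # 12-bit value, r packed into the high bits
#   assert Pixel.from_bits( bits ) == px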
|
utils/surface.py | fsanges/glTools | 165 | 40848 | import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.base
import glTools.utils.curve
import glTools.utils.component
import glTools.utils.mathUtils
import glTools.utils.matrix
import glTools.utils.shape
import glTools.utils.stringUtils
import math
def isSurface(surface):
'''
Check if the specified object is a nurbs surface or transform parent of a surface
@param surface: Object to query
@type surface: str
'''
# Check object exists
if not mc.objExists(surface): return False
# Check shape
if mc.objectType(surface) == 'transform': surface = mc.listRelatives(surface,s=True,ni=True,pa=True)[0]
if mc.objectType(surface) != 'nurbsSurface': return False
# Return result
return True
def getSurfaceFn(surface):
'''
Create an MFnNurbsSurface class object from the specified nurbs surface
@param surface: Surface to create function class for
@type surface: str
'''
# Checks
if not isSurface(surface): raise Exception('Object '+surface+' is not a valid surface!')
if mc.objectType(surface) == 'transform':
surface = mc.listRelatives(surface,s=True,ni=True,pa=True)[0]
# Get MFnNurbsSurface
selection = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getSelectionListByName(surface,selection)
surfacePath = OpenMaya.MDagPath()
selection.getDagPath(0,surfacePath)
surfaceFn = OpenMaya.MFnNurbsSurface()
surfaceFn.setObject(surfacePath)
# Return result
return surfaceFn
def chordLength(surface,param=0.0,direction='u'):
'''
Return the length of a surface isoparm given a parameter and a direction
@param surface: Surface to query closest point from
@type surface: str
@param param: The parameter on the surface to query length of
@type param: float
@param direction: Direction along the surface to measure length of
@type direction: str
'''
# Check surface
if not isSurface(surface): raise Exception('Object '+surface+' is not a valid surface!')
# Duplicate surface curve
curve = mc.duplicateCurve(surface+'.'+direction+'['+str(param)+']',ch=0,rn=0,local=0)
# Measure curve length
length = mc.arclen(curve[0])
# Cleanup
mc.delete(curve)
# Return result
return length
def closestPoint(surface,pos=(0,0,0)):
'''
Return closest point on surface to target position
@param surface: Surface to query closest point from
@type surface: str
@param pos: Position to query surface from
@type pos: tuple/list
'''
# Check surface
if not isSurface(surface): raise Exception('Object '+surface+' is not a valid surface!')
# Get point world position
pos = glTools.utils.base.getPosition(pos)
pt = OpenMaya.MPoint(pos[0],pos[1],pos[2],1.0)
# Get surface function set
surfFn = getSurfaceFn(surface)
# Get uCoord and vCoord pointer objects
uCoord = OpenMaya.MScriptUtil()
uCoord.createFromDouble(0.0)
uCoordPtr = uCoord.asDoublePtr()
vCoord = OpenMaya.MScriptUtil()
vCoord.createFromDouble(0.0)
vCoordPtr = vCoord.asDoublePtr()
# get closest uCoord to edit point position
surfFn.closestPoint(pt,uCoordPtr,vCoordPtr,True,0.0001,OpenMaya.MSpace.kWorld)
return (OpenMaya.MScriptUtil(uCoordPtr).asDouble(),OpenMaya.MScriptUtil(vCoordPtr).asDouble())
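# Illustrative usage (not part of the original file; the surface name below is a
# placeholder):
#	u,v = closestPoint('nurbsSurfaceShape1',(0.0,5.0,0.0))
#	pt = mc.pointOnSurface('nurbsSurfaceShape1',u=u,v=v,p=True)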
def distToSurface(surface,pos=(0,0,0)):
	'''
	Return the distance from a world position to the closest point on the specified surface
	@param surface: Surface to measure distance to
	@type surface: str
	@param pos: Position to measure the distance from
	@type pos: tuple/list
	'''
# Get point world position
pos = glTools.utils.base.getPosition(pos)
# Get closest point to surface
uv = closestPoint(surface,pos)
pt = mc.pointOnSurface(surface,u=uv[0],v=uv[1],p=True)
# Get distance to surface point
dist = glTools.utils.mathUtils.distanceBetween(pos,pt)
# Return Result
return dist
def snapPtsToSurface(surface,pointList):
	'''
	Snap a list of points to the closest point on the specified nurbs surface
@param surface: Nurbs surface to snap points to
@type surface: str
@param pointList: Point to snap to the specified surface
@type pointList: list
'''
# Check surface
if not isSurface(surface): raise Exception('Object '+surface+' is not a valid surface!')
# Check points
pointList = mc.ls(pointList,fl=True)
# Transform types
transform = ['transform','joint','ikHandle','effector']
# Snap points
for pt in pointList:
# Check Transform
if transform.count(mc.objectType(pt)):
snapToSurface(surface,pt,0.0,0.0,True,snapPivot=False)
continue
# Move Point
pos = mc.pointPosition(pt)
(uParam,vParam) = closestPoint(surface,pos)
sPt = mc.pointOnSurface(surface,u=uParam,v=vParam,position=True)
mc.move(sPt[0],sPt[1],sPt[2],pt,ws=True,a=True)
def locatorSurface(surface,controlPoints=[],locatorScale=0.075,prefix=''):
'''
Drive the control point positions of a surface with a set of control locators
@param surface: Input surface to connect locators positions to
@type surface: str
@param controlPoints: List of control points to be driven by locators. If left as default (None), all control points will be connected.
@type controlPoints: list
@param locatorScale: Scale of the locators relative to the area of the surface
@type locatorScale: float
@param prefix: Name prefix for newly created nodes
@type prefix: str
'''
# Check surface
if not glTools.utils.surface.isSurface(surface):
raise Exception('Object '+surface+' is not a valid surface!')
if mc.objectType(surface) == 'transform':
surface = mc.listRelatives(surface,s=True,ni=True,pa=True)[0]
# Check prefix
if not prefix: prefix = glTools.utils.stringUtils.stripSuffix(surface)
# Calculate locator scale
locatorScale *= math.sqrt(glTools.utils.surface.surfaceArea(surface))
# Get Control Points
if not controlPoints:
controlPoints = glTools.utils.component.getComponentIndexList(surface)[surface]
else:
controlPoints = glTools.utils.component.getComponentIndexList(controlPoints)[surface]
# Create locators and connect to control points
locatorList = []
for cv in controlPoints:
# Get index (string)
ind = glTools.utils.component.getSingleIndex(surface,cv)
indStr = glTools.utils.stringUtils.stringIndex(ind,2)
# Create locator
loc = prefix+'_cv'+indStr+'_loc'
loc = mc.spaceLocator(n=loc)[0]
locatorList.append(loc)
mc.setAttr(loc+'.localScale',locatorScale,locatorScale,locatorScale)
# Get control point world position
pos = mc.pointPosition(surface+'.cv['+str(cv[0])+']['+str(cv[1])+']')
mc.setAttr(loc+'.t',pos[0],pos[1],pos[2])
mc.makeIdentity(loc,apply=True,t=1,r=1,s=1,n=0)
# Connect locator position to control point
mc.connectAttr(loc+'.worldPosition[0]',surface+'.controlPoints['+str(ind)+']')
return locatorList
def surfaceArea(surface,worldSpace=True):
'''
Calculates the surface area of a specified nurbs surface.
@param surface: Nurbs surface to calculate the surface area for
@type surface: str
@param worldSpace: Calculate the surface area in world or local space units
@type worldSpace: bool
'''
# Check Surface
if not mc.objExists(surface): raise Exception('Object '+surface+' does not exist!')
if mc.objectType(surface) == 'transform':
surfaceShape = mc.listRelatives(surface,s=True,ni=True,pa=True)[0]
if mc.objectType(surfaceShape) != 'nurbsSurface':
raise Exception('Object '+surface+' is not a valid nurbs surface!')
surface = surfaceShape
# Get MFnNurbsSurface
surfaceFn = getSurfaceFn(surface)
# Get surface area
area = 0.0
if worldSpace: area = surfaceFn.area(OpenMaya.MSpace.kWorld)
else: area = surfaceFn.area(OpenMaya.MSpace.kObject)
# Return result
return area
def snapToSurface(surface,obj,uValue=0.0,vValue=0.0,useClosestPoint=False,snapPivot=False):
'''
Snap an object (or transform pivot) to a specified point on a surface.
@param surface: Curve to snap to
@type surface: str
@param obj: Object to move to point on surface
@type obj: str
	@param uValue: U Parameter value of the surface to snap to
	@type uValue: float
	@param vValue: V Parameter value of the surface to snap to
@type vValue: float
@param useClosestPoint: Use the closest point on the surface instead of the specified uv parameter
@type useClosestPoint: bool
@param snapPivot: Move only the objects pivot to the surface point
@type snapPivot: bool
'''
# Check surface
if not isSurface(surface): raise Exception('Object '+surface+' is not a valid surface!!')
# Check uValue/vValue
minu = mc.getAttr(surface+'.minValueU')
maxu = mc.getAttr(surface+'.maxValueU')
minv = mc.getAttr(surface+'.minValueV')
maxv = mc.getAttr(surface+'.maxValueV')
# Closest Point
if useClosestPoint:
pos = mc.xform(obj,q=True,ws=True,rp=True)
(uValue,vValue) = closestPoint(surface,pos)
# Verify surface parameter
	if uValue < minu or uValue > maxu: raise Exception('U parameter '+str(uValue)+' is not within the U parameter range for '+surface+'!!')
	if vValue < minv or vValue > maxv: raise Exception('V parameter '+str(vValue)+' is not within the V parameter range for '+surface+'!!')
# Get surface point position
pnt = mc.pointPosition(surface+'.uv['+str(uValue)+']['+str(vValue)+']')
# Snap to Curve
piv = mc.xform(obj,q=True,ws=True,rp=True)
if snapPivot: mc.xform(obj,piv=pnt,ws=True)
else: mc.move(pnt[0]-piv[0],pnt[1]-piv[1],pnt[2]-piv[2],obj,r=True,ws=True)
def orientToSurface( surface,
obj,
uValue = 0.0,
vValue = 0.0,
useClosestPoint = False,
tangentUAxis = 'x',
tangentVAxis = 'y',
alignTo = 'u' ):
'''
Orient object to a specified point on a surface.
@param surface: Surface to orient object to
@type surface: str
@param obj: Object to orient
@type obj: str
	@param uValue: U Parameter value of the surface to orient to
	@type uValue: float
	@param vValue: V Parameter value of the surface to orient to
@type vValue: float
@param useClosestPoint: Use the closest point on the surface instead of the specified uv parameter
@type useClosestPoint: bool
@param tangentUAxis: Basis axis that will be derived from the U tangent of the surface point
@type tangentUAxis: str
@param tangentVAxis: Basis axis that will be derived from the V tangent of the surface point
@type tangentVAxis: str
	@param alignTo: Surface tangent direction ('u' or 'v') that the orientation will be aligned to
	@type alignTo: str
'''
# Check surface
if not isSurface(surface): raise Exception('Object '+surface+' is not a valid surface!!')
# Check Obj
transform = ['transform','joint','ikHandle','effector']
if not transform.count(mc.objectType(obj)):
raise Exception('Object '+obj+' is not a valid transform!!')
# Check uValue/vValue
minu = mc.getAttr(surface+'.minValueU')
maxu = mc.getAttr(surface+'.maxValueU')
minv = mc.getAttr(surface+'.minValueV')
maxv = mc.getAttr(surface+'.maxValueV')
# Closest Point
if useClosestPoint:
pos = mc.xform(obj,q=True,ws=True,rp=True)
(uValue,vValue) = closestPoint(surface,pos)
# Verify surface parameter
	if uValue < minu or uValue > maxu: raise Exception('U parameter '+str(uValue)+' is not within the U parameter range for '+surface+'!!')
	if vValue < minv or vValue > maxv: raise Exception('V parameter '+str(vValue)+' is not within the V parameter range for '+surface+'!!')
# Check object
if not mc.objExists(obj): raise Exception('Object '+obj+' does not exist!!')
rotateOrder = mc.getAttr(obj+'.ro')
# Get tangents at surface point
tanU = mc.pointOnSurface(surface,u=uValue,v=vValue,ntu=True)
tanV = mc.pointOnSurface(surface,u=uValue,v=vValue,ntv=True)
# Build rotation matrix
aimVector = tanU
if alignTo == 'v': aimVector = tanV
upVector = tanV
if alignTo == 'v': upVector = tanU
aimAxis = tangentUAxis
if alignTo == 'v': aimAxis = tangentVAxis
upAxis = tangentVAxis
if alignTo == 'v': upAxis = tangentUAxis
mat = glTools.utils.matrix.buildRotation(aimVector,upVector,aimAxis,upAxis)
rot = glTools.utils.matrix.getRotation(mat,rotateOrder)
# Orient object to surface
mc.rotate(rot[0],rot[1],rot[2],obj,a=True,ws=True)
def rebuild(surface,spansU=0,spansV=0,fullRebuildU=False,fullRebuildV=False,rebuildUfirst=True,replaceOrig=False):
'''
Do brute force surface rebuild for even parameterization
@param surface: Nurbs surface to rebuild
@type surface: str
@param spansU: Number of spans along U. If 0, keep original value.
@type spansU: int
@param spansV: Number of spans along V. If 0, keep original value.
@type spansV: int
@param replaceOrig: Replace original surface, or create new rebuilt surface.
@type replaceOrig: bool
'''
# ==========
# - Checks -
# ==========
# Check surface
if not isSurface(surface):
raise Exception('Object "'+surface+'" is not a valid surface!')
# Check spans
if not spansU: spansU = mc.getAttr(surface+'.spansU')
if not spansV: spansV = mc.getAttr(surface+'.spansV')
# =============
# - Rebuild U -
# =============
# Get V range
if rebuildUfirst:
dir = 'u'
opp = 'v'
spans = spansU
min = mc.getAttr(surface+'.minValueV')
max = mc.getAttr(surface+'.maxValueV')
else:
dir = 'v'
opp = 'u'
spans = spansV
min = mc.getAttr(surface+'.minValueU')
max = mc.getAttr(surface+'.maxValueU')
val = min + (max - min) * 0.5
	# Calculate surface length
iso_crv = mc.duplicateCurve(surface+'.'+opp+'['+str(val)+']',ch=0,rn=0,local=0)[0]
iso_len = mc.arclen(iso_crv)
iso_inc = iso_len / spans
# Get spaced isoparm list
curveFn = glTools.utils.curve.getCurveFn(iso_crv)
iso_list = [surface+'.'+dir+'['+str(curveFn.findParamFromLength(iso_inc*i))+']' for i in range(spans+1)]
mc.delete(iso_crv)
# Check full rebuild
if fullRebuildV:
# Extract isoparm curves
iso_crv_list = [mc.duplicateCurve(iso,ch=False,rn=False,local=False)[0] for iso in iso_list]
# Rebuild isoparm curves
for iso_crv in iso_crv_list:
mc.rebuildCurve(iso_crv,ch=False,rpo=True,rt=0,end=1,kr=0,kcp=0,kep=1,kt=1,s=0,d=3,tol=0)
# Loft final surface
int_surface = mc.loft(iso_crv_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# Delete intermediate curves
mc.delete(iso_crv_list)
else:
# Loft intermediate surface
int_surface = mc.loft(iso_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# =============
# - Rebuild V -
# =============
# Get V range (intermediate surface)
if rebuildUfirst:
dir = 'u'
opp = 'v'
spans = spansV
min = mc.getAttr(int_surface+'.minValueU')
max = mc.getAttr(int_surface+'.maxValueU')
else:
dir = 'v'
opp = 'u'
spans = spansU
min = mc.getAttr(int_surface+'.minValueV')
max = mc.getAttr(int_surface+'.maxValueV')
val = min + (max - min) * 0.5
	# Calculate surface length (intermediate surface)
iso_crv = mc.duplicateCurve(int_surface+'.'+opp+'['+str(val)+']',ch=0,rn=0,local=0)[0]
iso_len = mc.arclen(iso_crv)
iso_inc = iso_len / spans
# Get spaced isoparm list
curveFn = glTools.utils.curve.getCurveFn(iso_crv)
iso_list = [int_surface+'.'+dir+'['+str(curveFn.findParamFromLength(iso_inc*i))+']' for i in range(spans+1)]
mc.delete(iso_crv)
# Check full rebuild
if fullRebuildU:
# Extract isoparm curves
iso_crv_list = [mc.duplicateCurve(iso,ch=False,rn=False,local=False)[0] for iso in iso_list]
# Rebuild isoparm curves
for iso_crv in iso_crv_list:
mc.rebuildCurve(iso_crv,ch=False,rpo=True,rt=0,end=1,kr=0,kcp=0,kep=1,kt=1,s=0,d=3,tol=0)
# Loft final surface
rebuild_surface = mc.loft(iso_crv_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# Delete intermediate curves
mc.delete(iso_crv_list)
else:
# Loft final surface
rebuild_surface = mc.loft(iso_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# Rename rebuilt surface
rebuild_surface = mc.rename(rebuild_surface,surface+'_rebuild')
rebuild_surfaceShape = mc.listRelatives(surface,s=True,ni=True,pa=True)[0]
mc.delete(int_surface)
# Re-parameterize 0-1
mc.rebuildSurface(rebuild_surface,ch=False,rpo=True,dir=2,rt=0,end=1,kr=0,kcp=1,kc=1,tol=0,fr=0)
# Initialize return value
outputShape = rebuild_surfaceShape
# ====================
# - Replace Original -
# ====================
if replaceOrig:
"""
# Get original shape
shapes = glTools.utils.shape.getShapes(surface,nonIntermediates=True,intermediates=False)
if not shapes:
# Find Intermediate Shapes
shapes = glTools.utils.shape.listIntermediates(surface)
# Check shapes
if not shapes:
raise Exception('Unable to determine shape for surface "'+surface+'"!')
# Check connections
if mc.listConnections(shapes[0]+'.create',s=True,d=False):
# Find Intermediate Shapes
shapes = glTools.utils.shape.findInputShape(shapes[0])
"""
# Check history
shapes = mc.listRelatives(surface,s=True,ni=True,pa=True)
if not shapes: raise Exception('Unable to determine shape for surface "'+surface+'"!')
shape = shapes[0]
shapeHist = mc.listHistory(shape)
if shapeHist.count(shape): shapeHist.remove(shape)
		if shapeHist: print('Surface "'+surface+'" contains construction history, creating new shape!')
# Override shape info and delete intermediate
mc.connectAttr(rebuild_surfaceShape+'.local',shape+'.create',f=True)
outputShape = shape
# =================
# - Return Result -
# =================
return outputShape
def rebuild_old(surface,spansU=0,spansV=0,fullRebuildU=False,fullRebuildV=False,replaceOrig=False):
'''
Do brute force surface rebuild for even parameterization
@param surface: Nurbs surface to rebuild
@type surface: str
@param spansU: Number of spans along U. If 0, keep original value.
@type spansU: int
@param spansV: Number of spans along V. If 0, keep original value.
@type spansV: int
@param replaceOrig: Replace original surface, or create new rebuilt surface.
@type replaceOrig: bool
'''
# Check surface
if not isSurface(surface):
raise Exception('Object "'+surface+'" is not a valid surface!')
# Check spans
if not spansU: spansU = mc.getAttr(surface+'.spansU')
if not spansV: spansV = mc.getAttr(surface+'.spansV')
# -------------
# - Rebuild V -
# Get V range
minu = mc.getAttr(surface+'.minValueU')
maxu = mc.getAttr(surface+'.maxValueU')
u = minu + (maxu - minu) * 0.5
# Extract isoparm curve
iso_crv = mc.duplicateCurve(surface+'.u['+str(u)+']',ch=0,rn=0,local=0)[0]
iso_len = mc.arclen(iso_crv)
iso_inc = iso_len / spansV
curveFn = glTools.utils.curve.getCurveFn(iso_crv)
iso_list = [surface+'.v['+str(curveFn.findParamFromLength(iso_inc*i))+']' for i in range(spansV+1)]
mc.delete(iso_crv)
# Check full rebuild
if fullRebuildU:
# Extract isoparm curves
iso_crv_list = [mc.duplicateCurve(iso,ch=False,rn=False,local=False)[0] for iso in iso_list]
# Rebuild isoparm curves
for iso_crv in iso_crv_list:
mc.rebuildCurve(iso_crv,ch=False,rpo=True,rt=0,end=1,kr=0,kcp=0,kep=1,kt=1,s=0,d=3,tol=0)
# Loft final surface
int_surface = mc.loft(iso_crv_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# Delete intermediate curves
mc.delete(iso_crv_list)
else:
# Loft intermediate surface
int_surface = mc.loft(iso_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# -------------
# - Rebuild U -
# Get V range (intermediate surface)
minv = mc.getAttr(int_surface+'.minValueV')
maxv = mc.getAttr(int_surface+'.maxValueV')
v = minv + (maxv - minv) * 0.5
# Extract isoparm curve (intermediate surface)
iso_crv = mc.duplicateCurve(int_surface+'.v['+str(v)+']',ch=0,rn=0,local=0)[0]
iso_len = mc.arclen(iso_crv)
iso_inc = iso_len / spansU
curveFn = glTools.utils.curve.getCurveFn(iso_crv)
iso_list = [int_surface+'.u['+str(curveFn.findParamFromLength(iso_inc*i))+']' for i in range(spansU+1)]
mc.delete(iso_crv)
# Check full rebuild
if fullRebuildV:
# Extract isoparm curves
iso_crv_list = [mc.duplicateCurve(iso,ch=False,rn=False,local=False)[0] for iso in iso_list]
# Rebuild isoparm curves
for iso_crv in iso_crv_list:
mc.rebuildCurve(iso_crv,ch=False,rpo=True,rt=0,end=1,kr=0,kcp=0,kep=1,kt=1,s=0,d=3,tol=0)
# Loft final surface
rebuild_surface = mc.loft(iso_crv_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# Delete intermediate curves
mc.delete(iso_crv_list)
else:
# Loft final surface
rebuild_surface = mc.loft(iso_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
rebuild_surface = mc.rename(rebuild_surface,surface+'_rebuild')
rebuild_surfaceShape = mc.listRelatives(surface,s=True,ni=True,pa=True)[0]
mc.delete(int_surface)
# Initialize return value
outputShape = rebuild_surfaceShape
# --------------------
# - Replace Original -
if replaceOrig:
"""
# Get original shape
shapes = glTools.utils.shape.getShapes(surface,nonIntermediates=True,intermediates=False)
if not shapes:
# Find Intermediate Shapes
shapes = glTools.utils.shape.listIntermediates(surface)
# Check shapes
if not shapes:
raise Exception('Unable to determine shape for surface "'+surface+'"!')
# Check connections
if mc.listConnections(shapes[0]+'.create',s=True,d=False):
# Find Intermediate Shapes
shapes = glTools.utils.shape.findInputShape(shapes[0])
"""
# Check history
shapes = mc.listRelatives(surface,s=True,ni=True,pa=True)
if not shapes: raise Exception('Unable to determine shape for surface "'+surface+'"!')
shape = shapes[0]
shapeHist = mc.listHistory(shape)
if shapeHist.count(shape): shapeHist.remove(shape)
		if shapeHist: print('Surface "'+surface+'" contains construction history, creating new shape!')
# Override shape info and delete intermediate
mc.connectAttr(rebuild_surfaceShape+'.local',shape+'.create',f=True)
outputShape = shape
# Return result
return outputShape
def intersect(surface,source,direction):
'''
Return the intersection point on a specified nurbs surface given a source point and direction
@param surface: Nurbs surface to perform intersection on
@type surface: str
@param source: Source point for the intersection ray
@type source: list or tuple or str
@param direction: Direction of the intersection ray intersection
@type direction: list or tuple
'''
# Get surfaceFn
surfaceFn = getSurfaceFn(surface)
# Get source point
source = glTools.utils.base.getMPoint(source)
# Get direction vector
direction = OpenMaya.MVector(direction[0],direction[1],direction[2])
# Calculate intersection
hitPt = OpenMaya.MPoint()
hit = surfaceFn.intersect(source,direction,None,None,hitPt,0.0001,OpenMaya.MSpace.kWorld,False,None,None)
if not hit:
		print('No intersection found!')
hitPt = OpenMaya.MPoint.origin
# Return intersection hit point
return [hitPt[0],hitPt[1],hitPt[2]]
def projectToSurface(surface,targetSurface,direction='u',keepOriginal=False,prefix=''):
'''
Project the edit points of the specified nurbs surface to another nurbs or polygon object
@param surface: Surface to project
@type surface: str
@param targetSurface: Surface to project onto
@type targetSurface: str
@param direction: Surface direction to extract isoparm curves from
@type direction: str
@param keepOriginal: Create new surface or replace original
@type keepOriginal: bool
@param prefix: Name prefix for all created nodes
@type prefix: str
'''
# Check surface
if not mc.objExists(surface):
raise Exception('Surface "'+surface+'" does not exist!!')
if not isSurface(surface):
raise Exception('Object "'+surface+'" is not a valid nurbs surface!!')
# Check target surface
if not mc.objExists(targetSurface):
raise Exception('Target surface "'+targetSurface+'" does not exist!!')
# Check prefix
if not prefix: prefix = glTools.utils.stringUtils.stripSuffix(surface)
# Check direction
direction = direction.upper()
if (direction != 'U') and (direction != 'V'):
raise Exception('Invalid surface direction specified! Must specify either "u" or "v"!!')
# Get surface information
spans = mc.getAttr(surface+'.spans'+direction)
minVal = mc.getAttr(surface+'.minValue'+direction)
maxVal = mc.getAttr(surface+'.maxValue'+direction)
# Create main surface group
mainGrp = mc.createNode('transform',n=prefix+'_grp')
# Extract curves
curveList = []
curveGrpList = []
curveLocList = []
geomConstraintList = []
spanInc = (maxVal - minVal)/spans
for i in range(spans+1):
# Curve prefix
strInd = glTools.utils.stringUtils.stringIndex(i,2)
crvPrefix = prefix+'_crv'+strInd
# Create curve group
curveGrp = crvPrefix+'_grp'
curveGrp = mc.createNode('transform',n=curveGrp)
curveGrp = mc.parent(curveGrp,mainGrp)[0]
curveGrpList.append(curveGrp)
# Get surface curve
srfCurveName = crvPrefix+'_crv'
srfCurve = mc.duplicateCurve(surface+'.'+direction.lower()+'['+str(i*spanInc)+']',ch=0,rn=0,local=0,n=srfCurveName)
srfCurve = mc.parent(srfCurve[0],curveGrp)[0]
curveList.append(srfCurve)
# Generate curve locators
curveLocatorList = glTools.utils.curve.locatorEpCurve(srfCurve,locatorScale=0.05,prefix=crvPrefix)
curveLocatorList = mc.parent(curveLocatorList,curveGrp)
curveLocList.append(curveLocatorList)
# Create geometry constraints
for loc in curveLocatorList:
geomConstraint = crvPrefix+'_geometryConstraint'
geomConstraint = mc.geometryConstraint(targetSurface,loc,n=geomConstraint)
geomConstraintList.append(geomConstraint[0])
# Center group pivot
mc.xform(curveGrp,cp=True)
# Delete original surface
surfaceName = prefix+'_surface'
if not keepOriginal:
surfaceName = surface
mc.delete(surface)
# Loft new surface
surfaceLoft = mc.loft(curveList,ch=1,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)
surface = mc.rename(surfaceLoft[0],surface)
surface = mc.parent(surface,mainGrp)[0]
mc.reorder(surface,f=True)
loft = mc.rename(surfaceLoft[1],prefix+'_loft')
# Return result
return[surface,loft,curveList,curveGrpList,curveLocList,geomConstraintList]
def rebuildFromExistingIsoparms(surface,direction='u',degree=3,close=False,keepHistory=False):
'''
Build a new nurbs surface from an existing surfaces isoparms
@param surface: Surface to build from
@type surface: str
@param direction: Surface direction to build from
@type direction: str
@param degree: Degree to build new surface to
@type degree: int
@param close: Close lofted surface
@type close: bool
@param keepHistory: Keep loft surface history
@type keepHistory: bool
'''
# Check surface
if not mc.objExists(surface):
raise Exception('Surface "'+surface+'" does not exist!!')
if not isSurface(surface):
raise Exception('Object "'+surface+'" is not a valid nurbs surface!!')
# Check direction
direction = direction.lower()
if not direction == 'u' and not direction == 'v':
raise Exception('Invalid surface direction! Accepted values are "u" and "v"!')
# Get surface details
surfFn = getSurfaceFn(surface)
spans = mc.getAttr(surface+'.spans'+direction.upper())
degree = mc.getAttr(surface+'.degree'+direction.upper())
form = mc.getAttr(surface+'.form'+direction.upper())
knots = OpenMaya.MDoubleArray()
if direction == 'u': surfFn.getKnotsInU(knots)
if direction == 'v': surfFn.getKnotsInV(knots)
# Build iso list for surface rebuild
if degree > 1: knots = knots[(degree-1):-(degree-1)]
isoList = [surface+'.'+direction+'['+str(i)+']' for i in knots]
if not close and form:
#isoList.append(isoList[0])
		isoList[-1] = surface+'.'+direction+'['+str(float(knots[len(knots)-1])-0.0001)+']'
# Loft new rebuild surface
rebuild = mc.loft(isoList,ch=keepHistory,u=True,c=close,ar=False,d=degree,ss=1,rn=False,po=False,rsn=(direction=='v'))
rebuild = mc.rename(rebuild[0],surface+'_rebuild')
# Return result
return rebuild
def rebuildFromIsoparms(surface,spansU=0,spansV=0,degree=3,keepHistory=False):
'''
Build a new nurbs surface from an existing surfaces isoparms
@param surface: Surface to build from
@type surface: str
@param direction: Surface direction to build from
@type direction: str
@param degree: Degree to build new surface to
@type degree: int
@param keepHistory: Keep loft surface history
@type keepHistory: bool
'''
# Check Surface
if not mc.objExists(surface):
raise Exception('Surface "'+surface+'" does not exist!!')
if not isSurface(surface):
raise Exception('Object "'+surface+'" is not a valid nurbs surface!!')
# Initialize function pointers
uMinPtr = OpenMaya.MScriptUtil().asDoublePtr()
uMaxPtr = OpenMaya.MScriptUtil().asDoublePtr()
vMinPtr = OpenMaya.MScriptUtil().asDoublePtr()
vMaxPtr = OpenMaya.MScriptUtil().asDoublePtr()
# Get surface details
surfFn = getSurfaceFn(surface)
surfFn.getKnotDomain(uMinPtr,uMaxPtr,vMinPtr,vMaxPtr)
uMin = OpenMaya.MScriptUtil(uMinPtr).asDouble()
uMax = OpenMaya.MScriptUtil(uMaxPtr).asDouble()
vMin = OpenMaya.MScriptUtil(vMinPtr).asDouble()
vMax = OpenMaya.MScriptUtil(vMaxPtr).asDouble()
uDif = uMax - uMin
vDif = vMax - vMin
# Get surface form
closeU = bool(mc.getAttr(surface+'.formU'))
closeV = bool(mc.getAttr(surface+'.formV'))
# Check spans
if not spansU: spansU = surfFn.numKnotsInU()
if not spansV: spansV = surfFn.numKnotsInV()
# Get new knot values
uList = []
vList = []
uInc = uDif/(spansU-int(not closeU))
vInc = vDif/(spansV-int(not closeV))
for u in range(spansU): uList.append(uMin+(uInc*u))
for v in range(spansV): vList.append(vMin+(vInc*v))
# Rebuild in U
uLoft = mc.loft([surface+'.u['+str(i)+']' for i in uList],close=closeU,degree=degree)
uSurface = uLoft[0]
	# Rebuild in V
vLoft = mc.loft([uSurface+'.v['+str(i)+']' for i in vList],close=closeV,degree=degree)
rebuildSurface = vLoft[0]
# Return result
return rebuildSurface
|
menpo3d/base.py | apapaion/menpo3d | 134 | 40854 | import os
from pathlib import Path
def menpo3d_src_dir_path():
r"""The path to the top of the menpo3d Python package.
Useful for locating where the data folder is stored.
Returns
-------
    path : ``pathlib.Path``
The full path to the top of the Menpo3d package
"""
return Path(os.path.abspath(__file__)).parent
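# Illustrative example (not part of the original file): the returned pathlib.Path
# can be joined directly, e.g. menpo3d_src_dir_path() / 'data' would point at a
# bundled data folder, as suggested by the docstring above.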
|
language/orqa/ops/orqa_ops_test.py | Xtuden-com/language | 1,199 | 40863 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ORQA ops."""
from language.orqa import ops as orqa_ops
import tensorflow.compat.v1 as tf
class OrqaOpsTest(tf.test.TestCase):
def test_reader_inputs(self):
concat_inputs = orqa_ops.reader_inputs(
question_token_ids=[0, 1],
block_token_ids=[[2, 3, 4], [5, 6, 0]],
block_lengths=[3, 2],
block_token_map=[[1, 2, 5], [1, 3, 4]],
answer_token_ids=[[3, 4], [7, 0]],
answer_lengths=[2, 1],
cls_token_id=10,
sep_token_id=11,
max_sequence_len=10)
self.assertAllEqual(
concat_inputs.token_ids.numpy(),
[[10, 0, 1, 11, 2, 3, 4, 11, 0, 0], [10, 0, 1, 11, 5, 6, 11, 0, 0, 0]])
self.assertAllEqual(
concat_inputs.mask.numpy(),
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]])
self.assertAllEqual(
concat_inputs.segment_ids.numpy(),
[[0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]])
self.assertAllEqual(
concat_inputs.block_mask.numpy(),
[[0, 0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0]])
self.assertAllEqual(concat_inputs.token_map.numpy(),
[[-1, -1, -1, -1, 1, 2, 5, -1, -1, -1],
[-1, -1, -1, -1, 1, 3, -1, -1, -1, -1]])
self.assertAllEqual(concat_inputs.gold_starts.numpy(), [[5], [-1]])
self.assertAllEqual(concat_inputs.gold_ends.numpy(), [[6], [-1]])
def test_has_answer(self):
result = orqa_ops.has_answer(blocks=["abcdefg", "hijklmn"], answers=["hij"])
self.assertAllEqual(result.numpy(), [False, True])
if __name__ == "__main__":
tf.test.main()
|
Firmware/adctest.py | sanielfishawy/ODrive | 1,068 | 40867 | import numpy as np
import matplotlib.pyplot as plt
adchist = [(0, 137477),
(1, 98524),
(2, 71744),
(3, 60967),
(4, 44372),
(5, 46348),
(6, 19944),
(7, 10092),
(8, 13713),
(9, 11182),
(10, 6903),
(11, 4072),
(12, 2642),
(13, 968),
(14, 296),
(15, 166),
(16, 17),
(17, 2),
(-1, 39662),
(-2, 43502),
(-3, 57596),
(-4, 33915),
(-5, 25611),
(-6, 10880),
(-7, 8237),
(-8, 3518),
(-9, 4789),
(-10, 4689),
(-11, 6345),
(-12, 3901),
(-13, 5781),
(-14, 4803),
(-15, 6428),
(-16, 3563),
(-17, 4478),
(-18, 976),
(-19, 491)]
adchist.sort()
adchist = np.array(adchist)
plt.figure()
plt.bar(adchist[:,0], adchist[:,1])
plt.show() |
api/tests/integration/tests/todo/load_utf8.py | epam/Indigo | 204 | 40954 | # coding=utf-8
import sys
sys.path.append('../../common')
from env_indigo import *
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
print("****** Load molfile with UTF-8 characters in Data S-group ********")
m = indigo.loadMoleculeFromFile(joinPathPy("molecules/sgroups_utf8.mol", __file__))
indigo.setOption("molfile-saving-mode", "2000")
res = m.molfile()
m = indigo.loadMolecule(res)
# TODO: Fails on IronPython 2.7.9:
# - M SED 1 single-value-бензол
# + M SED 1 single-value-������
if isIronPython():
from System.Text import Encoding
from System import Console
print(m.molfile())
print(res)
print(m.cml())
# reload(sys)
# sys.setdefaultencoding('utf-8')
# sys.stdout = codecs.getwriter('utf8')(sys.stdout)
# Console.WriteLine(m.molfile().encode("utf-8-sig"))
# print(Encoding.UTF8.GetString(Encoding.Default.GetBytes(m.molfile().encode("utf-8-sig"))))
# Console.Write(Encoding.UTF8.GetString(Encoding.UTF8.GetBytes(m.molfile().encode("utf-8"))))
# Console.Write(Encoding.UTF8.GetString(Encoding.UTF8.GetBytes(res.encode("utf-8"))))
# Console.Write(Encoding.UTF8.GetString(Encoding.UTF8.GetBytes(m.cml().encode("utf-8"))))
# m.saveMolfile("test.mol")
# with codecs.open(joinPathPy("test.mol", __file__), "r", "utf-8-sig") as temp:
# print(temp.read()[510:])
# with codecs.open('test', 'w', "utf-8") as f:
# f.write(m.molfile())
# Console.WriteLine(m.molfile())
# f.write(repr(Encoding.UTF8.GetString(Encoding.Default.GetBytes(m.molfile()))))
# f.write(temp.read())
# f.write(Encoding.UTF8.GetString(Encoding.Default.GetBytes(m.molfile().encode('utf-8'))))
# Console.Write(str(temp.read()).encode('utf-8'))
else:
if sys.version_info[0] < 3:
print(m.molfile().encode("utf-8"))
print(res.encode("utf-8"))
print(m.cml().encode("utf-8"))
else:
print(m.molfile())
print(res)
print(m.cml())
|
Sorting/sort list strings with numbers.py | DazEB2/SimplePyScripts | 117 | 40967 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Generate the list
items = ['KMS1.kmch.pos.out_dE_%s.mx' % i for i in range(20)]
# Shuffle the list elements
import random
random.shuffle(items)
print(items)
# Plain sorting does not work (lexicographic order)
print(sorted(items))
print()
def get_number_1(x):
return int(x.split('.')[-2].split('_')[-1])
def get_number_2(x):
import re
    match = re.search(r'KMS1\.kmch\.pos\.out_dE_(\d+)\.mx', x)
return int(match.group(1))
print(sorted(items, key=get_number_1))
print(sorted(items, key=get_number_2))
|
SimCalorimetry/EcalSelectiveReadoutProducers/python/ecalDigis_cff.py | ckamtsikis/cmssw | 852 | 40977 | <reponame>ckamtsikis/cmssw<filename>SimCalorimetry/EcalSelectiveReadoutProducers/python/ecalDigis_cff.py
import FWCore.ParameterSet.Config as cms
# Define EcalSelectiveReadoutProducer module as "simEcalDigis" with default settings
from SimCalorimetry.EcalSelectiveReadoutProducers.ecalDigis_cfi import *
|
pythran/tests/cython/setup_tax.py | davidbrochart/pythran | 1,647 | 40978 | from distutils.core import setup
from Cython.Build import cythonize
setup(
name = "tax",
ext_modules = cythonize('tax.pyx'),
script_name = 'setup.py',
script_args = ['build_ext', '--inplace']
)
import tax
import numpy as np
print(tax.tax(np.ones(10)))
|
src/pretalx/api/permissions.py | lili668668/pretalx | 418 | 40984 | from rest_framework.permissions import SAFE_METHODS, BasePermission
class ApiPermission(BasePermission):
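    # Maps DRF permission checks onto pretalx's per-object permission names:
    # safe (read-only) methods are checked against the view's
    # ``read_permission_required``, everything else against
    # ``write_permission_required``; views without these attributes allow
    # reads and deny writes by default.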
def _has_permission(self, view, obj, request):
event = getattr(request, "event", None)
if not event: # Only true for root API view
return True
if request.method in SAFE_METHODS:
read_permission = getattr(view, "read_permission_required", None)
if read_permission:
return request.user.has_perm(read_permission, obj)
return True
write_permission = getattr(view, "write_permission_required", None)
if write_permission:
return request.user.has_perm(write_permission, obj)
return False
def has_permission(self, request, view):
return self._has_permission(view, getattr(request, "event", None), request)
def has_object_permission(self, request, view, obj):
return self._has_permission(view, obj, request)
|
odinw/download.py | microsoft/GLIP | 295 | 41002 | import argparse
import os
argparser = argparse.ArgumentParser()
argparser.add_argument("--dataset_names", default="all", type=str) # "all" or names joined by comma
argparser.add_argument("--dataset_path", default="DATASET/odinw", type=str)
args = argparser.parse_args()
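# Example invocation (paths are illustrative):
#   python download.py --dataset_names Aquarium,EgoHands --dataset_path DATASET/odinw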
root = "https://vlpdatasets.blob.core.windows.net/odinw/odinw/odinw_35"
all_datasets = ["AerialMaritimeDrone", "AmericanSignLanguageLetters", "Aquarium", "BCCD", "ChessPieces", "CottontailRabbits", "DroneControl", "EgoHands", "HardHatWorkers", "MaskWearing", "MountainDewCommercial", "NorthAmericaMushrooms", "OxfordPets", "PKLot", "Packages", "PascalVOC", "Raccoon", "ShellfishOpenImages", "ThermalCheetah", "UnoCards", "VehiclesOpenImages", "WildfireSmoke", "boggleBoards", "brackishUnderwater", "dice", "openPoetryVision", "pistols", "plantdoc", "pothole", "selfdrivingCar", "thermalDogsAndPeople", "vector", "websiteScreenshots"]
datasets_to_download = []
if args.dataset_names == "all":
datasets_to_download = all_datasets
else:
datasets_to_download = args.dataset_names.split(",")
for dataset in datasets_to_download:
if dataset in all_datasets:
print("Downloading dataset: ", dataset)
os.system("wget " + root + "/" + dataset + ".zip" + " -O " + args.dataset_path + "/" + dataset + ".zip")
os.system("unzip " + args.dataset_path + "/" + dataset + ".zip -d " + args.dataset_path)
os.system("rm " + args.dataset_path + "/" + dataset + ".zip")
else:
print("Dataset not found: ", dataset)
|
tests/test_experiment.py | movermeyer/pyexperiment | 220 | 41026 | <reponame>movermeyer/pyexperiment
"""Tests the experiment module of pyexperiment
Written by <NAME>
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import argparse
import io
import mock
import tempfile
import logging
import multiprocessing
from pyexperiment import experiment
from pyexperiment.utils.stdout_redirector import stdout_redirector
from pyexperiment import state
from pyexperiment import conf
from pyexperiment import Logger
from pyexperiment import log
class TestExperimentBasic(unittest.TestCase):
"""Test the experiment module's basic functions
"""
def test_main_runs_function(self):
"""Test running main calls function
"""
run = [False]
def custom_function():
"""User function
"""
run[0] = True
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "custom_function"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function])
self.assertTrue(run[0])
self.assertEqual(len(buf.getvalue()), 0)
def test_main_prints_result(self):
"""Test running main prints the result of a function
"""
def custom_function():
"""User function
"""
return "Foo"
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "custom_function"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function])
self.assertNotEqual(len(buf.getvalue()), 0)
self.assertRegexpMatches(buf.getvalue(), r'Foo')
def test_main_shows_commands(self):
"""Test running main shows commands
"""
def default_function():
"""Default function
"""
pass
def custom_function1():
"""User function
"""
pass
def custom_function2():
"""User function
"""
pass
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_commands"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(default=default_function,
commands=[custom_function1, custom_function2])
self.assertNotEqual(len(buf.getvalue()), 0)
self.assertRegexpMatches(buf.getvalue(), r"default_function")
self.assertRegexpMatches(buf.getvalue(), r"custom_function1")
self.assertRegexpMatches(buf.getvalue(), r"custom_function2")
def test_main_not_enough_arguments(self):
"""Test running main without command
"""
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main()
self.assertNotEqual(len(buf.getvalue()), 0)
self.assertRegexpMatches(buf.getvalue(), r"[Nn]ot enough arguments")
def test_main_runs_default(self):
"""Test running main with default command
"""
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test"]
run = [False]
def custom_function():
"""User function
"""
run[0] = True
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(default=custom_function)
self.assertEqual(len(buf.getvalue()), 0)
self.assertTrue(run[0])
def test_main_complains_default(self):
"""Test running main with default command taking an argument
"""
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test"]
def custom_function(_argument):
"""User function that takes an argument
"""
pass
self.assertRaises(
TypeError,
experiment.main,
default=custom_function)
def test_main_runs_other_function(self):
"""Test running main with default command and other function
"""
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test",
"custom_function2"]
run = [False, False]
def custom_function():
"""User function
"""
run[0] = True
def custom_function2():
"""User function2
"""
run[1] = True
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(default=custom_function,
commands=[custom_function2])
self.assertEqual(len(buf.getvalue()), 0)
self.assertFalse(run[0])
self.assertTrue(run[1])
def test_main_does_not_run_function(self):
"""Test running main does not call unnecessary function but complains
"""
run = [False]
def custom_function():
"""User function
"""
run[0] = True
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "help"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function])
self.assertFalse(run[0])
self.assertNotEqual(len(buf.getvalue()), 0)
def test_main_gives_help(self):
"""Test running help shows docstring
"""
run = [False]
def custom_function():
"""This should be printed!!
"""
run[0] = True
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "help", "custom_function"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function])
self.assertFalse(run[0])
self.assertIn("This should be printed!!", buf.getvalue())
def test_main_complains_on_help(self):
"""Test running help complains on help for wrong command
"""
def custom_function():
"""Foo function
"""
pass
# Monkey patch arg parser here
argparse._sys.argv = [ # pylint: disable=W0212
"test", "help", "foo"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[custom_function])
self.assertRegexpMatches(buf.getvalue(), r"[cC]ommand")
self.assertRegexpMatches(buf.getvalue(), r"not")
self.assertRegexpMatches(buf.getvalue(), r"foo")
def test_main_runs_test(self):
"""Test running main calls tests when needed
"""
class ExampleTest(unittest.TestCase):
"""Test case for the test
"""
pass
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "test"]
with mock.patch.object(unittest, 'TextTestRunner') as mock_method:
experiment.main(commands=[], tests=[ExampleTest])
self.assertEqual(mock_method.call_count, 1)
def test_main_shows_test(self):
"""Test running main shows tests when needed
"""
class ExampleTest(unittest.TestCase):
"""Test case for the test
"""
pass
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_tests"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(tests=[ExampleTest])
self.assertRegexpMatches(buf.getvalue(), r"ExampleTest")
def test_main_shows_no_test(self):
"""Test running main complains if there are no tests
"""
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_tests"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(tests=[])
self.assertRegexpMatches(buf.getvalue(), r"No tests available")
def test_main_doesnt_test_on_help(self):
"""Test running main does not call tests when not needed
"""
class ExampleTest(unittest.TestCase):
"""Test case for the test
"""
pass
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "-h"]
buf = io.StringIO()
with stdout_redirector(buf):
with mock.patch.object(unittest, 'TextTestRunner') as mock_method:
try:
experiment.main(commands=[], tests=[ExampleTest])
self.assertEqual(mock_method.call_count, 0)
except SystemExit:
pass
@mock.patch('pyexperiment.experiment.embed_interactive')
def test_main_runs_interactive(self, mock_interactive):
"""Test running main runs interactive session
"""
argparse._sys.argv = [ # pylint: disable=W0212
"test", "--interactive"]
experiment.main(commands=[], tests=[])
self.assertTrue(mock_interactive.call_count == 1)
def test_main_shows_empty_state(self):
"""Test running main shows empty state
"""
with tempfile.NamedTemporaryFile() as temp:
state['bla'] = 12
del state['bla']
state.save(temp.name)
spec = ('[pyexperiment]\n'
'state_filename = string(default=%s)' % temp.name)
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_state"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(config_spec=spec)
self.assertRegexpMatches(buf.getvalue(), r"[Ss]tate empty")
def test_main_shows_default_state(self):
"""Test running main shows the default state
"""
with tempfile.NamedTemporaryFile() as temp:
state['bla'] = 12
state.save(temp.name)
spec = ('[pyexperiment]\n'
'state_filename = string(default=%s)' % temp.name)
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_state"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(config_spec=spec)
self.assertRegexpMatches(buf.getvalue(), r"bla")
self.assertRegexpMatches(buf.getvalue(), r"12")
def test_main_shows_other_state(self):
"""Test running main shows state from file
"""
with tempfile.NamedTemporaryFile() as temp:
state['foo'] = 42
state.save(temp.name)
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_state", temp.name]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main()
self.assertRegexpMatches(buf.getvalue(), r"foo")
self.assertRegexpMatches(buf.getvalue(), r"42")
def test_main_shows_config(self):
"""Test running main shows the configuration
"""
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "show_config"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main()
self.assertRegexpMatches(buf.getvalue(), r"\[pyexperiment\]")
self.assertRegexpMatches(buf.getvalue(), r"n_processes")
def test_main_saves_config(self):
"""Test running main saves the configuration
"""
with tempfile.NamedTemporaryFile() as temp:
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "save_config", temp.name]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main()
lines = open(temp.name).readlines()
self.assertNotEqual(len(lines), 0)
self.assertRegexpMatches("".join(lines), r"\[pyexperiment\]")
self.assertRegexpMatches("".join(lines), r"n_processes")
self.assertRegexpMatches(buf.getvalue(), r'Wrote configuration')
class TestExperimentOverrides(unittest.TestCase):
"""Test the experiment module's option overriding
"""
def test_main_overrides_option(self):
"""Test running main called with -o works as expected
"""
called = [False]
def foo_fun():
"""Foo function
"""
called[0] = True
self.assertEqual(conf['bla'], 'foo')
conf['bla'] = 'bla'
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "-o", "bla", "foo", "foo_fun"]
self.assertFalse(called[0])
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['bla'], 'foo')
def test_main_no_processes_default(self):
"""Test running main called without -j works as expected
"""
called = [False]
def foo_fun():
"""Foo function
"""
called[0] = True
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "foo_fun"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['pyexperiment.n_processes'],
multiprocessing.cpu_count())
def test_main_no_processes_simple(self):
"""Test running main called with -j works as expected
"""
called = [False]
def foo_fun():
"""Foo function
"""
called[0] = True
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "-j", "42", "foo_fun"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['pyexperiment.n_processes'],
42)
def test_main_no_processes_long(self):
"""Test running main called with --processes works as expected
"""
called = [False]
def foo_fun():
"""Foo function
"""
called[0] = True
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "--processes", "44", "foo_fun"]
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['pyexperiment.n_processes'],
44)
class TestExperimentLogging(unittest.TestCase):
"""Test the experiment's logging context
"""
def setUp(self):
"""Set up the test
"""
self.log_stream = io.StringIO()
Logger.CONSOLE_STREAM_HANDLER = logging.StreamHandler(self.log_stream)
log.reset_instance()
conf.reset_instance()
def test_main_logs_console(self):
"""Test running main logs as expected
"""
argparse._sys.argv = [ # pylint: disable=W0212
"test"]
def hello():
"""Logs a message
"""
log.fatal("Hello")
experiment.main(default=hello)
self.assertNotEqual(len(self.log_stream.getvalue()), 0)
self.assertRegexpMatches(self.log_stream.getvalue(), r'Hello')
def test_main_prints_timings(self):
"""Test running main logs timings as expected
"""
argparse._sys.argv = [ # pylint: disable=W0212
"test", "-o", "pyexperiment.print_timings", "True"]
def hello():
"""Logs a message
"""
with log.timed("bla"):
pass
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(default=hello)
self.assertNotEqual(len(buf.getvalue()), 0)
self.assertRegexpMatches(buf.getvalue(), r'bla')
def test_main_prints_timings_simple(self):
"""Test running main logs timings as expected with --print_timings
"""
argparse._sys.argv = [ # pylint: disable=W0212
"test", "--print-timings"]
def hello():
"""Logs a message
"""
with log.timed("bla"):
pass
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(default=hello)
self.assertNotEqual(len(buf.getvalue()), 0)
self.assertRegexpMatches(buf.getvalue(), r'bla')
def test_main_logs_file(self):
"""Test running main logs as expected
"""
conf['pyexperiment.rotate_n_logs'] = 0
argparse._sys.argv = [ # pylint: disable=W0212
"test"]
def hello():
"""Logs a message
"""
log.debug("Hello")
with tempfile.NamedTemporaryFile() as temp:
conf['pyexperiment.log_filename'] = temp.name
conf['pyexperiment.log_to_file'] = True
experiment.main(default=hello)
lines = open(temp.name).readlines()
self.assertNotEqual(len(lines), 0)
self.assertRegexpMatches("".join(lines), r'Hello')
self.assertEqual(len(self.log_stream.getvalue()), 0)
def test_main_verbosity_debug(self):
"""Test running main called with -v works as expected
"""
called = [False]
def foo_fun():
"""Foo function
"""
called[0] = True
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "-v", "foo_fun"]
self.assertFalse(called[0])
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['pyexperiment.verbosity'], 'DEBUG')
def test_main_overrides_verbosity(self):
"""Test running main called with --verbosity works as expected
"""
called = [False]
def foo_fun():
"""Foo function
"""
called[0] = True
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "--verbosity", "DEBUG", "foo_fun"]
self.assertFalse(called[0])
buf = io.StringIO()
with stdout_redirector(buf):
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['pyexperiment.verbosity'], 'DEBUG')
called[0] = False
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "--verbosity", "WARNING", "foo_fun"]
self.assertFalse(called[0])
experiment.main(commands=[foo_fun])
self.assertTrue(called[0])
self.assertEqual(conf['pyexperiment.verbosity'], 'WARNING')
def test_logger_after_exception(self):
"""Test logger closing correctly after exception
"""
# Monkey patch log closing
close_old = Logger.Logger.close
called = [False]
def close(self):
"""Close the logger and record it"""
close_old(self)
called[0] = True
Logger.Logger.close = close
def foo_fun():
"""Foo function
"""
raise RuntimeError
# Monkey patch arg parser
argparse._sys.argv = [ # pylint: disable=W0212
"test", "foo_fun"]
try:
experiment.main(commands=[foo_fun])
except RuntimeError:
pass
else:
raise AssertionError("RuntimeError not raised")
# Make sure logger is closed
self.assertTrue(called[0])
Logger.Logger.close = close_old
if __name__ == '__main__':
unittest.main()
|
utils/general.py | bfortuner/VOCdetect | 336 | 41055 | <reponame>bfortuner/VOCdetect
import uuid
def gen_unique_id(prefix='', length=5):
return prefix + str(uuid.uuid4()).upper().replace('-','')[:length]
def get_class_name(obj):
invalid_class_names = ['function']
classname = obj.__class__.__name__
if classname is None or classname in invalid_class_names:
classname = obj.__name__
return classname
def dict_to_html(dd, level=0):
"""
Convert dict to html using basic html tags
"""
import simplejson
text = ''
for k, v in dd.items():
text += '<br>' + ' '*(4*level) + '<b>%s</b>: %s' % (k, dict_to_html(v, level+1) if isinstance(v, dict) else (simplejson.dumps(v) if isinstance(v, list) else v))
return text
def dict_to_html_ul(dd, level=0):
"""
Convert dict to html using ul/li tags
"""
import simplejson
text = '<ul>'
for k, v in dd.items():
text += '<li><b>%s</b>: %s</li>' % (k, dict_to_html_ul(v, level+1) if isinstance(v, dict) else (simplejson.dumps(v) if isinstance(v, list) else v))
text += '</ul>'
return text
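# Illustrative output (assuming insertion-ordered dicts, Python 3.7+):
#   dict_to_html_ul({'a': 1, 'b': {'c': [2, 3]}})
#   -> '<ul><li><b>a</b>: 1</li><li><b>b</b>: <ul><li><b>c</b>: [2, 3]</li></ul></li></ul>'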
|
setup.py | KevinMusgrave/pytorch-adapt | 131 | 41092 | import sys
import setuptools
sys.path.insert(0, "src")
import pytorch_adapt
with open("README.md", "r") as fh:
long_description = fh.read()
extras_require_ignite = ["pytorch-ignite == 0.5.0.dev20220221"]
extras_require_lightning = ["pytorch-lightning"]
extras_require_record_keeper = ["record-keeper >= 0.9.31"]
extras_require_timm = ["timm"]
extras_require_docs = [
"mkdocs-material",
"mkdocstrings[python]",
"griffe",
"mkdocs-gen-files",
"mkdocs-section-index",
"mkdocs-literate-nav",
]
extras_require_dev = ["black", "isort", "nbqa", "flake8"]
setuptools.setup(
name="pytorch-adapt",
version=pytorch_adapt.__version__,
author="<NAME>",
description="Domain adaptation made easy. Fully featured, modular, and customizable.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/KevinMusgrave/pytorch-adapt",
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.0",
install_requires=[
"numpy",
"torch",
"torchvision",
"torchmetrics",
"pytorch-metric-learning >= 1.3.1.dev0",
],
extras_require={
"ignite": extras_require_ignite,
"lightning": extras_require_lightning,
"record-keeper": extras_require_record_keeper,
"timm": extras_require_timm,
"docs": extras_require_docs,
"dev": extras_require_dev,
},
)
|
bh_modules/erlangcase.py | jfcherng-sublime/ST-BracketHighlighter | 1,047 | 41093 | <gh_stars>1000+
"""
BracketHighlighter.
Copyright (c) 2013 - 2016 <NAME> <<EMAIL>>
License: MIT
"""
from BracketHighlighter.bh_plugin import import_module
lowercase = import_module("bh_modules.lowercase")
def validate(*args):
"""Check if bracket is lowercase."""
return lowercase.validate(*args)
|
codigo/Live171/exemplo_04.py | BrunoPontesLira/live-de-python | 572 | 41118 | <gh_stars>100-1000
d = {'a': 1, 'c': 3}
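# Mapping patterns match partially: extra keys in the subject are ignored, so the
# first case fails only because 'b' is missing, and this dict is handled by the
# {'a': _} | {'c': _} branch below.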
match d:
case {'a': chave_a, 'b': _}:
        print(f'key A {chave_a=} + key B')
    case {'a': _} | {'c': _}:
        print('key A or C')
    case {}:
        print('empty')
    case _:
        print("Don't know")
|
sionna/channel/apply_time_channel.py | NVlabs/sionna | 163 | 41129 | <filename>sionna/channel/apply_time_channel.py
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Layer for applying channel responses to channel inputs in the time domain"""
import tensorflow as tf
import numpy as np
import scipy
from sionna.utils import insert_dims
from .awgn import AWGN
class ApplyTimeChannel(tf.keras.layers.Layer):
# pylint: disable=line-too-long
r"""ApplyTimeChannel(num_time_samples, l_tot, add_awgn=True, dtype=tf.complex64, **kwargs)
Apply time domain channel responses ``h_time`` to channel inputs ``x``,
by filtering the channel inputs with time-variant channel responses.
This class inherits from the Keras `Layer` class and can be used as layer
in a Keras model.
For each batch example, ``num_time_samples`` + ``l_tot`` - 1 time steps of a
channel realization are required to filter the channel inputs.
The channel output consists of ``num_time_samples`` + ``l_tot`` - 1
time samples, as it is the result of filtering the channel input of length
``num_time_samples`` with the time-variant channel filter of length
``l_tot``. In the case of a single-input single-output link and given a sequence of channel
inputs :math:`x_0,\cdots,x_{N_B}`, where :math:`N_B` is ``num_time_samples``, this
layer outputs
.. math::
y_b = \sum_{\ell = 0}^{L_{\text{tot}}} x_{b-\ell} \bar{h}_{b,\ell} + w_b
    where :math:`L_{\text{tot}}` corresponds to ``l_tot``, :math:`w_b` to the additive noise, and
:math:`\bar{h}_{b,\ell}` to the :math:`\ell^{th}` tap of the :math:`b^{th}` channel sample.
This layer outputs :math:`y_b` for :math:`b` ranging from 0 to
:math:`N_B + L_{\text{tot}} - 1`, and :math:`x_{b}` is set to 0 for :math:`b \geq N_B`.
For multiple-input multiple-output (MIMO) links, the channel output is computed for each antenna
of each receiver and by summing over all the antennas of all transmitters.
Parameters
----------
num_time_samples : int
Number of time samples forming the channel input (:math:`N_B`)
l_tot : int
Length of the channel filter (:math:`L_{\text{tot}} = L_{\text{max}} - L_{\text{min}} + 1`)
add_awgn : bool
If set to `False`, no white Gaussian noise is added.
Defaults to `True`.
dtype : tf.DType
Complex datatype to use for internal processing and output.
Defaults to `tf.complex64`.
Input
-----
(x, h_time, no) or (x, h_time):
Tuple:
x : [batch size, num_tx, num_tx_ant, num_time_samples], tf.complex
Channel inputs
h_time : [batch size, num_rx, num_rx_ant, num_tx, num_tx_ant, num_time_samples + l_tot - 1, l_tot], tf.complex
Channel responses.
For each batch example, ``num_time_samples`` + ``l_tot`` - 1 time steps of a
channel realization are required to filter the channel inputs.
no : Scalar or Tensor, tf.float
Scalar or tensor whose shape can be broadcast to the shape of the channel outputs: [batch size, num_rx, num_rx_ant, num_time_samples + l_tot - 1].
Only required if ``add_awgn`` is set to `True`.
The noise power ``no`` is per complex dimension. If ``no`` is a
scalar, noise of the same variance will be added to the outputs.
If ``no`` is a tensor, it must have a shape that can be broadcast to
the shape of the channel outputs. This allows, e.g., adding noise of
different variance to each example in a batch. If ``no`` has a lower
rank than the channel outputs, then ``no`` will be broadcast to the
shape of the channel outputs by adding dummy dimensions after the
last axis.
Output
-------
y : [batch size, num_rx, num_rx_ant, num_time_samples + l_tot - 1], tf.complex
Channel outputs.
The channel output consists of ``num_time_samples`` + ``l_tot`` - 1
time samples, as it is the result of filtering the channel input of length
``num_time_samples`` with the time-variant channel filter of length
``l_tot``.
"""
def __init__(self, num_time_samples, l_tot, add_awgn=True,
dtype=tf.complex64, **kwargs):
super().__init__(trainable=False, dtype=dtype, **kwargs)
self._add_awgn = add_awgn
        # The channel transfer function is implemented by first gathering from
# the vector of transmitted baseband symbols
# x = [x_0,...,x_{num_time_samples-1}]^T the symbols that are then
# multiplied by the channel tap coefficients.
# We build here the matrix of indices G, with size
# `num_time_samples + l_tot - 1` x `l_tot` that is used to perform this
# gathering.
# For example, if there are 4 channel taps
# h = [h_0, h_1, h_2, h_3]^T
# and `num_time_samples` = 10 time steps then G would be
# [[0, 10, 10, 10]
# [1, 0, 10, 10]
# [2, 1, 0, 10]
# [3, 2, 1, 0]
# [4, 3, 2, 1]
# [5, 4, 3, 2]
# [6, 5, 4, 3]
# [7, 6, 5, 4]
# [8, 7, 6, 5]
# [9, 8, 7, 6]
# [10, 9, 8, 7]
# [10,10, 9, 8]
# [10,10, 10, 9]
# Note that G is a Toeplitz matrix.
# In this example, the index `num_time_samples`=10 corresponds to the
# zero symbol. The vector of transmitted symbols is padded with one
# zero at the end.
first_colum = np.concatenate([ np.arange(0, num_time_samples),
np.full([l_tot-1], num_time_samples)])
first_row = np.concatenate([[0], np.full([l_tot-1], num_time_samples)])
self._g = scipy.linalg.toeplitz(first_colum, first_row)
def build(self, input_shape): #pylint: disable=unused-argument
if self._add_awgn:
self._awgn = AWGN(dtype=self.dtype)
def call(self, inputs):
if self._add_awgn:
x, h_time, no = inputs
else:
x, h_time = inputs
# Preparing the channel input for broadcasting and matrix multiplication
x = tf.pad(x, [[0,0], [0,0], [0,0], [0,1]])
x = insert_dims(x, 2, axis=1)
x = tf.gather(x, self._g, axis=-1)
# Apply the channel response
y = tf.reduce_sum(h_time*x, axis=-1)
y = tf.reduce_sum(tf.reduce_sum(y, axis=4), axis=3)
# Add AWGN if requested
if self._add_awgn:
y = self._awgn((y, no))
return y
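# Minimal usage sketch (illustrative; shapes follow the docstring, and the random
# tensors merely stand in for the output of an actual time-domain channel model):
#
#   apply_channel = ApplyTimeChannel(num_time_samples=100, l_tot=17)
#   x = tf.complex(tf.random.normal([8, 1, 2, 100]),
#                  tf.random.normal([8, 1, 2, 100]))
#   h_time = tf.complex(tf.random.normal([8, 1, 4, 1, 2, 116, 17]),
#                       tf.random.normal([8, 1, 4, 1, 2, 116, 17]))
#   y = apply_channel((x, h_time, 0.1))  # -> [8, 1, 4, 116]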
|
fbssdc/ast.py | Eijebong/binjs-ref | 391 | 41143 | <reponame>Eijebong/binjs-ref<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import doctest
import json
import os
import subprocess
import idl
BINJS_SIGNATURE = b'BINJS\x02'
def load_ast(filename):
with open(filename) as inp:
return json.loads(inp.read())
def load_test_ast(filename):
return load_ast(os.path.join(os.path.dirname(__file__), 'test-data', filename))
# Map ASTs to something which includes all of the data.
# Values:
# - tagged object (with "type")
# - int
# - float
# - string
# - bool
# - None
# - list
# TODO: Add a "specials" for objects added by AST mutilations, like LazyIOUs.
class AstVisitor(object):
def __init__(self, types):
self.types = types
def visit(self, ty, value):
'''Visits value with declared type ty.
Declared types may be much broader than the actual type of value.
The encoder must narrow this uncertainty for the decoder to make
coordinated decisions.
'''
if type(value) in [bool, float, int, str, type(None)]:
self.visit_primitive(ty, value)
elif type(value) is list:
# FIXME: When es6 IDL uses ... or FrozenArray<...>, unpack and
# narrow the type to this value.
assert isinstance(ty, idl.TyFrozenArray), str(ty)
self.visit_list(ty, value)
elif type(value) is dict:
actual_ty = self.types.interfaces[value['type']]
self.visit_struct(ty, actual_ty, value)
else:
assert False, f'unreachable: {type(value)}'
def visit_list(self, ty, xs):
for i, x in enumerate(xs):
self.visit_list_item(ty.element_ty, i, x)
def visit_list_item(self, ty, i, x):
self.visit(ty, x)
def visit_struct(self, declared_ty, actual_ty, obj):
for i, attr in enumerate(actual_ty.attributes()):
self.visit_field(actual_ty, obj, i, attr)
def visit_field(self, struct_ty, obj, i, attr):
self.visit(attr.resolved_ty, obj[attr.name])
def visit_primitive(self, ty, value):
pass
# This type is not used but it is useful as a unit test for AstVisitor.
class AstStringIndexer(AstVisitor):
'''
>>> types = idl.parse_es6_idl()
>>> tree = load_test_ast('y5R7cnYctJv.js.dump')
>>> visitor = AstStringIndexer(types)
>>> visitor.visit(types.interfaces['Script'], tree)
>>> len(visitor.strings)
1330
>>> visitor.strings[10:14]
['IdentifierExpression', 'CavalryLogger', 'start_js', 'ArrayExpression']
'''
def __init__(self, types):
super().__init__(types)
self.strings = list()
def visit_primitive(self, ty, value):
super().visit_primitive(ty, value)
if type(value) is str:
self.strings.append(value)
# When a lazy node is deported, this breadcrumb is dropped in its
# place. References to lazy functions are deserialized in order so
# there is no need to add indexes to them.
# TODO: Make this a singleton.
class LazyIOU(object):
pass
if __name__ == '__main__':
doctest.testmod()
|
CountingGridsPy/tests/time_models/time_gpuvscpu.py | microsoft/browsecloud | 159 | 41149 | <filename>CountingGridsPy/tests/time_models/time_gpuvscpu.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
import numpy as np
import torch
import os
import cProfile
from CountingGridsPy.models import CountingGridModel, CountingGridModelWithGPU
class TimeGPUvsCPU(object):
def __init__(self):
SEED = "03071994"
np.random.seed(int(SEED))
M, N = [5000, 1000]
extentSize = 40
self.data = np.round(np.random.random((M, N)) * 10)
self.extent = np.array([extentSize, extentSize])
self.window = np.array([5, 5])
self.pi_init = np.ones([extentSize] * 2 + [N]) / N
self.cpuModel = CountingGridModel(self.extent, self.window)
self.gpuModel = CountingGridModelWithGPU(self.extent, self.window)
def run_nolayers(self):
numIters = 50
device = torch.device("cuda:0")
outfileForGPU = "gpuProfile.txt"
gpuJob = '''self.gpuModel.fit(
self.data,
max_iter=numIters,
pi=torch.tensor(self.pi_init, device=device, dtype=torch.double),
layers=1
)
'''
cProfile.runctx(gpuJob, globals(), locals(), outfileForGPU)
outfileForCPU = "cpuProfile.txt"
cpuJob = '''self.cpuModel.fit(
self.data,
max_iter=numIters,
returnSumSquareDifferencesOfPi=False,
pi=np.copy(self.pi_init),
layers=1
)
'''
cProfile.runctx(cpuJob, globals(), locals(), outfileForCPU)
def run_withlayers(self):
numIters = 50
device = torch.device("cuda:0")
outfileForGPU = "gpu2LayersProfile.txt"
gpuJob = '''self.gpuModel.fit(
self.data,
max_iter=numIters,
pi=torch.tensor(self.pi_init, device=device, dtype=torch.double),
layers=2,
writeOutput=False
)
'''
cProfile.runctx(gpuJob, globals(), locals(), outfileForGPU)
outfileForCPU = "cpu2LayersProfile.txt"
cpuJob = '''self.cpuModel.fit(
self.data,
max_iter=numIters,
returnSumSquareDifferencesOfPi=False,
pi=np.copy(self.pi_init),
layers=2,
writeOutput=False
)
'''
cProfile.runctx(cpuJob, globals(), locals(), outfileForCPU)
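        # Despite the .txt extension, the files written above are binary
        # cProfile dumps; they can be inspected with pstats, e.g.:
        #   import pstats
        #   pstats.Stats("cpu2LayersProfile.txt").sort_stats("cumtime").print_stats(20)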
if __name__ == "__main__":
o = TimeGPUvsCPU()
o.run_withlayers()
|
external/lemonade/dist/examples/calc/calc.py | almartin82/bayeslite | 964 | 41185 | <reponame>almartin82/bayeslite<filename>external/lemonade/dist/examples/calc/calc.py
import sys
def generateGrammar():
from lemonade.main import generate
from os.path import join, dirname
from StringIO import StringIO
inputFile = join(dirname(__file__), "gram.y")
outputStream = StringIO()
generate(inputFile, outputStream)
return outputStream.getvalue()
# generate and import our grammar
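# (Python 2 only: the exec statement below and the print >> syntax further down
# are not valid Python 3.)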
exec generateGrammar() in globals()
#
# the lexer
#
tokenType = {
'+': PLUS,
'-': MINUS,
'/': DIVIDE,
'*': TIMES,
}
def tokenize(input):
import re
tokenText = re.split("([+-/*])|\s*", input)
for text in tokenText:
if text is None:
continue
type = tokenType.get(text)
if type is None:
type = NUM
value = float(text)
else:
value = None
yield (type, value)
return
#
# the delegate
#
class Delegate(object):
def accept(self):
return
def parse_failed(self):
assert False, "Giving up. Parser is hopelessly lost..."
def syntax_error(self, token):
print >>sys.stderr, "Syntax error!"
return
#
# reduce actions
#
def sub(self, a, b): return a - b
def add(self, a, b): return a + b
def mul(self, a, b): return a * b
def div(self, a, b): return a / b
def num(self, value): return value
def print_result(self, result):
print result
return
p = Parser(Delegate())
#p.trace(sys.stdout, "# ")
if len(sys.argv) == 2:
p.parse(tokenize(sys.argv[1]))
else:
print >>sys.stderr, "usage: %s EXPRESSION" % sys.argv[0]
|
examples/pybullet/gym/pybullet_envs/minitaur/agents/scripts/configs.py | felipeek/bullet3 | 9,136 | 41203 | <reponame>felipeek/bullet3
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example configurations using the PPO algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-variable
from pybullet_envs.minitaur.agents import ppo
from pybullet_envs.minitaur.agents.scripts import networks
def default():
"""Default configuration for PPO."""
# General
algorithm = ppo.PPOAlgorithm
num_agents = 10
eval_episodes = 25
use_gpu = False
# Network
network = networks.ForwardGaussianPolicy
weight_summaries = dict(all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*')
policy_layers = 200, 100
value_layers = 200, 100
init_mean_factor = 0.05
init_logstd = -1
# Optimization
update_every = 25
policy_optimizer = 'AdamOptimizer'
value_optimizer = 'AdamOptimizer'
update_epochs_policy = 50
update_epochs_value = 50
policy_lr = 1e-4
value_lr = 3e-4
# Losses
discount = 0.985
kl_target = 1e-2
kl_cutoff_factor = 2
kl_cutoff_coef = 1000
kl_init_penalty = 1
return locals()
def pendulum():
"""Configuration for the pendulum classic control task."""
locals().update(default())
# Environment
env = 'Pendulum-v0'
max_length = 200
steps = 1e6 # 1M
return locals()
def cheetah():
"""Configuration for MuJoCo's half cheetah task."""
locals().update(default())
# Environment
env = 'HalfCheetah-v1'
max_length = 1000
steps = 1e7 # 10M
return locals()
def walker():
"""Configuration for MuJoCo's walker task."""
locals().update(default())
# Environment
env = 'Walker2d-v1'
max_length = 1000
steps = 1e7 # 10M
return locals()
def reacher():
"""Configuration for MuJoCo's reacher task."""
locals().update(default())
# Environment
env = 'Reacher-v1'
max_length = 1000
steps = 1e7 # 10M
return locals()
def hopper():
"""Configuration for MuJoCo's hopper task."""
locals().update(default())
# Environment
env = 'Hopper-v1'
max_length = 1000
steps = 2e7 # 20M
return locals()
def ant():
"""Configuration for MuJoCo's ant task."""
locals().update(default())
# Environment
env = 'Ant-v1'
max_length = 1000
steps = 5e7 # 50M
return locals()
def humanoid():
"""Configuration for MuJoCo's humanoid task."""
locals().update(default())
# Environment
env = 'Humanoid-v1'
max_length = 1000
steps = 5e7 # 50M
return locals()
|
test/test_util.py | jeffw-github/autoprotocol-python | 113 | 41233 | <reponame>jeffw-github/autoprotocol-python
import json
class TestUtils:
@staticmethod
def read_json_file(file_path: str):
        with open("./test/data/{0}".format(file_path)) as file:
            data = json.load(file)
return json.dumps(data, indent=2, sort_keys=True)
|
code/get_ordinals.py | lebronlambert/Information_Extraction_DeepRL | 251 | 41244 | <gh_stars>100-1000
import pickle
import inflect
p = inflect.engine()
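# Note: inflect could generate these programmatically, e.g.
# p.number_to_words(p.ordinal(21)) -> 'twenty-first' (assuming current inflect
# behaviour); the explicit word list below is kept as the source of truth.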
words = set(['first','second','third','fourth','fifth','sixth','seventh','eighth','ninth','tenth','eleventh','twelfth','thirteenth','fourteenth','fifteenth',
'sixteenth','seventeenth','eighteenth','nineteenth','twentieth','twenty-first','twenty-second','twenty-third','twenty-fourth','twenty-fifth'])
pickle.dump(words, open("../data/constants/word_ordinals.p", "wb")) |
plenum/test/pool_transactions/test_change_ha_persists_post_nodes_restart.py | andkononykhin/plenum | 148 | 41257 | <reponame>andkononykhin/plenum
from plenum.common.util import hexToFriendly, randomString
from stp_core.common.log import getlogger
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.pool_transactions.helper import sdk_send_update_node, sdk_pool_refresh, \
sdk_add_new_steward_and_node
from plenum.test.test_node import TestNode, checkNodesConnected
from stp_core.network.port_dispenser import genHa
from plenum.common.config_helper import PNodeConfigHelper
logger = getlogger()
def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet,
tdir, tconf,
sdk_pool_handle,
sdk_wallet_client,
sdk_wallet_steward):
new_steward_wallet, new_node = \
sdk_add_new_steward_and_node(looper,
sdk_pool_handle,
sdk_wallet_steward,
'AnotherSteward' + randomString(4),
'AnotherNode' + randomString(4),
tdir,
tconf)
txnPoolNodeSet.append(new_node)
looper.run(checkNodesConnected(txnPoolNodeSet))
sdk_pool_refresh(looper, sdk_pool_handle)
node_new_ha, client_new_ha = genHa(2)
logger.debug("{} changing HAs to {} {}".format(new_node, node_new_ha,
client_new_ha))
# Making the change HA txn an confirming its succeeded
node_dest = hexToFriendly(new_node.nodestack.verhex)
sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
node_dest, new_node.name,
node_new_ha.host, node_new_ha.port,
client_new_ha.host, client_new_ha.port)
# Stopping existing nodes
for node in txnPoolNodeSet:
node.stop()
looper.removeProdable(node)
# Starting nodes again by creating `Node` objects since that simulates
# what happens when starting the node with script
restartedNodes = []
for node in txnPoolNodeSet[:-1]:
config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
restartedNode = TestNode(node.name,
config_helper=config_helper,
config=tconf, ha=node.nodestack.ha,
cliha=node.clientstack.ha)
looper.add(restartedNode)
restartedNodes.append(restartedNode)
# Starting the node whose HA was changed
config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
node = TestNode(new_node.name,
config_helper=config_helper,
config=tconf,
ha=node_new_ha, cliha=client_new_ha)
looper.add(node)
restartedNodes.append(node)
looper.run(checkNodesConnected(restartedNodes))
waitNodeDataEquality(looper, node, *restartedNodes[:-1])
sdk_pool_refresh(looper, sdk_pool_handle)
sdk_ensure_pool_functional(looper, restartedNodes, sdk_wallet_client, sdk_pool_handle)
|
setup.py | tgsmith61591/smite | 113 | 41261 | # -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
#
# Setup the SMRT module
from __future__ import print_function, absolute_import, division
from distutils.command.clean import clean
# from setuptools import setup # DO NOT use setuptools!!!!!!
import shutil
import os
import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# Hacky, adopted from sklearn. This sets a global variable
# so smrt __init__ can detect if it's being loaded in the setup
# routine, so it won't load submodules that haven't yet been built.
builtins.__SMRT_SETUP__ = True
# metadata
DISTNAME = 'smrt'
DESCRIPTION = 'Handle class imbalance intelligently by using Variational Autoencoders ' \
'to generate synthetic observations of your minority class.'
MAINTAINER = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
LICENSE = 'new BSD'
# import restricted version
import smrt
VERSION = smrt.__version__
# get the installation requirements:
with open('requirements.txt') as req:
REQUIREMENTS = req.read().split(os.linesep)
# Custom clean command to remove build artifacts -- adopted from sklearn
class CleanCommand(clean):
description = "Remove build artifacts from the source tree"
# this is mostly in case we ever add a Cython module to SMRT
def run(self):
clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c & .so files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk(DISTNAME):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
print('Removing file: %s' % filename)
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
# this is for FORTRAN modules, which some of my other packages have used in the past...
for dirname in dirnames:
if dirname == '__pycache__' or dirname.endswith('.so.dSYM'):
print('Removing directory: %s' % dirname)
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
def configuration(parent_package='', top_path=None):
# we know numpy is a valid import now
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage(DISTNAME)
return config
def do_setup():
# setup the config
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
version=VERSION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Scikit-learn users',
'Programming Language :: Python',
'Topic :: Machine Learning',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2.7'
],
keywords='sklearn scikit-learn tensorflow auto-encoders neural-networks class-imbalance',
# packages=[DISTNAME],
# install_requires=REQUIREMENTS,
cmdclass=cmdclass)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg-info',
'--version',
'clean'))):
# For these actions, NumPy is not required
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
else: # we DO need numpy
try:
from numpy.distutils.core import setup
except ImportError:
raise RuntimeError('Need numpy to build %s' % DISTNAME)
# add the config to the metadata
metadata['configuration'] = configuration
# call setup on the dict
setup(**metadata)
if __name__ == '__main__':
do_setup()
|
applications/CableNetApplication/python_scripts/edge_cable_element_process.py | lkusch/Kratos | 778 | 41268 | <reponame>lkusch/Kratos<gh_stars>100-1000
import KratosMultiphysics as KratosMultiphysics
import KratosMultiphysics.CableNetApplication as CableNetApplication
from KratosMultiphysics import Logger
def Factory(settings, Model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return EdgeCableElementProcess(Model, settings["Parameters"])
class custom_node:
def __init__(self,start_distance,kratos_node):
self.start_distance = start_distance
self.kratos_node = kratos_node
def return_distance_to_line_start(self):
return self.start_distance
def return_node_distance_to_line_start(node):
return node.return_distance_to_line_start()
class EdgeCableElementProcess(KratosMultiphysics.Process):
def __init__(self, Model, settings ):
KratosMultiphysics.Process.__init__(self)
default_settings = KratosMultiphysics.Parameters("""
{
"edge_sub_model_part_name" : "Structure.example_part",
"element_type" : "cable",
"node_id_order" : [1,2,3],
"element_id" : 1,
"property_id" : 1
}
""")
default_settings.ValidateAndAssignDefaults(settings)
self.edge_model_part = Model[settings["edge_sub_model_part_name"].GetString()]
node_list = settings["node_id_order"].GetVector()
if len(node_list)==0:
node_list = self.CreateCorrectNodeOrder()
settings["node_id_order"].SetVector(node_list)
self.edge_cable_element_process = CableNetApplication.EdgeCableElementProcess(self.edge_model_part, settings)
def ExecuteInitialize(self):
self.edge_cable_element_process.ExecuteInitialize()
Logger.PrintInfo("Initialized","EdgeCableElementProcess")
def CreateCorrectNodeOrder(self):
## find start/end nodes and calculate total distance
max_distance,end_points = 0, []
for node_i in self.edge_model_part.Nodes:
for node_j in self.edge_model_part.Nodes:
distance_i = (node_i.X0 - node_j.X0)*(node_i.X0 - node_j.X0)
distance_i += (node_i.Y0 - node_j.Y0)*(node_i.Y0 - node_j.Y0)
distance_i += (node_i.Z0 - node_j.Z0)*(node_i.Z0 - node_j.Z0)
distance_i = distance_i**0.5
if distance_i>max_distance:
max_distance=distance_i
end_points = [node_i,node_j]
## create sorted node_list
custom_node_list = []
for node_i in self.edge_model_part.Nodes:
distance_i = (node_i.X0 - end_points[0].X0)*(node_i.X0 - end_points[0].X0)
distance_i += (node_i.Y0 - end_points[0].Y0)*(node_i.Y0 - end_points[0].Y0)
distance_i += (node_i.Z0 - end_points[0].Z0)*(node_i.Z0 - end_points[0].Z0)
distance_i = distance_i**0.5
custom_node_i = custom_node(distance_i,node_i)
custom_node_list.append(custom_node_i)
sorted_node_list = sorted(custom_node_list, key=return_node_distance_to_line_start)
return [node.kratos_node.Id for node in sorted_node_list] |
tf_verify/spatial/t_2_norm_transformer.py | Neelanjana314/eran | 254 | 41271 | <reponame>Neelanjana314/eran<filename>tf_verify/spatial/t_2_norm_transformer.py
"""
Copyright 2020 ETH Zurich, Secure, Reliable, and Intelligent Systems Lab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
from itertools import product
import numpy as np
import torch
from mpmath import polyroots
from spatial.t_inf_norm_transformer import TInfNormTransformer
from spatial.t_norm_transformer import TNormTransformer
from spatial.interpolation import interpolate
class T2NormTransformer(TNormTransformer):
def add_norm_constraints(self, model, vx, vy):
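        # Linear outer relaxation of the L2 ball of radius delta: each of the
        # four constraints (together |vx| + |vy| <= sqrt(2) * delta) is implied
        # by vx^2 + vy^2 <= delta^2, so every flow in the true L2 ball stays
        # feasible (sound over-approximation).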
model.addConstr(vx + vy <= math.sqrt(2) * self.delta)
model.addConstr(vx - vy <= math.sqrt(2) * self.delta)
model.addConstr(-vx + vy <= math.sqrt(2) * self.delta)
model.addConstr(-vx - vy <= math.sqrt(2) * self.delta)
def compute_candidates(self):
delta_sqr = self.delta ** 2
radius = math.ceil(self.delta)
for row, col in product(range(-radius, radius), repeat=2):
lb_row, ub_row = row, row + 1
lb_col, ub_col = col, col + 1
interpolation_region = [[lb_col, ub_col], [lb_row, ub_row]]
distances_row = sorted((abs(lb_row), abs(ub_row)))
distances_col = sorted((abs(lb_col), abs(ub_col)))
# no overlap with adversarial region
if distances_row[0] ** 2 + distances_col[0] ** 2 >= delta_sqr:
continue
flows = list()
flows_by_channel = list()
# full overlap with interpolation region
if distances_row[1] ** 2 + distances_col[1] ** 2 <= delta_sqr:
flows = [
torch.tensor([lb_col, lb_row]).repeat(
self.batch_size, self.height, self.width, 1
).float().to(self.device),
torch.tensor([ub_col, lb_row]).repeat(
self.batch_size, self.height, self.width, 1
).float().to(self.device),
torch.tensor([lb_col, ub_row]).repeat(
self.batch_size, self.height, self.width, 1
).float().to(self.device),
torch.tensor([ub_col, ub_row]).repeat(
self.batch_size, self.height, self.width, 1
).float().to(self.device)
]
else:
if lb_col ** 2 + lb_row ** 2 <= delta_sqr:
flows.append(
torch.tensor([lb_col, lb_row]).repeat(
self.batch_size, self.height, self.width, 1
).float().to(self.device)
)
if ub_col ** 2 + lb_row ** 2 <= delta_sqr:
flows.append(
torch.tensor([ub_col, lb_row]).repeat(
self.batch_size, self.height, self.width, 1
).float().to(self.device)
)
if lb_col ** 2 + ub_row ** 2 <= delta_sqr:
flows.append(
torch.tensor([lb_col, ub_row]).repeat(
self.batch_size, self.height, self.width, 1
).float().to(self.device)
)
if ub_col ** 2 + ub_row ** 2 <= delta_sqr:
flows.append(
torch.tensor([ub_col, ub_row]).repeat(
self.batch_size, self.height, self.width, 1
).float().to(self.device)
)
box_row = sorted((lb_row, ub_row), key=abs)
box_col = sorted((lb_col, ub_col), key=abs)
candidates = list()
row_sign = -1 if row < 0 else 1
col_sign = -1 if col < 0 else 1
if box_col[0] ** 2 <= delta_sqr:
candidates.append([
box_col[0],
row_sign * math.sqrt(delta_sqr - box_col[0] ** 2)
])
if box_col[1] ** 2 <= delta_sqr:
candidates.append([
box_col[1],
row_sign * math.sqrt(delta_sqr - box_col[1] ** 2)
])
if box_row[0] ** 2 <= delta_sqr:
candidates.append([
col_sign * math.sqrt(delta_sqr - box_row[0] ** 2),
box_row[0]
])
if box_row[1] ** 2 <= delta_sqr:
candidates.append([
col_sign * math.sqrt(delta_sqr - box_row[1] ** 2),
box_row[1]
])
endpoints = [
candidate for candidate in candidates if self.in_box(
candidate, lb_col, ub_col, lb_row, ub_row
)
]
for endpoint in endpoints:
flows.append(
torch.tensor(endpoint).repeat(
self.batch_size, self.height, self.width, 1
).float().to(self.device)
)
flows_by_channel = self.compute_extremum_on_arc(
col=col, row=row, endpoints=endpoints,
interpolation_region=interpolation_region
)
for flow in flows:
candidate = interpolate(self.images, flow)
for channel in range(self.channels):
self.candidates[channel].append(candidate[:, channel])
self.candidate_flows[channel].append(flow)
for channel, flows in enumerate(flows_by_channel):
for flow in flows:
self.candidates[channel].append(
interpolate(self.images, flow)[:, channel]
)
self.candidate_flows[channel].append(flow)
def in_box(self, point, lb_x, ub_x, lb_y, ub_y):
return (lb_x <= point[0] <= ub_x) and (lb_y <= point[1] <= ub_y)
def compute_extremum_on_arc(self, col, row, endpoints,
interpolation_region):
(lb_col, ub_col), (lb_row, ub_row) = interpolation_region
alpha = interpolate(
self.images.double(),
torch.tensor([lb_col, lb_row]).double().to(self.device)
)
beta = interpolate(
self.images.double(),
torch.tensor([ub_col, lb_row]).double().to(self.device)
)
gamma = interpolate(
self.images.double(),
torch.tensor([lb_col, ub_row]).double().to(self.device)
)
delta = interpolate(
self.images.double(),
torch.tensor([ub_col, ub_row]).double().to(self.device)
)
# a = torch.add(
# alpha * ub_col * ub_row - beta * lb_col * ub_row,
# delta * lb_col * lb_row - gamma * ub_col * lb_row
# )
b = (beta - alpha) * ub_row + (gamma - delta) * lb_row
c = (gamma - alpha) * ub_col + (beta - delta) * lb_col
d = alpha - beta - gamma + delta
e = - b / (2 * d)
f = b * b / (4 * d * d)
g = c / d
h = e * e + f
j = (self.delta ** 2 - h) ** 2 - 4 * f * e * e
k = - 2 * g * ((self.delta ** 2 - h) + 2 * e * e)
l = g * g - 4 * ((self.delta ** 2 - h) + e * e)
m = 4 * g
n = torch.full_like(m, 4).double().to(self.device)
flows = [
[
torch.zeros(
self.batch_size, self.height, self.width, 2
).float().to(self.device) for _ in range(16)
] for channel in range(self.channels)
]
for batch in range(self.batch_size):
for channel in range(self.channels):
for height in range(self.height):
for width in range(self.width):
b_val = b[batch, channel, height, width].item()
c_val = c[batch, channel, height, width].item()
d_val = d[batch, channel, height, width].item()
if math.isclose(d_val, 0, abs_tol=1e-6):
if (c_val == 0) or (b_val == 0):
continue
denominator = math.sqrt(b_val ** 2 + c_val ** 2)
x = b_val * self.delta / denominator
y = c_val * self.delta / denominator
flows[channel][0][batch, height, width, 0] = x
flows[channel][0][batch, height, width, 1] = y
flows[channel][1][batch, height, width, 0] = x
flows[channel][1][batch, height, width, 1] = -y
flows[channel][2][batch, height, width, 0] = -x
flows[channel][2][batch, height, width, 1] = y
flows[channel][3][batch, height, width, 0] = -x
flows[channel][3][batch, height, width, 1] = -y
continue
coeffs = [
n[batch, channel, height, width].item(),
m[batch, channel, height, width].item(),
l[batch, channel, height, width].item(),
k[batch, channel, height, width].item(),
j[batch, channel, height, width].item()
]
roots = polyroots(coeffs, maxsteps=500, extraprec=100)
for idx, root in enumerate(roots):
root = complex(root)
if not math.isclose(root.imag, 0, abs_tol=1e-7):
continue
x = float(root.real)
if self.delta ** 2 < x ** 2:
continue
y = math.sqrt(self.delta ** 2 - x ** 2)
i = 4 * idx
flows[channel][i + 0][batch, height, width, 0] = x
flows[channel][i + 0][batch, height, width, 1] = y
flows[channel][i + 1][batch, height, width, 0] = x
flows[channel][i + 1][batch, height, width, 1] = -y
flows[channel][i + 2][batch, height, width, 0] = -x
flows[channel][i + 2][batch, height, width, 1] = y
flows[channel][i + 3][batch, height, width, 0] = -x
flows[channel][i + 3][batch, height, width, 1] = -y
for channel in range(self.channels):
for idx in range(16):
vx = flows[channel][idx][:, :, :, 0]
vy = flows[channel][idx][:, :, :, 1]
box_col_constraint = (lb_col <= vx) & (vx <= ub_col)
box_row_constraint = (lb_row <= vy) & (vy <= ub_row)
box_constraint = box_col_constraint & box_row_constraint
flows[channel][idx][:, :, :, 0] = torch.where(
box_constraint, vx, torch.zeros_like(vx)
)
flows[channel][idx][:, :, :, 1] = torch.where(
box_constraint, vy, torch.zeros_like(vy)
)
return flows
def linear_constraints(self):
return TInfNormTransformer(
self.images, self.delta
).linear_constraints()
|
landlab/components/species_evolution/zone_controller.py | amanaster2/landlab | 257 | 41274 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ZoneController of SpeciesEvolver."""
import numpy as np
from scipy.ndimage.measurements import label
from .record import Record
from .zone import Zone, _update_zones
from .zone_taxon import ZoneTaxon
class ZoneController(object):
"""Controls zones and populates them with taxa.
This object manages 'zones' that are used to evaluate the spatial aspect of
taxa. A zone represents a portion of a model grid. It is made up of
spatially continuous grid nodes.
This controller creates zones using the initialization parameter,
``zone_function``. This function identifies all of the grid nodes where
zones are to be created. A zone is created for each cluster of spatially
    continuous nodes. Zones are also updated using this function when the
``run_one_step`` method of this controller is called.
The structure of an example model grid is diagrammed below to demonstrate
how zones are created. The grid contains six columns and six rows. In this
example, the function returns True where node values are greater than 0.
Nodes marked with an ``*`` are nodes that will belong to a zone because the
mask is True at these nodes. All other nodes are marked with a ``·``. A
zone is created for each cluster of continuous nodes where the mask is True.
::
values mask returned
evaluated by zone function
0 0 0 0 0 0 · · · · · ·
0 0 0 5 4 2 · · · * * *
0 6 0 4 6 0 · * · * * ·
0 2 0 0 0 0 · * · · · ·
0 0 4 0 4 0 · · * · * ·
0 0 0 0 0 0 · · · · · ·
The above example is continued in the following four diagrams that
demonstrate how individual zones are identified. Each zone is marked with a
``x``, ``o``, ``+``, or ``@``. Clusters can be identified using ``D8``
where diagonal neighbors are included or ``D4`` where diagonal neighbors
are excluded. A minimum zone area can be enforced with the ``minimum_area``
initialization parameter.
::
D8 D4 D8 D4
min area = 0 min area = 0 min area = 2 min area = 2
· · · · · · · · · · · · · · · · · · · · · · · ·
· · · + + + · · · + + + · · · + + + · · · + + +
· x · + + · · x · + + · · x · + + · · x · + + ·
· x · · · · · x · · · · · x · · · · · x · · · ·
· · x · o · · · @ · o · · · x · · · · · · · · ·
· · · · · · · · · · · · · · · · · · · · · · · ·
    The grid perimeter affects zone creation. Zones can include perimeter
    nodes (the nodes along grid edges); however, because perimeter nodes are
    not associated with cells that have area, they do not contribute to the
    area summation of clusters. The area summation only takes into account
    the cells associated with core nodes.
Creation of zones along boundaries is illustrated below. A zone extent mask
different from the above example was produced by the hypothetical zone
function in this example. Again ``*`` indicates where a zone can exist.
    Distinct zones are marked with the symbols ``$`` and ``#`` in addition to
    the symbols defined above. Individual zone masks and the count of zones
    are affected by the use of ``D8`` or ``D4`` along with the minimum area
    parameter, especially when zone clusters lie along the grid perimeter.
::
zone function D8 D4 D8 D4
returned mask min area = 0 min area = 0 min_area = 2 min_area = 2
* · · * * · + · · x x · + · · x x · + · · · · · · · · · · ·
· * · · * · · + · · x · · # · · x · · + · · · · · # · · · ·
· * · · · · · + · · · · · # · · · · · + · · · · · # · · · ·
* · · · * * + · · · o o $ · · · o o + · · · o o · · · · o o
· · · · * · · · · · o · · · · · o · · · · · o · · · · · o ·
· * * · · · · @ @ · · · · @ @ · · · · · · · · · · · · · · ·
    By default, ``ZoneTaxon`` objects are used with this controller, and the
    following paragraphs make that assumption. See the documentation of the
    populate methods to learn how to use other types. Speciation of
    ``ZoneTaxon`` objects occurs when a taxon exists in more than one zone
    once the allopatric wait time has been exceeded in that zone. See the
    ``ZoneTaxon`` documentation for more about allopatric wait time.
    A different example grid is used here to demonstrate the temporal
    connectivity of zones. The grid below represents time ``T0``, with a zone's nodes
marked with ``x``. The following examples will use D8 neighborhoods and a
minimum zone area of 0.
::
T0
· · · · · ·
· · · · · ·
· x x x x ·
· x x x x ·
· · · · · ·
· · · · · ·
    Below are four variations of the grid at a later time, ``T1``, each
    containing one zone. In ``T1a``, ``T1b``, and ``T1c`` the zone stayed the
    same, moved, and changed size, respectively. Taxa migrate with the zone
    when at least one zone node overlaps between the two time steps. However,
    in ``T1d`` no nodes overlap, so taxa do not disperse from the zone in T0
    to the zone in T1d.
::
T1a T1b T1c T1d
· · · · · · · · · · · · · · · · · · · · · · · ·
· + + + + · · · · · · · · · + + · · · · · · · ·
· + + + + · · + + + + · · + + + + · · · · · · ·
· · · · · · · + + + + · · · · · + · · + + + + ·
· · · · · · · · · · · · · · · · · · · + + + + ·
· · · · · · · · · · · · · · · · · · · · · · · ·
Another ``T1`` variation, now demonstrating two zones, ``+`` and ``x``.
Multiple zones overlapping a zone in the prior time step can be interpreted
as a zone that fragmented, which may affect resident taxa. The number of
zone fragmentations can be viewed in the ``record_data_frame`` attribute.
In the T1e example, the fragmentation count for time 1 would be 2 because
2 zones that fragmented from a prior zone were recognized at this time.
::
T1e
· · · · · ·
· · · · + ·
· x · · + ·
· x x · · ·
· x x · · ·
· · · · · ·
    The controller must decide which of the two clusters of continuous nodes
    in T1e is designated as the same zone as the zone in T0. In general, this
    decision is made whenever multiple clusters overlap the same zone of the
    prior time step. The cluster in the current time step that overlaps the
    prior zone the most inherits that zone's identity. In this example, the
    left cluster overlapped three nodes and the right cluster overlapped only
    one node, so the left cluster (marked ``x``) continues the zone from T0.
    This only matters for how new zone objects are created.
    The grid diagrammed below continues from T1e. The single cluster of
    continuous nodes at T2 overlaps both zones from T1e. When multiple prior
    zones overlap one current zone, one of them is treated as the continuing
    zone and the others are considered captured zones. The number of zone
    captures can be viewed in the ``record_data_frame`` attribute.
::
T2
· · · · · ·
· · · · · ·
· x x x x ·
· x x x · ·
· · · · · ·
· · · · · ·
    The controller again must decide which of the two clusters from T1e is
    designated as the same zone as the single zone in T2. In general, this
    decision is made whenever multiple zones of the prior time step overlap a
    zone in the current time step. The prior zone that overlaps the current
    zone the most is treated as continuing into it. In this example, the left
    cluster overlapped three nodes and the right cluster overlapped only one
    node, so the new zone keeps the designation of the left cluster. Again,
    this only matters for how new zone objects are created.
ZoneController is currently designed for use with only the grid type,
``RasterModelGrid``.
Examples
--------
Import modules used in the following examples.
>>> from landlab import RasterModelGrid
>>> from landlab.components.species_evolution import ZoneController
The first example uses the default parameters of ZoneController.
Create a model grid and an elevation field for this grid.
>>> mg = RasterModelGrid((3, 7))
>>> z = mg.add_zeros('topographic__elevation', at='node')
Set elevation to 1 for some nodes.
>>> z[[9, 10, 11, 12]] = 1
Define a zone function that returns a boolean array where `True` values
indicate the nodes where zones can be created.
>>> def zone_func(grid):
... z = grid.at_node['topographic__elevation']
... return z == 1
Instantiate ZoneController. Only one zone exists because the nodes that
were set to one are adjacent to each other in the grid.
>>> zc = ZoneController(mg, zone_func)
>>> zc.record_data_frame[['time', 'zones']]
time zones
0 0 1
Populate each zone with a taxon.
>>> taxon = zc.populate_zones_uniformly(1)
>>> len(taxon)
1
A change in elevation is forced to demonstrate a zone fragmentation, and
then the zones are updated by advancing the record time by 1000.
>>> z[10] = 0
>>> zc.run_one_step(1000)
Two zones now exist because the zone in time 0 fragmented into two zones.
>>> zc.record_data_frame[['time', 'zones', 'fragmentations']]
time zones fragmentations
0 0 1 NaN
1 1000 2 2.0
    A change in elevation is forced again, this time to demonstrate zone
    capture, where multiple zones are overlapped by a zone in the later time
    step. Statistics of the capture can be obtained with ``record_data_frame``.
>>> z[10] = 1
>>> zc.run_one_step(1000)
>>> zc.record_data_frame[['time', 'zones', 'captures',
... 'area_captured_sum', 'area_captured_max']]
time zones captures area_captured_sum area_captured_max
0 0 1 NaN NaN NaN
1 1000 2 0.0 0.0 0.0
2 2000 1 1.0 2.0 2.0
    The following example demonstrates non-default ZoneController parameters.
>>> mg = RasterModelGrid((3, 7))
>>> z = mg.add_zeros('topographic__elevation', at='node')
Similar to the prior example, define a zone function that returns a boolean
array where `True` values indicate the nodes where zones can be created.
>>> def zone_func(grid):
... z = grid.at_node['topographic__elevation']
... return z == 1
Set elevation to 1 for nodes so that two clusters of nodes within the zone
mask exist.
>>> z[[9, 10, 12]] = 1
Instantiate ZoneController with options.
>>> zc = ZoneController(mg, zone_func, minimum_area=2, initial_time=100)
    Only one zone exists, despite two clusters of nodes meeting the zone
definition, because the ``minimum_area`` was set to 2. Also, the first
time in the record was set by the ``initial_time`` parameter.
>>> zc.record_data_frame[['time', 'zones']]
time zones
0 100 1
"""
def __init__(
self,
grid,
zone_function,
minimum_area=0,
neighborhood_structure="D8",
initial_time=0,
**kwargs
):
"""Initialize the controller.
Parameters
----------
grid : RasterModelGrid
A Landlab RasterModelGrid.
zone_function : function
            A function that returns a mask of the total zone extent. The first
input parameter of this function must be `grid`.
minimum_area : float, optional
The minimum area of the zones that will be created.
neighborhood_structure : {'D8', 'D4'}, optional
The structure describes how zones are identified. The default,
'D8' evaluates the eight neighboring nodes. The diagonal
neighboring nodes are excluded when 'D4' is selected.
initial_time : float, int, optional
The initial time. The unit of time is unspecified within the
controller. The default is 0.
kwargs
Keyword arguments for ``zone_function``. Do not include ``grid``
in kwargs because ``grid``, the first parameter of this method, is
automatically added to ``kwargs``.
"""
# Set parameters.
self._grid = grid
self._zone_func = zone_function
self._zone_params = kwargs
self._min_area = minimum_area
self._record = Record(initial_time)
if neighborhood_structure in ["D8", "D4"]:
self._neighborhood_struct = neighborhood_structure
else:
raise ValueError("`neighborhood_structure` must be 'D8' or 'D4'")
# Set record initial values.
self._record.set_value("zones", np.nan)
self._record.set_value("fragmentations", np.nan)
self._record.set_value("captures", np.nan)
self._record.set_value("area_captured_sum", np.nan)
self._record.set_value("area_captured_max", np.nan)
# Include `grid` in the zone params dictionary.
self._zone_params["grid"] = self._grid
# Set initial zones.
initial_zone_extent = self._zone_func(**self._zone_params)
self._zones = self._get_zones_with_mask(initial_zone_extent)
self._record.set_value("zones", len(self._zones))
@property
def zones(self):
"""The zones of the ZoneController."""
return self._zones
@property
def record_data_frame(self):
"""A DataFrame of ZoneController variables over time.
        Each row contains the data of one model time step. The step time is
        recorded in the `time` column. The columns `zones`, `fragmentations`,
        and `captures` are the counts of these variables at a given time.
        `area_captured_sum` is the total area captured during a time step.
        `area_captured_max` is the maximum area of a single capture during a
        time step.
"""
return self._record.data_frame
def populate_zones_uniformly(self, count, taxon_type=ZoneTaxon, **kwargs):
"""Populate each zone with the same type and count of taxa.
Parameters
----------
count : int
            The number of taxa to populate each zone with.
        taxon_type : type of Taxon
            A Taxon type that takes a list of zones as its first parameter.
kwargs : dictionary
Keyword arguments of ``taxon_type``.
"""
taxa = []
for z in self._zones:
taxa.extend([taxon_type([z], **kwargs) for _ in range(count)])
return taxa
def run_one_step(self, dt):
"""Update the zones for a single timestep.
This method advances time in the record and determines the connectivity
of zones between the current and prior time steps.
Parameters
----------
dt : float
The model time step duration.
"""
self._record.advance_time(dt)
# Resolve the spatiotemporal connectivity of the prior time step zones
# to the new zones.
prior_zones = self._zones
zone_mask = self._zone_func(**self._zone_params)
new_zones = self._get_zones_with_mask(zone_mask)
self._zones = _update_zones(self._grid, prior_zones, new_zones, self._record)
self._record.set_value("zones", len(self._zones))
def _get_zones_with_mask(self, mask):
"""Get zones using a mask.
Parameters
----------
mask : ndarray
            A boolean array with one element per grid node, where `True`
            values mark the nodes within the extent of all the zones to be
            created.
Returns
-------
list of Zones
The discrete zones identified in the mask.
"""
# Label clusters of `True` values in `mask`.
if self._neighborhood_struct == "D8":
s = 3 * [[1, 1, 1]]
elif self._neighborhood_struct == "D4":
s = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
cluster_arr, cluster_ct = label(mask.reshape(self._grid.shape), structure=s)
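        # `cluster_arr` assigns an integer cluster id to each node (0 means
        # outside the mask) and `cluster_ct` is the number of clusters found.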
# Create zones for clusters.
zones = []
for i in range(1, cluster_ct + 1):
mask = (cluster_arr == i).flatten()
cluster_area = self._grid.cell_area_at_node[mask].sum()
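            # Perimeter nodes have no cells and contribute zero area, so only
            # core-node cells count toward the `minimum_area` threshold.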
if cluster_area >= self._min_area:
zones.append(Zone(self, mask))
return zones
|
torchbenchmark/models/fastNLP/fastNLP/core/batch.py | Chillee/benchmark | 2,693 | 41275 | <gh_stars>1000+
r"""
The batch module implements the :class:`~fastNLP.core.batch.DataSetIter` class required by fastNLP.
"""
__all__ = [
"BatchIter",
"DataSetIter",
"TorchLoaderIter",
]
import atexit
import abc
from numbers import Number
import numpy as np
import torch
import torch.utils.data
from collections import defaultdict
from .dataset import DataSet
from .sampler import SequentialSampler, Sampler
from ._logger import logger
_python_is_exit = False
def _set_python_is_exit():
global _python_is_exit
_python_is_exit = True
atexit.register(_set_python_is_exit)
def _pad(batch_dict, dataset, as_numpy):
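    # Pad each field with its configured padder (or just wrap it in np.array
    # when no padder is set); unless as_numpy is True, convert the padded
    # result to tensors via _to_tensor.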
result = {}
for n, vlist in batch_dict.items():
f = dataset.field_arrays[n]
if f.padder is None:
result[n] = np.array(vlist)
else:
res = f.pad(vlist)
if not as_numpy:
res, _ = _to_tensor(res, field_dtype=f.dtype)
result[n] = res
return result
class DataSetGetter:
r"""
    Passed to torch.utils.data.DataLoader to fetch data; the DataLoader passes an int idx (handled by __getitem__() here) to retrieve each sample.
"""
def __init__(self, dataset: DataSet, as_numpy=False):
self.dataset = dataset
self.as_numpy = as_numpy
self.idx_list = list(range(len(dataset)))
self.x_names = {n for n, f in dataset.get_all_fields().items() if f.is_input}
self.y_names = {n for n, f in dataset.get_all_fields().items() if f.is_target}
def __getitem__(self, idx: int):
# mapping idx to sampled idx
idx = self.idx_list[idx]
ins = self.dataset[idx]
return idx, ins
def __len__(self):
return len(self.dataset)
def collate_fn(self, ins_list: list):
r"""
        :param ins_list: [(idx1, ins1), (idx2, ins2), ...], the (index, instance) pairs returned by __getitem__
:return:
"""
indices = []
sin_x, sin_y = defaultdict(list), defaultdict(list)
        # Collect the data of the fields we care about (input / target fields)
for idx, ins in ins_list:
indices.append(idx)
for n, v in ins.items():
if n in self.x_names:
sin_x[n].append(v)
if n in self.y_names:
sin_y[n].append(v)
        # Pad as needed
sin_x = _pad(sin_x, dataset=self.dataset, as_numpy=self.as_numpy)
sin_y = _pad(sin_y, dataset=self.dataset, as_numpy=self.as_numpy)
if not self.dataset.collater.is_empty():
bx, by = self.dataset._collate_batch(ins_list)
sin_x.update(bx)
sin_y.update(by)
return indices, sin_x, sin_y
def __getattr__(self, item):
if hasattr(self.dataset, item):
return getattr(self.dataset, item)
else:
raise AttributeError("'DataSetGetter' object has no attribute '{}'".format(item))
class SamplerAdapter(torch.utils.data.Sampler):
r"""
    Passed into torch.utils.data.DataLoader; the DataLoader calls __iter__() to obtain indices (one int at a time).
"""
def __init__(self, sampler, dataset):
super().__init__(dataset)
self.sampler = sampler
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __iter__(self):
return iter(self.sampler(self.dataset))
class BatchIter:
r"""
    The class Trainer uses to iterate over data. Subclass it and implement get_num_batches(), get_batch_indices(), num_batches(), __iter__() as well as the dataset property.
"""
def __init__(self, dataset, batch_size=1, sampler=None,
num_workers=0, pin_memory=False, drop_last=False,
timeout=0, worker_init_fn=None, collate_fn=None,
batch_sampler=None):
        if isinstance(sampler, Sampler):  # a fastNLP sampler needs to be adapted for DataLoader
sampler = SamplerAdapter(sampler=sampler or SequentialSampler(), dataset=dataset)
self.sampler = sampler
self.batch_sampler = batch_sampler
        # The input to DataLoader's collate_fn is a list whose elements are the results returned by dataset[index]
if collate_fn is None:
            # pytorch <= 1.1 does not allow collate_fn=None
self.dataiter = torch.utils.data.DataLoader(
dataset=dataset, batch_size=batch_size, sampler=self.sampler,
num_workers=num_workers,
pin_memory=pin_memory, drop_last=drop_last,
timeout=timeout, worker_init_fn=worker_init_fn,
batch_sampler=batch_sampler)
else:
self.dataiter = torch.utils.data.DataLoader(
dataset=dataset, batch_size=batch_size, sampler=self.sampler,
collate_fn=collate_fn, num_workers=num_workers,
pin_memory=pin_memory, drop_last=drop_last,
timeout=timeout, worker_init_fn=worker_init_fn,
batch_sampler=batch_sampler)
        # Use the sampler's length, because with DistributedSampler each process only sees part of the data
if self.batch_sampler is None:
self._num_batches = self.get_num_batches(len(self.dataiter.sampler), batch_size, drop_last)
else:
self._num_batches = len(self.batch_sampler)
self.batch_size = batch_size
self.cur_batch_indices = None
@property
def num_batches(self):
return self._num_batches
@num_batches.setter
def num_batches(self, value):
self._num_batches = value
def init_iter(self):
pass
@staticmethod
def get_num_batches(num_samples, batch_size, drop_last):
r"""
        Compute the number of batches. Used by the front end to display progress.
        :param int num_samples:
        :param int batch_size:
        :param bool drop_last: whether to drop the last batch if it has fewer than batch_size samples.
:return:
"""
num_batches = num_samples // batch_size
if not drop_last and (num_samples % batch_size > 0):
num_batches += 1
return num_batches
def get_batch_indices(self):
r"""
        Get the indices of the most recently yielded batch. Useful for tracing the current batch back to its source data.
:return:
"""
return self.cur_batch_indices
def __len__(self):
return self.num_batches
@property
def dataset(self):
r"""
        Get the dataset currently being iterated over.
:return:
"""
return self.dataiter.dataset
@abc.abstractmethod
def __iter__(self):
r"""
        Performs the actual data iteration. Each yielded value must be two dicts: the first dict is treated as input and the second as target.
:return:
"""
        raise NotImplementedError
class DataSetIter(BatchIter):
r"""
    DataSetIter fetches data from a `DataSet` in a given order, ``batch_size`` samples at a time. With DataSetIter you
    do not need to handle input padding (it is determined by each field's Padder in the DataSet) or convert the data to tensors.
    It yields `x` and `y`::
batch = DataSetIter(data_set, batch_size=16, sampler=SequentialSampler())
num_batch = len(batch)
for batch_x, batch_y in batch:
# do stuff ...
"""
def __init__(self, dataset, batch_size=1, sampler=None, as_numpy=False, num_workers=0, pin_memory=False,
drop_last=False, timeout=0, worker_init_fn=None, batch_sampler=None):
r"""
        :param dataset: a :class:`~fastNLP.DataSet` object, the dataset
        :param int batch_size: size of each batch
        :param sampler: the :class:`~fastNLP.Sampler` to use. If ``None``, :class:`~fastNLP.SequentialSampler` is used.
            Default: ``None``
        :param bool as_numpy: if ``True``, batches are output as numpy.array, otherwise as :class:`torch.Tensor`.
            Default: ``False``
        :param int num_workers: number of worker processes used to preprocess the data
        :param bool pin_memory: whether to place the produced tensors in pinned memory, which may speed things up.
        :param bool drop_last: whether to drop the last batch if it has fewer than batch_size samples
        :param timeout: timeout for producing a single batch
        :param worker_init_fn: called once per worker at startup with a single value, the worker's index.
        :param batch_sampler: use this sampler when batches contain varying numbers of samples. Each iteration of
            batch_sampler should yield a list of indices. When batch_sampler is not None, the arguments batch_size,
            sampler and drop_last are ignored.
"""
assert isinstance(dataset, DataSet)
dataset = DataSetGetter(dataset, as_numpy)
collate_fn = dataset.collate_fn
if batch_sampler is not None:
batch_size = 1
sampler = None
drop_last = False
super().__init__(
dataset=dataset, batch_size=batch_size, sampler=sampler,
num_workers=num_workers, pin_memory=pin_memory,
drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn,
collate_fn=collate_fn, batch_sampler=batch_sampler
)
def __iter__(self):
self.init_iter()
for indices, batch_x, batch_y in self.dataiter:
self.cur_batch_indices = indices
yield batch_x, batch_y
class TorchLoaderIter(BatchIter):
r"""
    Similar to DataSetIter, but usable with non-fastNLP data containers and with fully customized batch generation,
    while still plugging into Trainer and Tester in the same way DataSetIter does.
    The data container passed in must implement the following methods
Example::
import random
from fastNLP import TorchLoaderIter
import torch
class UdfDataSet:
def __init__(self, num_samples):
self.num_samples = num_samples
            def __getitem__(self, idx):  # required method; the argument is an int in [0, len(self))
x = [random.random() for _ in range(3)]
y = random.random()
return x,y
            def __len__(self):  # required method; must return an int
return self.num_samples
        # a collate_fn must be implemented to convert the data to tensors
def collate_fn(data_list):
            # [(x1,y1), (x2,y2), ...]; the input is the __getitem__ outputs of UdfDataSet collected into a list
xs, ys = [], []
for l in data_list:
x, y = l
xs.append(x)
ys.append(y)
            # no need to move to GPU; Trainer or Tester will move it to the model's device
x,y = torch.FloatTensor(xs), torch.FloatTensor(ys)
            return {'x':x, 'y':y}, {'y':y}  # the first dict is analogous to the input fields of a DataSet, the second to the target fields
udf_dataset = UdfDataSet(10)
dataset = TorchLoaderIter(udf_dataset, collate_fn=collate_fn)
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(3, 1)
def forward(self, x, y):
return {'loss':torch.pow(self.fc(x).squeeze(-1)-y, 2).sum()}
def predict(self, x):
return {'pred':self.fc(x).squeeze(0)}
model = Model()
trainer = Trainer(train_data=dataset, model=model, loss=None, print_every=2, dev_data=dataset,
metrics=AccuracyMetric(target='y'), use_tqdm=False)
trainer.train(load_best_model=False)
    Beyond that, this approach also enables on-the-fly training, as shown in the code below
Example::
import tempfile
import random
import torch
tmp_file_handler, tmp_file_path = tempfile.mkstemp(text=True)
try:
num_samples, data = 10, []
for _ in range(num_samples):
x, y = [random.random() for _ in range(3)], random.random()
data.append(x + [y])
with open(tmp_file_path, 'w') as f:
for d in data:
f.write(' '.join(map(str, d)) + '\n')
class FileDataSet:
def __init__(self, tmp_file):
num_samples = 0
                    line_pos = [0]  # line_pos[idx] is the file offset where line idx starts
self.tmp_file_handler = open(tmp_file, 'r', encoding='utf-8')
line = self.tmp_file_handler.readline()
while line:
if line.strip():
num_samples += 1
line_pos.append(self.tmp_file_handler.tell())
line = self.tmp_file_handler.readline()
self.tmp_file_handler.seek(0)
self.num_samples = num_samples
self.line_pos = line_pos
def __getitem__(self, idx):
line_start, line_end = self.line_pos[idx], self.line_pos[idx + 1]
self.tmp_file_handler.seek(line_start)
line = self.tmp_file_handler.read(line_end - line_start).strip()
values = list(map(float, line.split()))
x, y = values[:3], values[-1]
return x, y
def __len__(self):
return self.num_samples
def collate_fn(data_list):
                # [(x1,y1), (x2,y2), ...]; the input is the __getitem__ outputs collected into a list
xs, ys = [], []
for l in data_list:
x, y = l
xs.append(x)
ys.append(y)
x, y = torch.FloatTensor(xs), torch.FloatTensor(ys)
                return {'x': x, 'y': y}, {'y': y}  # the first dict is analogous to the input fields of a DataSet, the second to the target fields
file_data = FileDataSet(tmp_file_path)
dataset = TorchLoaderIter(file_data, collate_fn=collate_fn)
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(3, 1)
def forward(self, x, y):
return {'loss': torch.pow(self.fc(x).squeeze(-1) - y, 2).sum()}
def predict(self, x):
return {'pred': self.fc(x).squeeze(0)}
model = Model()
trainer = Trainer(train_data=dataset, model=model, loss=None, print_every=2, dev_data=dataset,
metrics=AccuracyMetric(target='y'), use_tqdm=False, n_epochs=2)
trainer.train(load_best_model=False)
finally:
import os
if os.path.exists(tmp_file_path):
os.remove(tmp_file_path)
"""
def __init__(self, dataset, collate_fn, batch_size=1, sampler=None,
num_workers=0, pin_memory=False, drop_last=False,
timeout=0, worker_init_fn=None,
batch_sampler=None):
r"""
        :param dataset: a data container implementing __getitem__ and __len__.
        :param callable collate_fn: function that assembles samples into a batch. Its input is
            [dataset[idx1], dataset[idx2], ...], i.e. a list of the dataset's __getitem__ return values. It must return
            two dicts; the first is treated as input and the second as target. Data that needs to become tensors must be
            converted inside collate_fn, but it does not need to be moved to the target device.
        :param int batch_size: size of each batch
        :param sampler: the :class:`~fastNLP.Sampler` to use. If ``None``, :class:`~fastNLP.SequentialSampler` is used.
            Default: ``None``
        :param int num_workers: number of worker processes used to preprocess the data
        :param bool pin_memory: whether to place the produced tensors in pinned memory, which may speed things up.
        :param bool drop_last: whether to drop the last batch if it has fewer than batch_size samples
        :param timeout: timeout for producing a single batch
        :param worker_init_fn: called once per worker at startup with a single value, the worker's index.
        :param batch_sampler: use this sampler when batches contain varying numbers of samples. Each iteration of
            batch_sampler should yield a list of indices. When batch_sampler is not None, the arguments batch_size,
            sampler and drop_last are ignored.
"""
assert len(dataset) > 0
assert collate_fn is not None, "You must pass collate_fn to pad the batch."
if batch_sampler is not None:
batch_size = 1
sampler = None
drop_last = False
super().__init__(
dataset=dataset, batch_size=batch_size, sampler=sampler,
num_workers=num_workers, pin_memory=pin_memory,
drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn,
collate_fn=collate_fn, batch_sampler=batch_sampler
)
def __iter__(self):
self.init_iter()
for batch_x, batch_y in self.dataiter:
self.cur_batch_indices = None
yield batch_x, batch_y
def _to_tensor(batch, field_dtype):
r"""
:param batch: np.array()
:param field_dtype: 数据类型
:return: batch, flag. 如果传入的数据支持转为tensor,返回的batch就是tensor,且flag为True;如果传入的数据不支持转为tensor,
返回的batch就是原来的数据,且flag为False
"""
try:
if field_dtype is not None and isinstance(field_dtype, type)\
and issubclass(field_dtype, Number) \
and not isinstance(batch, torch.Tensor):
new_batch = torch.as_tensor(batch)
flag = True
else:
new_batch = batch
flag = False
if torch.is_tensor(new_batch):
if 'float' in new_batch.dtype.__repr__():
new_batch = new_batch.float()
elif 'int' in new_batch.dtype.__repr__():
new_batch = new_batch.long()
return new_batch, flag
except Exception as e:
raise e
|
hungarian_tf_tests.py | shaolinkhoa/rec-attend-public | 118 | 41277 | import numpy as np
import tensorflow as tf
import unittest
hungarian_module = tf.load_op_library("hungarian.so")
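# The custom op returns the assignment/matching matrix M together with the row
# (c_0) and column (c_1) values of the minimum weighted bipartite cover, as the
# expected values asserted in the tests below suggest.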
class HungarianTests(unittest.TestCase):
def test_min_weighted_bp_cover_1(self):
W = np.array([[3, 2, 2], [1, 2, 0], [2, 2, 1]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([2, 1, 1])
c_1_t = np.array([1, 1, 0])
M_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
pass
def test_min_weighted_bp_cover_2(self):
W = np.array([[5, 0, 4, 0], [0, 4, 6, 8], [4, 0, 5, 7]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([5, 6, 5])
c_1_t = np.array([0, 0, 0, 2])
M_t = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_min_weighted_bp_cover_3(self):
W = np.array([[5, 0, 2], [3, 1, 0], [0, 5, 0]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([2, 0, 4])
c_1_t = np.array([3, 1, 0])
M_t = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_min_weighted_bp_cover_4(self):
W = np.array([[[5, 0, 2], [3, 1, 0], [0, 5, 0]], [[3, 2, 2], [1, 2, 0],
[2, 2, 1]]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([[2, 0, 4], [2, 1, 1]])
c_1_t = np.array([[3, 1, 0], [1, 1, 0]])
M_t = np.array([[[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [0, 1, 0],
[0, 0, 1]]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_real_values_1(self):
# Test the while loop terminates with real values.
W = np.array(
[[0.90, 0.70, 0.30, 0.20, 0.40, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.80, 0.75, 0.92, 0.10, 0.15, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.78, 0.85, 0.66, 0.29, 0.21, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.42, 0.55, 0.23, 0.43, 0.33, 0.002, 0.001, 0.001, 0.001, 0.001],
[0.64, 0.44, 0.33, 0.33, 0.34, 0.001, 0.002, 0.001, 0.001, 0.001],
[0.22, 0.55, 0.43, 0.43, 0.14, 0.001, 0.001, 0.002, 0.001, 0.001],
[0.43, 0.33, 0.34, 0.22, 0.14, 0.001, 0.001, 0.001, 0.002, 0.001],
[0.33, 0.42, 0.23, 0.13, 0.43, 0.001, 0.001, 0.001, 0.001, 0.002],
[0.39, 0.24, 0.53, 0.56, 0.89, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.12, 0.34, 0.82, 0.82, 0.77, 0.001, 0.001, 0.001, 0.001, 0.001]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
M_t = np.array(
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])
self.assertTrue((M == M_t).all())
def test_real_values_2(self):
W = np.array([[
0.00604139, 0.0126045, 0.0117373, 0.01245, 0.00808836, 0.0162662,
0.0137996, 0.00403898, 0.0123786, 1e-05
], [
0.00604229, 0.0126071, 0.0117400, 0.0124528, 0.00808971, 0.0162703,
0.0138028, 0.00403935, 0.0123812, 1e-05
], [
0.00604234, 0.0126073, 0.0117402, 0.012453, 0.00808980, 0.0162706,
0.0138030, 0.00403937, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_3(self):
W = np.array([[
0.00302646, 0.00321431, 0.0217552, 0.00836773, 0.0256353, 0.0177026,
0.0289461, 0.0214768, 0.0101898, 1e-05
], [
0.00302875, 0.003217, 0.0217628, 0.00836405, 0.0256229, 0.0177137,
0.0289468, 0.0214719, 0.0101904, 1e-05
], [
0.00302897, 0.00321726, 0.0217636, 0.00836369, 0.0256217, 0.0177148,
0.0289468, 0.0214714, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_4(self):
W = np.array([[
1e-05, 0.0634311, 1e-05, 4.76687e-05, 1.00079e-05, 1.00378e-05, 1e-05,
1e-05, 1e-05, 3.9034e-05
], [
1e-05, 3.42696e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1.0122e-05,
3.43236e-05, 1e-05
], [
1e-05, 0.0426792, 0.031155, 1.0008e-05, 0.00483961, 0.0228187, 1e-05,
1e-05, 1e-05, 0.102463
], [
1e-05, 1e-05, 1e-05, 1.07065e-05, 1e-05, 1.00185e-05, 1e-05, 1e-05,
1e-05, 1.00007e-05
], [
1e-05, 4.22947e-05, 0.00062168, 0.623917, 1.03468e-05, 0.00588984,
1.00004e-05, 1.44433e-05, 1.00014e-05, 0.000213425
], [
1e-05, 1.01764e-05, 1e-05, 0.000667249, 1e-05, 0.000485082, 1e-05,
1e-05, 1.00002e-05, 1e-05
], [
1e-05, 1e-05, 1.50331e-05, 1e-05, 0.11269, 1e-05, 1e-05, 1e-05, 1e-05,
1.13251e-05
], [
1.0001e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0246974, 1e-05, 1e-05,
1e-05
], [
1e-05, 2.89144e-05, 1e-05, 1.05147e-05, 1e-05, 0.000894762, 1.03587e-05,
0.150301, 1e-05, 1.00045e-05
], [
1e-05, 3.97901e-05, 1e-05, 1.11641e-05, 1e-05, 2.34249e-05, 1.0007e-05,
2.42828e-05, 1e-05, 1.10529e-05
]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_5(self):
W = np.array([[
1.4e-05, 1e-05, 1e-05, 0.053306, 0.044139, 1e-05, 1.2e-05, 1e-05, 1e-05,
1e-05
], [
0.001234, 1e-05, 1e-05, 2.1e-05, 1e-05, 0.001535, 0.019553, 1e-05,
1e-05, 1e-05
], [
0.002148, 1e-05, 1e-05, 1.6e-05, 0.651536, 2e-05, 7.4e-05, 0.002359,
1e-05, 1e-05
], [
3.8e-05, 1e-05, 0.000592, 4.7e-05, 0.09173, 1e-05, 1e-05, 1e-05, 1e-05,
1e-05
], [
1e-05, 1e-05, 1e-05, 0.213736, 1e-05, 4.5e-05, 0.000768, 1e-05, 1e-05,
1e-05
], [
1e-05, 1e-05, 1e-05, 0.317609, 1e-05, 1e-05, 0.002151, 1e-05, 1e-05,
1e-05
], [
0.002802, 1e-05, 1.2e-05, 1e-05, 1e-05, 0.002999, 4.8e-05, 1.1e-05,
0.000919, 1e-05
], [
1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.028816, 1e-05
], [
1e-05, 1e-05, 0.047335, 1e-05, 1.2e-05, 1e-05, 1e-05, 1e-05, 1e-05,
1e-05
], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_6(self):
W = np.array([[
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(HungarianTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
bcs-ui/backend/templatesets/legacy_apps/configuration/k8s/constants.py | laodiu/bk-bcs | 599 | 41278 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from backend.templatesets.legacy_apps.instance.funutils import update_nested_dict
from ..constants import FILE_DIR_PATTERN, KRESOURCE_NAMES, NUM_VAR_PATTERN
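# JSON Schema (draft-04 style) definitions used to validate user-supplied K8s
# resource configurations in templatesets.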
# Resource name pattern
K8S_RES_NAME_PATTERN = "^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
# Volume name restrictions
VOLUMR_NAME_PATTERN = "^[a-zA-Z{]{1}[a-zA-Z0-9-_{}]{0,254}$"
# TODO: validate the case where the value is a template variable
PORT_NAME_PATTERN = "^[a-zA-Z{]{1}[a-zA-Z0-9-{}_]{0,254}$"
# configmap/secret key name restrictions
KEY_NAME_PATTERN = "^[.a-zA-Z{]{1}[a-zA-Z0-9-_.{}]{0,254}$"
# Affinity validation
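# The match-expression schema below mirrors the Kubernetes NodeSelectorRequirement /
# LabelSelectorRequirement shape (key / operator / values).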
AFFINITY_MATCH_EXPRESSION_SCHEMA = {
"type": "array",
"items": {
"type": "object",
"required": ["key", "operator"],
"properties": {
"key": {"type": "string", "minLength": 1},
"operator": {"type": "string", "enum": ["In", "NotIn", "Exists", "DoesNotExist", "Gt", "Lt"]},
"values": {"type": "array", "items": {"type": "string", "minLength": 1}},
},
"additionalProperties": False,
},
}
POD_AFFINITY_TERM_SCHEMA = {
"type": "object",
"properties": {
"labelSelector": {"type": "object", "properties": {"matchExpressions": AFFINITY_MATCH_EXPRESSION_SCHEMA}},
"namespaces": {"type": "array", "items": {"type": "string"}},
"topologyKey": {"type": "string"},
},
"additionalProperties": False,
}
POD_AFFINITY_SCHEMA = {
"type": "object",
"properties": {
"requiredDuringSchedulingIgnoredDuringExecution": {"type": "array", "items": POD_AFFINITY_TERM_SCHEMA},
"preferredDuringSchedulingIgnoredDuringExecution": {
"type": "array",
"items": {
"type": "object",
"required": ["podAffinityTerm"],
"properties": {
"weight": {
"oneOf": [
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 1, "maximum": 100},
]
},
"podAffinityTerm": POD_AFFINITY_TERM_SCHEMA,
},
},
},
},
"additionalProperties": False,
}
# Liveness & readiness checks
K8S_CHECK_SCHEMA = {
"type": "object",
"required": ["initialDelaySeconds", "periodSeconds", "timeoutSeconds", "failureThreshold", "successThreshold"],
"properties": {
"initialDelaySeconds": {
"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 0}]
},
"periodSeconds": {"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 1}]},
"timeoutSeconds": {
"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 1}]
},
"failureThreshold": {
"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 1}]
},
"successThreshold": {
"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 1}]
},
"exec": {"type": "object", "properties": {"command": {"type": "string"}}},
"tcpSocket": {"type": "object", "properties": {"port": {"oneOf": [{"type": "number"}, {"type": "string"}]}}},
"httpGet": {
"type": "object",
"properties": {
"port": {"oneOf": [{"type": "number"}, {"type": "string"}]},
"path": {"type": "string"},
"httpHeaders": {
"type": "array",
"items": {
"type": "object",
"properties": {"name": {"type": "string"}, "value": {"type": "string"}},
},
},
},
},
},
}
INIT_CONTAINER_SCHEMA = {
"type": "array",
"items": {
"type": "object",
"required": ["name", "image", "imagePullPolicy", "volumeMounts", "ports", "resources"],
"properties": {
"name": {"type": "string", "minLength": 1},
"image": {"type": "string", "minLength": 1},
"imagePullPolicy": {"type": "string", "enum": ["Always", "IfNotPresent", "Never"]},
"volumeMounts": {
"type": "array",
"items": {
"type": "object",
"required": ["name", "mountPath", "readOnly"],
"properties": {
"name": {"type": "string", "pattern": VOLUMR_NAME_PATTERN},
"mountPath": {"type": "string", "pattern": FILE_DIR_PATTERN},
"readOnly": {"type": "boolean"},
},
},
},
"ports": {
"type": "array",
"items": {
"type": "object",
"required": ["name", "containerPort"],
"properties": {
"name": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "string", "pattern": PORT_NAME_PATTERN},
]
},
"containerPort": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 1, "maximum": 65535},
]
},
},
},
},
"command": {"type": "string"},
"args": {"type": "string"},
            # The frontend stores environment variables in webCache.env_list; the backend assembles them into env & envFrom
"env": {
"type": "array",
"items": {
"type": "object",
"required": ["name"],
"properties": {
"name": {"type": "string", "minLength": 1},
"value": {"type": "string"},
"valueFrom": {
"type": "object",
"properties": {
"fieldRef": {
"type": "object",
"required": ["fieldPath"],
"properties": {"fieldPath": {"type": "string"}},
},
"configMapKeyRef": {
"type": "object",
"required": ["name", "key"],
"properties": {
"name": {"type": "string", "minLength": 1},
"key": {"type": "string", "minLength": 1},
},
},
"secretKeyRef": {
"type": "object",
"required": ["name", "key"],
"properties": {
"name": {"type": "string", "minLength": 1},
"key": {"type": "string", "minLength": 1},
},
},
},
},
},
},
},
"envFrom": {
"type": "array",
"items": {
"type": "object",
"properties": {
"configMapRef": {
"type": "object",
"properties": {"name": {"type": "string", "minLength": 1}},
},
"secretRef": {"type": "object", "properties": {"name": {"type": "string", "minLength": 1}}},
},
},
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpu": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 0},
]
},
"memory": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "number", "minimum": 0},
{"type": "string", "pattern": NUM_VAR_PATTERN},
]
},
},
},
"requests": {
"type": "object",
"properties": {
"cpu": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "number", "minimum": 0},
{"type": "string", "pattern": NUM_VAR_PATTERN},
]
},
"memory": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "number", "minimum": 0},
{"type": "string", "pattern": NUM_VAR_PATTERN},
]
},
},
},
},
},
},
},
}
CONTAINER_SCHEMA = {
"type": "array",
"items": {
"type": "object",
"required": [
"name",
"image",
"imagePullPolicy",
"volumeMounts",
"ports",
"resources",
"livenessProbe",
"readinessProbe",
"lifecycle",
],
"properties": {
"name": {"type": "string", "minLength": 1},
"image": {"type": "string", "minLength": 1},
"imagePullPolicy": {"type": "string", "enum": ["Always", "IfNotPresent", "Never"]},
"volumeMounts": {
"type": "array",
"items": {
"type": "object",
"required": ["name", "mountPath", "readOnly"],
"properties": {
"name": {"type": "string", "pattern": VOLUMR_NAME_PATTERN},
"mountPath": {"type": "string", "pattern": FILE_DIR_PATTERN},
"readOnly": {"type": "boolean"},
},
},
},
"ports": {
"type": "array",
"items": {
"type": "object",
"required": ["name", "containerPort"],
"properties": {
"name": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "string", "pattern": PORT_NAME_PATTERN},
]
},
"containerPort": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 1, "maximum": 65535},
]
},
},
},
},
"command": {"type": "string"},
"args": {"type": "string"},
            # The frontend stores environment variables in webCache.env_list; the backend assembles them into env & envFrom
"env": {
"type": "array",
"items": {
"type": "object",
"required": ["name"],
"properties": {
"name": {"type": "string", "minLength": 1},
"value": {"type": "string"},
"valueFrom": {
"type": "object",
"properties": {
"fieldRef": {
"type": "object",
"required": ["fieldPath"],
"properties": {"fieldPath": {"type": "string"}},
},
"configMapKeyRef": {
"type": "object",
"required": ["name", "key"],
"properties": {
"name": {"type": "string", "minLength": 1},
"key": {"type": "string", "minLength": 1},
},
},
"secretKeyRef": {
"type": "object",
"required": ["name", "key"],
"properties": {
"name": {"type": "string", "minLength": 1},
"key": {"type": "string", "minLength": 1},
},
},
},
},
},
},
},
"envFrom": {
"type": "array",
"items": {
"type": "object",
"properties": {
"configMapRef": {
"type": "object",
"properties": {"name": {"type": "string", "minLength": 1}},
},
"secretRef": {"type": "object", "properties": {"name": {"type": "string", "minLength": 1}}},
},
},
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpu": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 0},
]
},
"memory": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "number", "minimum": 0},
{"type": "string", "pattern": NUM_VAR_PATTERN},
]
},
},
},
"requests": {
"type": "object",
"properties": {
"cpu": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "number", "minimum": 0},
{"type": "string", "pattern": NUM_VAR_PATTERN},
]
},
"memory": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "number", "minimum": 0},
{"type": "string", "pattern": NUM_VAR_PATTERN},
]
},
},
},
},
},
"livenessProbe": K8S_CHECK_SCHEMA,
"readinessProbe": K8S_CHECK_SCHEMA,
"lifecycle": {
"type": "object",
"required": ["preStop", "postStart"],
"properties": {
"preStop": {"type": "object", "required": ["exec"], "properties": {"command": {"type": "string"}}},
"postStart": {
"type": "object",
"required": ["exec"],
"properties": {"command": {"type": "string"}},
},
},
},
},
},
}
K8S_DEPLOYMENT_SCHEMA = {
"type": "object",
"required": ["metadata", "spec"],
"properties": {
"metadata": {
"type": "object",
"required": ["name"],
"properties": {"name": {"type": "string", "pattern": K8S_RES_NAME_PATTERN}},
},
"spec": {
"type": "object",
"required": ["replicas", "strategy", "template"],
"properties": {
"replicas": {
"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 0}]
},
"strategy": {
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string", "enum": ["RollingUpdate", "Recreate"]},
"rollingUpdate": {"type": "object", "required": ["maxUnavailable", "maxSurge"]},
},
},
"template": {
"type": "object",
"required": ["metadata", "spec"],
"properties": {
"metadata": {
"type": "object",
"properties": {"lables": {"type": "object"}, "annotations": {"type": "object"}},
},
"spec": {
"type": "object",
"required": [
"restartPolicy",
"terminationGracePeriodSeconds",
"nodeSelector",
"hostNetwork",
"dnsPolicy",
"volumes",
"containers",
],
"properties": {
"restartPolicy": {"type": "string", "enum": ["Always", "OnFailure", "Never"]},
"terminationGracePeriodSeconds": {
"oneOf": [
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 0},
]
},
"nodeSelector": {"type": "object"},
"hostNetwork": {"oneOf": [{"type": "number"}, {"type": "string"}]},
"dnsPolicy": {
"type": "string",
"enum": ["ClusterFirst", "Default", "None", "ClusterFirstWithHostNet"],
},
"volumes": {
"type": "array",
"items": {
"type": "object",
"required": ["name"],
"properties": {
"name": {"type": "string", "pattern": VOLUMR_NAME_PATTERN},
"hostPath": {
"type": "object",
"required": ["path"],
"properties": {
"path": {"type": "string", "pattern": FILE_DIR_PATTERN}
},
},
"emptyDir": {"type": "object"},
"configMap": {
"type": "object",
"required": ["name"],
"properties": {"name": {"type": "string", "minLength": 1}},
},
"secret": {
"type": "object",
"required": ["secretName"],
"properties": {"secretName": {"type": "string", "minLength": 1}},
},
"persistentVolumeClaim": {
"type": "object",
"required": ["claimName"],
"properties": {"claimName": {"type": "string", "minLength": 1}},
},
},
},
},
"containers": CONTAINER_SCHEMA,
"initContainers": INIT_CONTAINER_SCHEMA,
},
},
},
},
},
},
},
}
AFFINITY_SCHEMA = {
"type": "object",
"properties": {
"nodeAffinity": {
"type": "object",
"properties": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"type": "object",
"required": ["nodeSelectorTerms"],
"properties": {
"nodeSelectorTerms": {
"type": "array",
"items": {
"type": "object",
"required": ["matchExpressions"],
"properties": {"matchExpressions": AFFINITY_MATCH_EXPRESSION_SCHEMA},
},
}
},
},
"preferredDuringSchedulingIgnoredDuringExecution": {
"type": "array",
"items": {
"type": "object",
"required": ["preference"],
"properties": {
"weight": {
"oneOf": [
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 1, "maximum": 100},
]
},
"preference": {
"type": "object",
"required": ["matchExpressions"],
"properties": {"matchExpressions": AFFINITY_MATCH_EXPRESSION_SCHEMA},
},
},
},
},
},
"additionalProperties": False,
},
"podAffinity": POD_AFFINITY_SCHEMA,
"podAntiAffinity": POD_AFFINITY_SCHEMA,
},
"additionalProperties": False,
}
# Differences between DaemonSet and Deployment: when RollingUpdate is chosen as the update strategy, only maxUnavailable may be set
# "required": ["replicas", "strategy", "template"],
K8S_DAEMONSET_DIFF = {
"properties": {
"spec": {
"required": ["updateStrategy", "template"],
"properties": {"updateStrategy": {"properties": {"rollingUpdate": {"required": ["maxUnavailable"]}}}},
}
}
}
K8S_DAEMONSET_SCHEMA = update_nested_dict(K8S_DEPLOYMENT_SCHEMA, K8S_DAEMONSET_DIFF)
# Differences between Job and Deployment: Pod runtime settings
# TODO: confirm how replicas and parallelism should be configured for a Job
K8S_JOB_DIFF = {
"properties": {
"spec": {
"type": "object",
"required": ["template", "completions", "parallelism", "backoffLimit", "activeDeadlineSeconds"],
"properties": {
"parallelism": {
"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 0}]
},
"completions": {
"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 0}]
},
"backoffLimit": {
"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 0}]
},
"activeDeadlineSeconds": {
"oneOf": [{"type": "string", "pattern": NUM_VAR_PATTERN}, {"type": "number", "minimum": 0}]
},
},
}
}
}
K8S_JOB_SCHEMA = update_nested_dict(K8S_DEPLOYMENT_SCHEMA, K8S_JOB_DIFF)
# Differences between StatefulSet and Deployment
K8S_STATEFULSET_DIFF = {
"properties": {
"spec": {
"required": ["template", "updateStrategy", "podManagementPolicy", "volumeClaimTemplates"],
"properties": {
"updateStrategy": {
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string", "enum": ["OnDelete", "RollingUpdate"]},
"rollingUpdate": {
"type": "object",
"required": ["partition"],
"properties": {
"partition": {
"oneOf": [
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 0},
]
}
},
},
},
},
"podManagementPolicy": {"type": "string", "enum": ["OrderedReady", "Parallel"]},
"serviceName": {"type": "string", "minLength": 1},
"volumeClaimTemplates": {
"type": "array",
"items": {
"type": "object",
"required": ["metadata", "spec"],
"properties": {
"metadata": {
"type": "object",
"required": ["name"],
"properties": {
# "name": {"type": "string", "minLength": 1}
},
},
"spec": {
"type": "object",
"required": ["accessModes", "storageClassName", "resources"],
"properties": {
# "storageClassName": {"type": "string", "minLength": 1},
"accessModes": {
"type": "array",
"items": {
"type": "string",
"enum": ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany"],
},
},
"resources": {
"type": "object",
"required": ["requests"],
"properties": {
"requests": {
"type": "object",
"required": ["storage"],
"properties": {
"storage": {
"oneOf": [
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 0},
]
}
},
}
},
},
},
},
},
},
},
},
}
}
}
K8S_STATEFULSET_SCHEMA = update_nested_dict(K8S_DEPLOYMENT_SCHEMA, K8S_STATEFULSET_DIFF)
K8S_CONFIGMAP_SCHEMA = {
"type": "object",
"required": ["metadata", "data"],
"properties": {
"metadata": {
"type": "object",
"required": ["name"],
"properties": {"name": {"type": "string", "pattern": K8S_RES_NAME_PATTERN}},
},
"data": {
"type": "object",
"patternProperties": {KEY_NAME_PATTERN: {"type": "string"}},
"additionalProperties": False,
},
},
}
K8S_SECRET_SCHEMA = {
"type": "object",
"required": ["metadata", "data"],
"properties": {
"metadata": {
"type": "object",
"required": ["name"],
"properties": {"name": {"type": "string", "pattern": K8S_RES_NAME_PATTERN}},
},
"data": {
"type": "object",
"patternProperties": {KEY_NAME_PATTERN: {"type": "string"}},
"additionalProperties": False,
},
},
}
K8S_SERVICE_SCHEMA = {
"type": "object",
"required": ["metadata", "spec"],
"properties": {
"metadata": {"type": "object", "required": ["name"], "properties": {"name": {"type": "string"}}},
"spec": {
"type": "object",
"required": ["type", "clusterIP", "ports"],
"properties": {
"type": {"type": "string", "enum": ["ClusterIP", "NodePort"]},
"clusterIP": {"type": "string"},
"ports": {
"type": "array",
"items": {
"type": "object",
"required": ["port", "protocol"],
"properties": {
"name": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "string", "pattern": PORT_NAME_PATTERN},
]
},
"port": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 1, "maximum": 65535},
]
},
"protocol": {"type": "string", "enum": ["TCP", "UDP"]},
"targetPort": {
"anyof": [
{"type": "number", "minimum": 1, "maximum": 65535},
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "string", "minLength": 1},
]
},
"nodePort": {
"oneOf": [
{"type": "string", "pattern": "^$"},
{"type": "string", "pattern": NUM_VAR_PATTERN},
{"type": "number", "minimum": 30000, "maximum": 32767},
]
},
},
},
},
},
},
},
}
K8S_INGRESS_SCHEMA = {"type": "object", "required": ["metadata", "spec"]}
K8S_HPA_SCHNEA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "k8s_hpa",
"type": "object",
"required": ["apiVersion", "kind", "metadata", "spec"],
"properties": {
"apiVersion": {"type": "string", "enum": ["autoscaling/v2beta2"]},
"kind": {"type": "string", "enum": ["HorizontalPodAutoscaler"]},
"metadata": {
"type": "object",
"required": ["name"],
"properties": {"name": {"type": "string", "pattern": K8S_RES_NAME_PATTERN}},
},
"spec": {
"type": "object",
"required": ["scaleTargetRef", "minReplicas", "maxReplicas", "metrics"],
"properties": {
"scaleTargetRef": {
"type": "object",
"required": ["kind", "name"],
"properties": {
"kind": {"type": "string", "enum": ["Deployment"]},
"name": {"type": "string", "pattern": K8S_RES_NAME_PATTERN},
},
},
"minReplicas": {"type": "number", "minimum": 0},
"maxReplicas": {"type": "number", "minimum": 0},
"metrics": {
"type": "array",
"items": {
"type": "object",
"required": ["type", "resource"],
"properties": {
"type": {"type": "string", "enum": ["Resource"]},
"resource": {
"type": "object",
"required": ["name", "target"],
"properties": {
"name": {"type": "string", "enum": ["cpu", "memory"]},
"target": {
"type": "object",
"required": ["type", "averageUtilization"],
"properties": {
"type": {"type": "string", "enum": ["Utilization"]},
"averageUtilization": {"type": "number", "minimum": 0},
},
},
},
},
},
},
},
},
},
},
}
CONFIG_SCHEMA = [
K8S_DEPLOYMENT_SCHEMA,
K8S_DAEMONSET_SCHEMA,
K8S_JOB_SCHEMA,
K8S_STATEFULSET_SCHEMA,
K8S_SERVICE_SCHEMA,
K8S_CONFIGMAP_SCHEMA,
K8S_SECRET_SCHEMA,
K8S_INGRESS_SCHEMA,
K8S_HPA_SCHNEA,
]
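# CONFIG_SCHEMA must stay in the same order as KRESOURCE_NAMES; the zip below
# pairs each resource kind with its schema.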
CONFIG_SCHEMA_MAP = dict(zip(KRESOURCE_NAMES, CONFIG_SCHEMA))
|
modules/dbnd-airflow/test_dbnd_airflow/test_logging.py | ipattarapong/dbnd | 224 | 41299 | import logging
from logging.config import dictConfig
import dbnd
from dbnd.testing.helpers import run_dbnd_subprocess__with_home
from dbnd_airflow_contrib.dbnd_airflow_default_logger import DEFAULT_LOGGING_CONFIG
class TestDbndAirflowLogging(object):
    def test_dbnd_airflow_logging_config(self):
        # we implement it as a separate test, as we don't want to affect the current logging system
dbnd_config = DEFAULT_LOGGING_CONFIG
assert dbnd_config
def test_can_be_loaded(self):
# we can't just load config, it will affect all future tests
output = run_dbnd_subprocess__with_home([__file__.replace(".pyc", ".py")])
assert "test_can_be_loaded OK" in output
logging.error("Done")
if __name__ == "__main__":
print(
dbnd.__version__
    )  # we need dbnd imported first, before we import any airflow code
dbnd_config = DEFAULT_LOGGING_CONFIG
dictConfig(dbnd_config)
logging.info("test_can_be_loaded OK")
|
scrounger/modules/analysis/ios/app_transport_security.py | noraj/scrounger | 217 | 41316 | <filename>scrounger/modules/analysis/ios/app_transport_security.py
from scrounger.core.module import BaseModule
# helper functions
from scrounger.utils.ios import plist_dict_to_xml, plist
from scrounger.utils.config import Log
class Module(BaseModule):
meta = {
"author": "RDC",
"description": "Checks if there are any Application Transport Security \
misconfigurations",
"certainty": 90
}
options = [
{
"name": "info",
"description": "path to a local Info.plist file",
"required": True,
"default": None
}
]
_ats_key = "NSAppTransportSecurity"
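    # Info.plist keys that, when set to true, weaken App Transport Security by
    # allowing plain-HTTP loads for the app itself or for third-party domains.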
_insecure_options = [
"NSAllowsArbitraryLoads",
"NSExceptionAllowsInsecureHTTPLoads",
"NSThirdPartyExceptionAllowsInsecureHTTPLoads"
]
def run(self):
result = {
"title": "Application Has Insecure ATS Configurations",
"details": "",
"severity": "Medium",
"report": False
}
info_content = plist(self.info)
Log.info("Parsing Info.plist file contents")
ats_xml = plist_dict_to_xml(info_content, self._ats_key)
Log.info("Analysing Info.plist file")
if self._ats_key not in info_content or not info_content[self._ats_key]:
result.update({
"report": True,
"details": "No evidence of ATS being implemented found."
})
if any(option in ats_xml for option in self._insecure_options):
result.update({
"report": True,
"details": "The following insecure ATS configuration was \
found : {}".format(ats_xml)
})
return {
"{}_result".format(self.name()): result
}
|
test/test_parsetron.py | clinc/parsetron | 123 | 41323 | <gh_stars>100-1000
from parsetron import * # NOQA
import re
import pytest
__author__ = '<NAME>'
class TestMul(object):
def test_mul(self):
s = String("t")('t')
# valid grammar:
class G(Grammar):
GOAL = s * 1
s_1 = RobustParser(G())
assert s_1.print_parse("t", strict_match=True)
assert False is s_1.print_parse("t t", strict_match=True)
class G(Grammar):
GOAL = s * (1, 2)
s_1_2 = RobustParser(G())
assert s_1_2.print_parse("t", strict_match=True)
assert s_1_2.print_parse("t t", strict_match=True)
assert False is s_1_2.print_parse("t t t", strict_match=True)
class G(Grammar):
GOAL = s * [1, ]
s_1_none = RobustParser(G())
assert s_1_none.print_parse("t", strict_match=True)
assert s_1_none.print_parse("t t", strict_match=True)
assert s_1_none.print_parse("t t t t t t", strict_match=True)
class G(Grammar):
GOAL = s * [1, None]
s_1_none_a = RobustParser(G())
assert s_1_none_a.print_parse("t", strict_match=True)
assert s_1_none_a.print_parse("t t", strict_match=True)
assert s_1_none_a.print_parse("t t t t t t", strict_match=True)
class G(Grammar):
GOAL = s * [3, ]
s_3_none = RobustParser(G())
assert False is s_3_none.print_parse("t", strict_match=True)
assert False is s_3_none.print_parse("t t", strict_match=True)
assert s_3_none.print_parse("t t t", strict_match=True)
assert s_3_none.print_parse("t t t t t", strict_match=True)
class G(Grammar):
GOAL = s * [3, None]
s_3_none_a = RobustParser(G())
assert False is s_3_none_a.print_parse("t", strict_match=True)
assert False is s_3_none_a.print_parse("t t", strict_match=True)
assert s_3_none_a.print_parse("t t t", strict_match=True)
assert s_3_none_a.print_parse("t t t t t", strict_match=True)
class G(Grammar):
GOAL = s * [None, 1]
s_none_1 = RobustParser(G())
assert s_none_1.print_parse("t", strict_match=True)
class G(Grammar):
GOAL = s * [None, 3]
s_none_3 = RobustParser(G())
assert s_none_3.print_parse("t", strict_match=True)
assert s_none_3.print_parse("t t", strict_match=True)
assert s_none_3.print_parse("t t t", strict_match=True)
assert False is s_none_3.print_parse("t t t t", strict_match=True)
class G(Grammar):
GOAL = s * [0, 1]
s_0_1 = RobustParser(G())
assert s_0_1.print_parse("t", strict_match=True)
assert False is s_0_1.print_parse("a", strict_match=True)
class G(Grammar):
GOAL = s * [0, None]
s_0_1 = RobustParser(G())
assert s_0_1.print_parse("t", strict_match=True)
assert False is s_0_1.print_parse("a", strict_match=True)
class G(Grammar):
GOAL = s * [0, 1] + "b"
s_0_1 = RobustParser(G())
assert s_0_1.print_parse("t b", strict_match=True)
assert s_0_1.print_parse("b")
class G(Grammar):
GOAL = s * [0, 3]
s_0_3 = RobustParser(G())
assert s_0_3.print_parse("t", strict_match=True)
assert s_0_3.print_parse("t t", strict_match=True)
assert s_0_3.print_parse("t t t", strict_match=True)
assert False is s_0_3.print_parse("t t t t t", strict_match=True)
class G(Grammar):
GOAL = s * [3, 5]
s_3_5 = RobustParser(G())
assert False is s_3_5.print_parse("t t", strict_match=True)
assert s_3_5.print_parse("t t t", strict_match=True)
assert s_3_5.print_parse("t t t t", strict_match=True)
assert s_3_5.print_parse("t t t t t", strict_match=True)
assert False is s_3_5.print_parse("t t t t t t", strict_match=True)
class G(Grammar):
GOAL = s * [3, 3]
s_3_3 = RobustParser(G())
assert False is s_3_3.print_parse("t t", strict_match=True)
assert s_3_3.print_parse("t t t", strict_match=True)
assert False is s_3_3.print_parse("t t t t", strict_match=True)
# invalid grammar:
with pytest.raises(ValueError):
s * [3, 2]
with pytest.raises(ValueError):
s * (3, 2)
with pytest.raises(ValueError):
s * 3.0
with pytest.raises(ValueError):
s * [3.0, 4]
with pytest.raises(ValueError):
s * (3, 4.5)
with pytest.raises(ValueError):
s * None
with pytest.raises(ValueError):
s * 0
with pytest.raises(ValueError):
s * -1
with pytest.raises(ValueError):
s * [-1, 3]
with pytest.raises(ValueError):
s * [-1, None]
with pytest.raises(ValueError):
s * [1, -1]
with pytest.raises(ValueError):
s * [None, -1]
with pytest.raises(ValueError):
s * [1, 2, 3]
class TestString(object):
def test_empty_init(self):
with pytest.raises(ValueError):
String("")
def test_parse(self):
s = StringCs("hello")
assert s.parse("hello")
with pytest.raises(ParseException):
s.parse("Hello")
with pytest.raises(ParseException):
s.parse("")
with pytest.raises(ParseException):
s.parse("helloo")
with pytest.raises(ParseException):
s.parse("hell")
class TestRegex(object):
def test_empty_init(self):
with pytest.raises(ValueError):
Regex("")
def test_equal(self):
assert Regex("ab") == "ab"
assert Regex("ab") == "AB"
assert Regex("ab") != "ac"
assert Regex("ab") != 1
def test_parse(self):
r = Regex(r"(ab|bc)")
repr(r)
assert r.parse("ab")
assert r.parse("bc")
with pytest.raises(ParseException):
assert r.parse("cd")
with pytest.raises(ParseException):
assert r.parse("abc")
r1 = Regex("ab", match_whole=True)
with pytest.raises(ParseException):
assert r1.parse("abc")
r2 = Regex(re.compile("ab"))
r2.parse("ab")
with pytest.raises(ValueError):
Regex(12)
r3 = Regex("ab", match_whole=False)
r3.parse("abc")
class TestSet(object):
def test_parse(self):
with pytest.raises(ValueError):
Set(123)
s1 = Set("a b c")
s2 = Set(["a", "b", "c"])
s3 = Set({"a", "b", "c"})
s4 = Set("abc")
for s in [s1, s2, s3, s4]:
s.parse("A")
s.parse("B")
s.parse("C")
with pytest.raises(ParseException):
s.parse("d")
def test_parse_casesensitive(self):
s1 = SetCs("a b c")
s2 = SetCs(["a", "b", "c"])
s3 = SetCs({"a", "b", "c"})
s4 = SetCs("abc")
for s in [s1, s2, s3, s4]:
s.parse("a")
s.parse("b")
s.parse("c")
with pytest.raises(ParseException):
s.parse("A")
class TestAnd(object):
def test_plus(self):
a = String("1") + Optional(String("1"))
assert len(a.exprs) == 2
a += Regex("3")
assert len(a.exprs) == 3
b = "1" + a
assert len(b.exprs) == 2
b += "3"
assert len(b.exprs) == 3
assert b[0].str == "1"
def test_streamline(self):
a = String("1") + Optional(String("1"))
b = String("1") + Optional(String("1"))
c = a + b
with pytest.raises(GrammarException):
c.parse("1 1")
class TestOr(object):
def test_or(self):
a = String("1") | Optional(String("1"))
assert len(a.exprs) == 2
a |= Regex("3")
assert len(a.exprs) == 3
a |= String("4") + Optional(String("4"))
assert len(a.exprs) == 4
a |= "3"
assert len(a.exprs) == 5
with pytest.raises(GrammarException):
print(a | 3.3)
class TestStr(object):
def test_str(self):
s = String("a string")
assert str(s) == "String(a string)"
o = Optional("a string")
assert str(o) == "Optional(String(a string))"
r = Regex(r"abc")('regex')
assert str(r) == 'regex'
r1 = r.set_name('xeger')
assert str(r1) == 'xeger'
class TestGrammar(object):
class LightGrammar(Grammar):
light_general = String("light")
action = Regex(r"(turn on|turn off|off|blink)")
times = Regex(r"(once|twice|three times)")
optional_times = Optional(times)
one_more_light = OneOrMore(light_general)
zero_more_action = ZeroOrMore(action)
GOAL = zero_more_action + optional_times + one_more_light + \
optional_times + OneOrMore(String("quickly")('quick'))
def test_constructor(self):
assert 2 == len(GrammarExpression(["1", "2"]).exprs)
assert 1 == len(GrammarExpression("2").exprs)
with pytest.raises(GrammarException):
GrammarExpression(1)
def test_grammar_str(self):
light = TestGrammar.LightGrammar()
repr(light)
assert light.name == "LightGrammar"
print(light)
parser = RobustParser(light)
parser_bu = RobustParser(light, BottomUpStrategy)
        # TODO: this semantics is NOT testing "grammar"
assert True == parser.print_parse("blink light light light quickly")
assert True == parser_bu.print_parse("blink light light light quickly")
assert True == parser.print_parse("light quickly")
assert True == parser_bu.print_parse("light quickly")
def test_reserved(self):
with pytest.raises(GrammarException):
class ReservedGrammar(Grammar):
_grammar_ = "reserved"
GOAL = _grammar_
ReservedGrammar()
def test_goal(self):
with pytest.raises(GrammarException):
class NoGoalGrammar(Grammar):
random = 'random'
NoGoalGrammar()
def test_test(self):
with pytest.raises(NotImplementedError):
TestGrammar.LightGrammar.test()
class TestZeroOrMore(object):
def test_or(self):
class OrGrammar(Grammar):
GOAL = "1" | ZeroOrMore("1")
p = RobustParser(OrGrammar())
p.parse("1 1")
class TestParser(object):
class LightGrammar(Grammar):
def times2int(r):
if r.get() == "once":
r.set(1)
elif r.get() == "twice":
r.set(2)
elif r.get() == "three times":
r.set(3)
light = String("light").ignore()
color = String("red").replace_result_with((255, 0, 0))
action = Regex(r"(turn on|turn off|off|blink)")
times = Regex(r"(once|twice|three times)").set_result_action(times2int)
GOAL = action + Optional(color) + light + ZeroOrMore(times) + \
String("quickly")('quick')
light = LightGrammar()
parser = RobustParser(light)
test_str = "blink red light once quickly ignore"
def test_parse(self):
parser = TestParser.parser
test_str = TestParser.test_str
assert True == parser.print_parse(test_str, print_json=True)
assert True == parser.print_parse(test_str, best_parse=False)
assert True == parser.print_parse(
test_str, best_parse=False, print_json=True)
assert (None, None) == parser.parse("can't parse")
assert (None, None) == parser.parse_string("can't parse")
t, r = parser.parse(test_str)
# test result
assert r.times == [1]
assert r.color == (255, 0, 0)
print(repr(r)) # test __repr__()
assert 'quickly' in r.values()
assert 'quick' in r.keys()
del r['quick']
assert 'quick' not in r.keys()
r.quick = 'quickly'
assert 'quickly' in r.values()
del r.quick
assert 'quick' not in r
r['quick'] = 'quickly'
assert 'quickly' == r.get('quick')
assert 'light' not in r
# test tree
d = t.get_flat_dict(key='GOAL', only_leaf=True)
assert 'action' in d[0]
d = t.get_flat_dict(key='GOAL', only_leaf=False)
assert 'action' in d[0]
d = t.get_flat_dict(key='action', only_leaf=False)
assert 'action' in d[0]
TreeNode.recursive_str_verbose(t)
with pytest.raises(ParseException):
parser.parse("")
with pytest.raises(ValueError):
parser._parse_multi_token("")
_, tokens = parser._parse_multi_token("can't parse")
assert len(tokens) == 0
with pytest.raises(TypeError):
parser._parse_multi_token(1)
def test_incremental_parse(self):
parser = TestParser.parser
test_str = TestParser.test_str
parser.print_incremental_parse(test_str)
assert (None, None) == parser.incremental_parse('blink', False, is_first=True)
assert (None, None) == parser.incremental_parse('light', False)
t, r = parser.incremental_parse('quickly', is_final=True)
assert t is not None
assert (None, None) == parser.incremental_parse('light', is_final=True)
parser.clear_cache()
def test_num_edges(self):
class BadRule(ChartRule):
NUM_EDGES = 2
with pytest.raises(ValueError):
ParsingStrategy([BadRule])
with pytest.raises(NotImplementedError):
BadRule().apply()
# def test_full_trees(self):
# class CatalanGrammar(Grammar):
# a = String("a")
# A = NULL | a
# A |= A + A
# GOAL = A
# p = RobustParser(CatalanGrammar(), TopDownStrategy)
# chart, tokens = p.parse_to_chart("a a a a")
# trees = list(chart.trees(tokens,
# all_trees=True,
# goal=CatalanGrammar.GOAL))
# assert len(trees) == 5
def test_full_trees(self):
class FullGrammar(Grammar):
a = String("a")
b = String("a")
GOAL = a + b | b + a | a + a | b + b
p = RobustParser(FullGrammar(), TopDownStrategy)
chart, tokens = p.parse_to_chart("a a")
trees = list(chart.trees(tokens,
all_trees=True,
goal=FullGrammar.GOAL))
print(chart) # test chart __str__()
assert len(trees) == 4
def test_lex_span(self):
parser = TestParser.parser
test_str = "please turn off the light once twice quickly"
_ = " 0 1 2 3 4 5 6 7"
tree, result = parser.parse(test_str)
# turn off
assert parser.chart.get_lexical_span(0) == (1, 3)
# light
assert parser.chart.get_lexical_span(1) == (4, 5)
# once
assert parser.chart.get_lexical_span(2) == (5, 6)
# twice
assert parser.chart.get_lexical_span(3) == (6, 7)
# quickly
assert parser.chart.get_lexical_span(4) == (7, 8)
# turn off (the) light
assert parser.chart.get_lexical_span(0, 2) == (1, 5)
assert result.action == "turn off"
assert result.times == [1, 2]
assert result.lex_span() == (1, 8)
assert result.lex_span('action') == (1, 3)
assert result.lex_span('times') == [(5, 6), (6, 7)]
assert result.lex_span('quick') == (7, 8)
class TestHierarchicalParser(object):
class LightGrammar(Grammar):
light = String("light").ignore()
color = String("red").replace_result_with((255, 0, 0))
action = Regex(r"(turn on|turn off|off|blink)")
times = Regex(r"(once|twice|three times)")
one_parse = action + Optional(color) + light + ZeroOrMore(times)
GOAL = OneOrMore(one_parse)
light = LightGrammar()
parser = RobustParser(light)
test_str = "blink the red light once turn off red the light twice"
_ = " 0 1 2 3 4 5 6 7 8 9 10 "
def test_parse(self):
parser = TestHierarchicalParser.parser
test_str = TestHierarchicalParser.test_str
tree, result = parser.parse(test_str)
assert len(result.one_parse) == 2
assert result.lex_span() == (0, 11)
assert result.one_parse[0].lex_span() == (0, 5)
assert result.one_parse[1].lex_span() == (5, 11)
assert result.one_parse[1].lex_span('action') == (5, 7)
def test_topdown_init_rule():
class CornerGrammar(Grammar):
GOAL = String("a") + String("b")
p = RobustParser(CornerGrammar(), TopDownStrategy)
assert (None, None) == p.parse("b")
t, _ = p.parse("a b")
assert t is not None
assert (None, None) == p.parse("b a")
class TestOptional(object):
def test_o2(self):
class OptionalGrammar(Grammar):
s = String("t")('t')
o1 = Optional(s)
o2 = Optional(o1)
GOAL = s + o2
parser = RobustParser(OptionalGrammar(), strategy=TopDownStrategy)
assert True == parser.print_parse("t t")
assert True == parser.print_parse("t")
OptionalGrammar.o2.parse("t")
def test_o3(self):
class OptionalGrammar(Grammar):
s = String("t")('t')
o3 = Optional(Optional(s))
GOAL = s + o3
parser = RobustParser(OptionalGrammar(), strategy=BottomUpStrategy)
assert True == parser.print_parse("t t")
assert True == parser.print_parse("t")
class TestNullAnd(object):
def test_o2(self):
class OptionalGrammar(Grammar):
s = String("t")('t')
o1 = Optional(s)
o2 = ZeroOrMore(s)
o3 = o1 + o2
GOAL = s + o3
parser = RobustParser(OptionalGrammar(), strategy=TopDownStrategy)
assert True == parser.print_parse("t t")
# known bug, FIXME
assert False == parser.print_parse("t")
class TestDocGrammar(object):
def test_o2(self):
class LightGrammar(Grammar):
action = Set(['change', 'flash', 'set', 'blink'])
light = Set(['top', 'middle', 'bottom'])
color = Regex(r'(red|yellow|blue|orange|purple|...)')
times = Set(['once', 'twice', 'three times']) | Regex(r'\d+ times')
one_parse = action + light + Optional(times) + color
GOAL = OneOrMore(one_parse)
parser = RobustParser(LightGrammar(), strategy=TopDownStrategy)
# assert parser.print_parse("set my top light to red")
# assert parser.print_parse("set my top light to red and change "
# "middle light to yellow")
# assert parser.print_parse("set my top light to red and change "
# "middle light to yellow and flash bottom light twice in blue")
sents = [
"set my top light to red",
"set my top light to red and change middle light to yellow",
"set my top light to red and change middle light to yellow and "
"flash bottom light twice in blue"
]
for sent in sents:
tree, result = parser.parse_string(sent)
print('"%s"' % sent)
print("parse tree:")
print(tree)
print("parse result:")
print(result)
assert type(result.one_parse) is list
print()
def test_find_word_boundaries():
boundaries, starts, ends = find_word_boundaries(strip_string(
"my lights are off"))
assert boundaries == [(0, 2), (3, 9), (10, 13), (14, 17)]
assert [0, 3, 10, 14] == sorted(list(starts))
assert [2, 9, 13, 17] == sorted(list(ends))
boundaries, starts, ends = find_word_boundaries(strip_string(""))
assert len(boundaries) == 0
assert len(starts) == 0
assert len(ends) == 0
|
combo/blm/basis/__init__.py | zhangkunliang/BayesOptimization | 139 | 41334 | <reponame>zhangkunliang/BayesOptimization
from fourier import fourier
|
Chapter 06/code/prime.py | shivampotdar/Artificial-Intelligence-with-Python | 387 | 41352 | <gh_stars>100-1000
import itertools as it
import logpy.core as lc
from sympy.ntheory.generate import prime, isprime
# Check if the elements of x are prime
def check_prime(x):
if lc.isvar(x):
return lc.condeseq([(lc.eq, x, p)] for p in map(prime, it.count(1)))
else:
return lc.success if isprime(x) else lc.fail
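# lc.run(n, x, *goals) reifies at most n values of x that satisfy the goals;
# n=0 asks for every solution, which is safe below because membero draws from a
# finite list, while the first-7-primes query bounds the infinite prime stream.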
# Declare the variable
x = lc.var()
# Find the prime numbers in the list
list_nums = (23, 4, 27, 17, 13, 10, 21, 29, 3, 32, 11, 19)
print('\nList of primes in the list:')
print(set(lc.run(0, x, (lc.membero, x, list_nums), (check_prime, x))))
# Print first 7 prime numbers
print('\nList of first 7 prime numbers:')
print(lc.run(7, x, check_prime(x)))
|
transpyle/general/misc.py | EraYaN/transpyle | 107 | 41361 | <gh_stars>100-1000
"""Various utility functions."""
import ast
import collections.abc
import typing as t
import typed_ast.ast3 as typed_ast3
def dict_mirror(dict_: dict):
return {value: key for key, value in dict_.items() if value is not None}
def flatten_sequence(sequence: t.MutableSequence[t.Any]) -> None:
"""Transform a given list of lists of lists (...) of lists into a flat list in-place."""
assert isinstance(sequence, collections.abc.MutableSequence), type(sequence)
for i, elem in enumerate(sequence):
if isinstance(elem, collections.abc.MutableSequence):
flatten_sequence(elem)
for value in reversed(elem):
sequence.insert(i, value)
del sequence[i + len(elem)]
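# e.g. flatten_sequence(x) where x == [1, [2, [3]], 4] leaves x == [1, 2, 3, 4]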
def make_flatten_syntax(ast_module):
def flatten_syntax(syntax: t.Union[ast_module.AST, t.MutableSequence[t.Any]]) -> None:
"""Flatten all lists of lists within the given syntax in-place."""
if isinstance(syntax, (ast_module.Module, ast_module.FunctionDef, ast_module.ClassDef,
ast_module.For, ast_module.While, ast_module.If, ast_module.With,
ast_module.Try, ast_module.ExceptHandler,
ast_module.AsyncFunctionDef, ast_module.AsyncFor,
ast_module.AsyncWith)):
            for node in syntax.body:
                flatten_syntax(node)
            flatten_sequence(syntax.body)
            # no early return here: loops, conditionals and try blocks also
            # carry orelse/handlers/finalbody lists that are flattened below
        if isinstance(syntax, (ast_module.For, ast_module.While, ast_module.If, ast_module.Try,
                               ast_module.AsyncFor)):
            for node in syntax.orelse:
                flatten_syntax(node)
            flatten_sequence(syntax.orelse)
if isinstance(syntax, ast_module.Try):
for node in syntax.handlers:
flatten_syntax(node)
# flatten_sequence(syntax.handlers) # unnecessary
for node in syntax.finalbody:
flatten_syntax(node)
flatten_sequence(syntax.finalbody)
return
if not isinstance(syntax, collections.abc.MutableSequence):
return
for node in syntax:
flatten_syntax(node)
flatten_sequence(syntax)
return flatten_syntax
flatten_syntax = {ast_module: make_flatten_syntax(ast_module) for ast_module in (ast, typed_ast3)}
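# Illustrative usage sketch (assumption: `source` holds Python source text):
#   import ast
#   tree = ast.parse(source)
#   flatten_syntax[ast](tree)  # nested statement lists in `tree` are flattened in place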
|
alipay/aop/api/domain/EntityBasicInfo.py | antopen/alipay-sdk-python-all | 213 | 41366 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class EntityBasicInfo(object):
def __init__(self):
self._address_desc = None
self._category_code = None
self._city = None
self._contact_number = None
self._entity_code = None
self._entity_name = None
self._latitude = None
self._longitude = None
self._office_hours_desc = None
self._open_day = None
self._province = None
self._rent_free_time = None
self._rent_max_price = None
self._rent_price = None
self._rent_price_unit = None
self._rent_price_unit_cnt = None
self._suburb = None
@property
def address_desc(self):
return self._address_desc
@address_desc.setter
def address_desc(self, value):
self._address_desc = value
@property
def category_code(self):
return self._category_code
@category_code.setter
def category_code(self, value):
self._category_code = value
@property
def city(self):
return self._city
@city.setter
def city(self, value):
self._city = value
@property
def contact_number(self):
return self._contact_number
@contact_number.setter
def contact_number(self, value):
self._contact_number = value
@property
def entity_code(self):
return self._entity_code
@entity_code.setter
def entity_code(self, value):
self._entity_code = value
@property
def entity_name(self):
return self._entity_name
@entity_name.setter
def entity_name(self, value):
self._entity_name = value
@property
def latitude(self):
return self._latitude
@latitude.setter
def latitude(self, value):
self._latitude = value
@property
def longitude(self):
return self._longitude
@longitude.setter
def longitude(self, value):
self._longitude = value
@property
def office_hours_desc(self):
return self._office_hours_desc
@office_hours_desc.setter
def office_hours_desc(self, value):
self._office_hours_desc = value
@property
def open_day(self):
return self._open_day
@open_day.setter
def open_day(self, value):
if isinstance(value, list):
self._open_day = list()
for i in value:
self._open_day.append(i)
@property
def province(self):
return self._province
@province.setter
def province(self, value):
self._province = value
@property
def rent_free_time(self):
return self._rent_free_time
@rent_free_time.setter
def rent_free_time(self, value):
self._rent_free_time = value
@property
def rent_max_price(self):
return self._rent_max_price
@rent_max_price.setter
def rent_max_price(self, value):
self._rent_max_price = value
@property
def rent_price(self):
return self._rent_price
@rent_price.setter
def rent_price(self, value):
self._rent_price = value
@property
def rent_price_unit(self):
return self._rent_price_unit
@rent_price_unit.setter
def rent_price_unit(self, value):
self._rent_price_unit = value
@property
def rent_price_unit_cnt(self):
return self._rent_price_unit_cnt
@rent_price_unit_cnt.setter
def rent_price_unit_cnt(self, value):
self._rent_price_unit_cnt = value
@property
def suburb(self):
return self._suburb
@suburb.setter
def suburb(self, value):
self._suburb = value
def to_alipay_dict(self):
params = dict()
if self.address_desc:
if hasattr(self.address_desc, 'to_alipay_dict'):
params['address_desc'] = self.address_desc.to_alipay_dict()
else:
params['address_desc'] = self.address_desc
if self.category_code:
if hasattr(self.category_code, 'to_alipay_dict'):
params['category_code'] = self.category_code.to_alipay_dict()
else:
params['category_code'] = self.category_code
if self.city:
if hasattr(self.city, 'to_alipay_dict'):
params['city'] = self.city.to_alipay_dict()
else:
params['city'] = self.city
if self.contact_number:
if hasattr(self.contact_number, 'to_alipay_dict'):
params['contact_number'] = self.contact_number.to_alipay_dict()
else:
params['contact_number'] = self.contact_number
if self.entity_code:
if hasattr(self.entity_code, 'to_alipay_dict'):
params['entity_code'] = self.entity_code.to_alipay_dict()
else:
params['entity_code'] = self.entity_code
if self.entity_name:
if hasattr(self.entity_name, 'to_alipay_dict'):
params['entity_name'] = self.entity_name.to_alipay_dict()
else:
params['entity_name'] = self.entity_name
if self.latitude:
if hasattr(self.latitude, 'to_alipay_dict'):
params['latitude'] = self.latitude.to_alipay_dict()
else:
params['latitude'] = self.latitude
if self.longitude:
if hasattr(self.longitude, 'to_alipay_dict'):
params['longitude'] = self.longitude.to_alipay_dict()
else:
params['longitude'] = self.longitude
if self.office_hours_desc:
if hasattr(self.office_hours_desc, 'to_alipay_dict'):
params['office_hours_desc'] = self.office_hours_desc.to_alipay_dict()
else:
params['office_hours_desc'] = self.office_hours_desc
if self.open_day:
if isinstance(self.open_day, list):
for i in range(0, len(self.open_day)):
element = self.open_day[i]
if hasattr(element, 'to_alipay_dict'):
self.open_day[i] = element.to_alipay_dict()
if hasattr(self.open_day, 'to_alipay_dict'):
params['open_day'] = self.open_day.to_alipay_dict()
else:
params['open_day'] = self.open_day
if self.province:
if hasattr(self.province, 'to_alipay_dict'):
params['province'] = self.province.to_alipay_dict()
else:
params['province'] = self.province
if self.rent_free_time:
if hasattr(self.rent_free_time, 'to_alipay_dict'):
params['rent_free_time'] = self.rent_free_time.to_alipay_dict()
else:
params['rent_free_time'] = self.rent_free_time
if self.rent_max_price:
if hasattr(self.rent_max_price, 'to_alipay_dict'):
params['rent_max_price'] = self.rent_max_price.to_alipay_dict()
else:
params['rent_max_price'] = self.rent_max_price
if self.rent_price:
if hasattr(self.rent_price, 'to_alipay_dict'):
params['rent_price'] = self.rent_price.to_alipay_dict()
else:
params['rent_price'] = self.rent_price
if self.rent_price_unit:
if hasattr(self.rent_price_unit, 'to_alipay_dict'):
params['rent_price_unit'] = self.rent_price_unit.to_alipay_dict()
else:
params['rent_price_unit'] = self.rent_price_unit
if self.rent_price_unit_cnt:
if hasattr(self.rent_price_unit_cnt, 'to_alipay_dict'):
params['rent_price_unit_cnt'] = self.rent_price_unit_cnt.to_alipay_dict()
else:
params['rent_price_unit_cnt'] = self.rent_price_unit_cnt
if self.suburb:
if hasattr(self.suburb, 'to_alipay_dict'):
params['suburb'] = self.suburb.to_alipay_dict()
else:
params['suburb'] = self.suburb
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = EntityBasicInfo()
if 'address_desc' in d:
o.address_desc = d['address_desc']
if 'category_code' in d:
o.category_code = d['category_code']
if 'city' in d:
o.city = d['city']
if 'contact_number' in d:
o.contact_number = d['contact_number']
if 'entity_code' in d:
o.entity_code = d['entity_code']
if 'entity_name' in d:
o.entity_name = d['entity_name']
if 'latitude' in d:
o.latitude = d['latitude']
if 'longitude' in d:
o.longitude = d['longitude']
if 'office_hours_desc' in d:
o.office_hours_desc = d['office_hours_desc']
if 'open_day' in d:
o.open_day = d['open_day']
if 'province' in d:
o.province = d['province']
if 'rent_free_time' in d:
o.rent_free_time = d['rent_free_time']
if 'rent_max_price' in d:
o.rent_max_price = d['rent_max_price']
if 'rent_price' in d:
o.rent_price = d['rent_price']
if 'rent_price_unit' in d:
o.rent_price_unit = d['rent_price_unit']
if 'rent_price_unit_cnt' in d:
o.rent_price_unit_cnt = d['rent_price_unit_cnt']
if 'suburb' in d:
o.suburb = d['suburb']
return o
|
old/eval_scripts/evaluate_model.py | konatasick/face-of-art | 220 | 41385 | <filename>old/eval_scripts/evaluate_model.py
from evaluation_functions import *
flags = tf.app.flags
data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
model_path = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/tests/primary/old/no_flip/basic/' \
'tests_lr_primary_basic_no_flip/0.01/model/deep_heatmaps-80000'
# define paths
flags.DEFINE_string('img_dir', data_dir, 'data directory')
flags.DEFINE_string('test_data', 'test', 'test set to use full/common/challenging/test/art')
flags.DEFINE_string('model_path', model_path, 'model path')
# parameters used to train network
flags.DEFINE_string('network_type', 'Primary', 'network architecture Fusion/Primary')
flags.DEFINE_integer('image_size', 256, 'image size')
flags.DEFINE_integer('c_dim', 3, 'color channels')
flags.DEFINE_integer('num_landmarks', 68, 'number of face landmarks')
flags.DEFINE_integer('scale', 1, 'scale for image normalization 255/1/0')
flags.DEFINE_float('margin', 0.25, 'margin for face crops - % of bb size')
flags.DEFINE_string('bb_type', 'gt', "bb to use - 'gt':for ground truth / 'init':for face detector output")
# choose batch size and debug data size
flags.DEFINE_integer('batch_size', 2, 'batch size')
flags.DEFINE_bool('debug', True, 'run in debug mode - use subset of the data')
flags.DEFINE_integer('debug_data_size', 4, 'subset data size to test in debug mode')
# statistics parameters
flags.DEFINE_float('max_error', 0.08, 'error threshold to be considered as failure')
flags.DEFINE_bool('save_log', True, 'save statistics to log_dir')
flags.DEFINE_string('log_path', 'logs/nme_statistics', 'directory for saving NME statistics')
FLAGS = flags.FLAGS
def main(_):
# create directories if not exist
if not tf.gfile.Exists(FLAGS.log_path):
tf.gfile.MakeDirs(FLAGS.log_path)
err = evaluate_heatmap_network(
model_path=FLAGS.model_path, network_type=FLAGS.network_type, img_path=FLAGS.img_dir,
test_data=FLAGS.test_data, batch_size=FLAGS.batch_size, image_size=FLAGS.image_size, margin=FLAGS.margin,
bb_type=FLAGS.bb_type, c_dim=FLAGS.c_dim, scale=FLAGS.scale, num_landmarks=FLAGS.num_landmarks,
debug=FLAGS.debug, debug_data_size=FLAGS.debug_data_size)
print_nme_statistics(
errors=err, model_path=FLAGS.model_path, network_type=FLAGS.network_type, test_data=FLAGS.test_data,
max_error=FLAGS.max_error, save_log=FLAGS.save_log, log_path=FLAGS.log_path)
if __name__ == '__main__':
tf.app.run()
|
pywick/models/segmentation/testnets/drnet/__init__.py | achaiah/pywick | 408 | 41402 | from .drnet import DRNet |
src/logos.py | NitikaGupta16/logohunter | 128 | 41415 | <filename>src/logos.py
import cv2
import numpy as np
import os
from PIL import Image
from timeit import default_timer as timer
import utils
from utils import contents_of_bbox, features_from_image
from similarity import load_brands_compute_cutoffs, similar_matches, similarity_cutoff, draw_matches
def detect_logo(yolo, img_path, save_img, save_img_path='./', postfix=''):
"""
Call YOLO logo detector on input image, optionally save resulting image.
Args:
yolo: keras-yolo3 initialized YOLO instance
img_path: path to image file
save_img: bool to save annotated image
save_img_path: path to directory where to save image
postfix: string to add to filenames
Returns:
prediction: list of bounding boxes in format (xmin,ymin,xmax,ymax,class_id,confidence)
image: unaltered input image as (H,W,C) array
"""
try:
image = Image.open(img_path)
if image.mode != "RGB":
image = image.convert("RGB")
image_array = np.array(image)
except:
print('File Open Error! Try again!')
return None, None
prediction, new_image = yolo.detect_image(image)
img_out = postfix.join(os.path.splitext(os.path.basename(img_path)))
if save_img:
new_image.save(os.path.join(save_img_path, img_out))
return prediction, image_array
def match_logo(img_test, prediction, model_preproc, outtxt, input_features_cdf_cutoff_labels,
save_img, save_img_path='./', timing=False):
"""
Given an a path to an image and a list of predicted bounding boxes,
extract features and check each against input brand features. Declare
a match if the cosine similarity is smaller than an input-dependent
cutoff. Draw and annotate resulting boxes on image.
Args:
img_test: input image
prediction: bounding box candidates
model_preproc: (model, preprocess) tuple of the feature extractor model
and the preprocessing function to be applied to image before the model
input_features_cdf_cutoff_labels = (feat_input, sim_cutoff, bins, cdf_list, input_labels)
tuple of lists related to input brand, giving pre-computed features,
similarity cutoffs, cumulative similarity distribution and relative bins
specifications, and labels to be drawn when matches are found.
save_img: bool flag to save annotated image
save_img_path: path to directory where to save image
timing: bool flag to output timing information for each step, make plot
Returns:
outtxt: one line detailing input file path and resulting matched bounding
boxes, space-separated in format
(xmin,ymin,xmax,ymax,class_label,logo_confidence,similarity_percentile)
        timing: timing for each step of the pipeline, namely image read, logo candidate
extraction, feature computation, matching to input brands
(optional, only if timing=True)
"""
start = timer()
model, my_preprocess = model_preproc
feat_input, sim_cutoff, bins, cdf_list, input_labels = input_features_cdf_cutoff_labels
# from PIL image to np array
#img_test = np.array(image)
# img_test = cv2.imread(img_path) # could be removed by passing previous PIL image
t_read = timer()-start
candidates, i_candidates_too_small = contents_of_bbox(img_test, prediction)
# filter predicted bboxes to discard small logos
prediction = [ pred for i, pred in enumerate(prediction) if i not in i_candidates_too_small]
t_box = timer()-start
features_cand = features_from_image(candidates, model, my_preprocess)
t_feat = timer()-start
matches, cos_sim = similar_matches(feat_input, features_cand, sim_cutoff, bins, cdf_list)
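    # `matches` maps a candidate bbox index to (index of the matched input
    # brand, similarity value); unmatched candidates are absent from the dict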
t_match = timer()-start
img_path = outtxt
for idx in matches:
bb = prediction[idx]
label = input_labels[matches[idx][0]]
print('Logo #{} - {} {} - classified as {} {:.2f}'.format(idx,
tuple(bb[:2]), tuple(bb[2:4]), label, matches[idx][1]))
outtxt += ' {},{},{},{},{},{:.2f},{:.3f}'.format(*bb[:4], label,bb[-1], matches[idx][1])
outtxt += '\n'
new_img = draw_matches(img_test, input_labels, prediction, matches)
t_draw = timer()-start
if save_img == True:
save_img_path = os.path.abspath(save_img_path)
saved = Image.fromarray(new_img).save(os.path.join(save_img_path, os.path.basename(img_path)))
# save with opencv, remember to flip RGB->BGR
# saved = cv2.imwrite(os.path.join(save_img_path, os.path.basename(img_path)), new_img[...,::-1])
t_save = timer()-start
if timing:
return outtxt, (t_read, t_box-t_read, t_feat-t_box, t_match-t_feat,
t_draw-t_match, t_save-t_draw)
return outtxt
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open video")
video_FourCC = cv2.VideoWriter_fourcc(*'mp4v') #int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print(output_path, video_FourCC, video_fps, video_size)
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while vid.isOpened():
return_value, frame = vid.read()
if not return_value:
break
# opencv images are BGR, translate to RGB
frame = frame[:,:,::-1]
image = Image.fromarray(frame)
out_pred, image = yolo.detect_image(image)
result = np.asarray(image)
if isOutput:
out.write(result[:,:,::-1])
vid.release()
out.release()
yolo.close_session()
|
RecoLocalCalo/Configuration/python/ecalLocalRecoSequenceCosmics_cff.py | Purva-Chaudhari/cmssw | 852 | 41439 | <reponame>Purva-Chaudhari/cmssw
import FWCore.ParameterSet.Config as cms
# Calo geometry service model
#
# removed by tommaso
#
#ECAL conditions
# include "CalibCalorimetry/EcalTrivialCondModules/data/EcalTrivialCondRetriever.cfi"
#
#TPG condition needed by ecalRecHit producer if TT recovery is ON
from RecoLocalCalo.EcalRecProducers.ecalRecHitTPGConditions_cff import *
#ECAL reconstruction
from RecoLocalCalo.EcalRecProducers.ecalWeightUncalibRecHit_cfi import *
from RecoLocalCalo.EcalRecProducers.ecalFixedAlphaBetaFitUncalibRecHit_cfi import *
from RecoLocalCalo.EcalRecProducers.ecalRecHit_cff import *
ecalRecHit.cpu.EBuncalibRecHitCollection = 'ecalFixedAlphaBetaFitUncalibRecHit:EcalUncalibRecHitsEB'
ecalRecHit.cpu.EEuncalibRecHitCollection = 'ecalFixedAlphaBetaFitUncalibRecHit:EcalUncalibRecHitsEE'
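# channels whose recorded status carries any of the flags below are excluded
# from the calibrated rechit reconstruction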
ecalRecHit.cpu.ChannelStatusToBeExcluded = [
'kDAC',
'kNoLaser',
'kNoisy',
'kNNoisy',
'kFixedG6',
'kFixedG1',
'kFixedG0',
'kNonRespondingIsolated',
'kDeadVFE',
'kDeadFE',
'kNoDataNoTP'
]
from RecoLocalCalo.EcalRecProducers.ecalPreshowerRecHit_cfi import *
from RecoLocalCalo.EcalRecProducers.ecalDetIdToBeRecovered_cfi import *
ecalLocalRecoTaskCosmics = cms.Task(
ecalFixedAlphaBetaFitUncalibRecHit,
ecalWeightUncalibRecHit,
ecalDetIdToBeRecovered,
ecalCalibratedRecHitTask,
ecalPreshowerRecHit
)
ecalLocalRecoSequenceCosmics = cms.Sequence(ecalLocalRecoTaskCosmics)
|
onnxruntime/test/python/quantization/test_op_relu.py | jamill/onnxruntime | 669 | 41446 | #!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import numpy as np
import onnx
from onnx import TensorProto, helper
from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type
from onnxruntime.quantization import QuantFormat, QuantType, quantize_static
class TestOpRelu(unittest.TestCase):
def input_feeds(self, n, name2shape):
input_data_list = []
for i in range(n):
inputs = {}
for name, shape in name2shape.items():
inputs.update({name: np.random.randint(-1, 2, shape).astype(np.float32)})
input_data_list.extend([inputs])
dr = TestDataFeeds(input_data_list)
return dr
def construct_model_gemm(self, output_model_path):
# (input)
# |
# Gemm
# |
# Relu
# |
# Gemm
# |
# (output)
input_name = "input"
output_name = "output"
initializers = []
def make_gemm(input_name, weight_shape, weight_name, bias_shape, bias_name, output_name):
weight_data = np.random.normal(0, 0.1, weight_shape).astype(np.float32)
initializers.append(onnx.numpy_helper.from_array(weight_data, name=weight_name))
bias_data = np.random.normal(0, 0.1, bias_shape).astype(np.float32)
initializers.append(onnx.numpy_helper.from_array(bias_data, name=bias_name))
return onnx.helper.make_node(
"Gemm",
[input_name, weight_name, bias_name],
[output_name],
alpha=1.0,
beta=1.0,
transB=1,
)
# make gemm1 node
gemm1_output_name = "gemm1_output"
gemm1_node = make_gemm(
input_name,
[100, 10],
"linear1.weight",
[100],
"linear1.bias",
gemm1_output_name,
)
# make Relu
relu_output = "relu_output"
relu_node = onnx.helper.make_node("Relu", [gemm1_output_name], [relu_output])
# make gemm2 node
gemm2_node = make_gemm(
relu_output,
[10, 100],
"linear2.weight",
[10],
"linear2.bias",
output_name,
)
# make graph
input_tensor = helper.make_tensor_value_info(input_name, TensorProto.FLOAT, [-1, 10])
output_tensor = helper.make_tensor_value_info(output_name, TensorProto.FLOAT, [-1, 10])
graph_name = "relu_test"
graph = helper.make_graph(
[gemm1_node, relu_node, gemm2_node],
graph_name,
[input_tensor],
[output_tensor],
initializer=initializers,
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
model.ir_version = onnx.IR_VERSION
onnx.save(model, output_model_path)
def static_quant_test(
self,
model_fp32_path,
data_reader,
activation_type,
weight_type,
extra_options={},
):
activation_proto_qtype = TensorProto.UINT8 if activation_type == QuantType.QUInt8 else TensorProto.INT8
activation_type_str = "u8" if (activation_type == QuantType.QUInt8) else "s8"
weight_type_str = "u8" if (weight_type == QuantType.QUInt8) else "s8"
model_int8_path = "relu_fp32.quant_{}{}.onnx".format(activation_type_str, weight_type_str)
data_reader.rewind()
quantize_static(
model_fp32_path,
model_int8_path,
data_reader,
quant_format=QuantFormat.QOperator,
activation_type=activation_type,
weight_type=weight_type,
extra_options=extra_options,
)
qdq_count = 1 if activation_type == QuantType.QUInt8 else 2
relu_count = 0 if activation_type == QuantType.QUInt8 else 1
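        # expected graph differs by activation type: with QUInt8 activations the
        # trailing Relu is expected to be folded away (and one fewer Q/DQ pair),
        # while signed int8 activations keep an explicit Relu node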
quant_nodes = {"QGemm": 2, "QuantizeLinear": qdq_count, "DequantizeLinear": qdq_count, "Relu": relu_count}
check_op_type_count(self, model_int8_path, **quant_nodes)
qnode_io_qtypes = {
"QuantizeLinear": [
["i", 2, activation_proto_qtype],
["o", 0, activation_proto_qtype],
]
}
qnode_io_qtypes.update({"DequantizeLinear": [["i", 2, activation_proto_qtype]]})
check_qtype_by_node_type(self, model_int8_path, qnode_io_qtypes)
data_reader.rewind()
check_model_correctness(self, model_fp32_path, model_int8_path, data_reader.get_next())
def static_quant_test_qdq(
self,
model_fp32_path,
data_reader,
activation_type,
weight_type,
extra_options={},
):
activation_proto_qtype = TensorProto.UINT8 if activation_type == QuantType.QUInt8 else TensorProto.INT8
activation_type_str = "u8" if (activation_type == QuantType.QUInt8) else "s8"
weight_type_str = "u8" if (weight_type == QuantType.QUInt8) else "s8"
model_int8_path = "relu_fp32.quant_dqd_{}{}.onnx".format(activation_type_str, weight_type_str)
data_reader.rewind()
quantize_static(
model_fp32_path,
model_int8_path,
data_reader,
quant_format=QuantFormat.QDQ,
activation_type=activation_type,
weight_type=weight_type,
extra_options=extra_options,
)
relu_count = 0 if activation_type == QuantType.QUInt8 else 1
q_count = 3 if activation_type == QuantType.QUInt8 else 4
dq_count = 7 if activation_type == QuantType.QUInt8 else 8
quant_nodes = {"Gemm": 2, "QuantizeLinear": q_count, "DequantizeLinear": dq_count, "Relu": relu_count}
check_op_type_count(self, model_int8_path, **quant_nodes)
qnode_io_qtypes = {
"QuantizeLinear": [
["i", 2, activation_proto_qtype],
["o", 0, activation_proto_qtype],
]
}
check_qtype_by_node_type(self, model_int8_path, qnode_io_qtypes)
data_reader.rewind()
check_model_correctness(self, model_fp32_path, model_int8_path, data_reader.get_next())
def test_quantize_gemm(self):
np.random.seed(1)
model_fp32_path = "relu_fp32.onnx"
self.construct_model_gemm(model_fp32_path)
data_reader = self.input_feeds(1, {"input": [5, 10]})
self.static_quant_test(
model_fp32_path,
data_reader,
activation_type=QuantType.QUInt8,
weight_type=QuantType.QUInt8,
)
self.static_quant_test_qdq(
model_fp32_path,
data_reader,
activation_type=QuantType.QUInt8,
weight_type=QuantType.QUInt8,
)
def test_quantize_relu_s8s8(self):
np.random.seed(1)
model_fp32_path = "relu_fp32.onnx"
self.construct_model_gemm(model_fp32_path)
data_reader = self.input_feeds(1, {"input": [5, 10]})
self.static_quant_test(
model_fp32_path,
data_reader,
activation_type=QuantType.QInt8,
weight_type=QuantType.QInt8,
extra_options={"ActivationSymmetric": True},
)
self.static_quant_test_qdq(
model_fp32_path,
data_reader,
activation_type=QuantType.QInt8,
weight_type=QuantType.QInt8,
extra_options={"ActivationSymmetric": True},
)
if __name__ == "__main__":
unittest.main()
|
tests/config/test_structure.py | gcollard/lightbus | 178 | 41452 | <filename>tests/config/test_structure.py
import pytest
from lightbus.config.structure import make_transport_selector_structure, ApiConfig, RootConfig
pytestmark = pytest.mark.unit
def test_make_transport_config_structure():
EventTransportSelector = make_transport_selector_structure("event")
assert "redis" in EventTransportSelector.__annotations__
def test_make_api_config_structure():
assert "event_transport" in ApiConfig.__annotations__
assert "rpc_transport" in ApiConfig.__annotations__
assert "result_transport" in ApiConfig.__annotations__
assert "validate" in ApiConfig.__annotations__
def test_root_config_service_name():
service_name = RootConfig().service_name
assert service_name
assert type(service_name) == str
assert len(service_name) > 3
# No format parameters in there, should have been formatted upon instantiation
assert "{" not in service_name
def test_root_config_process_name():
process_name = RootConfig().process_name
assert process_name
assert type(process_name) == str
assert len(process_name) > 3
# No format parameters in there, should have been formatted upon instantiation
assert "{" not in process_name
|
mathics/builtin/files_io/__init__.py | skirpichev/Mathics | 1,920 | 41454 | <filename>mathics/builtin/files_io/__init__.py
"""
Input/Output, Files, and Filesystem
"""
from mathics.version import __version__ # noqa used in loading to check consistency.
|
SimGeneral/MixingModule/python/pileupVtxDigitizer_cfi.py | ckamtsikis/cmssw | 852 | 41459 | import FWCore.ParameterSet.Config as cms
pileupVtxDigitizer = cms.PSet(
accumulatorType = cms.string("PileupVertexAccumulator"),
hitsProducer = cms.string('generator'),
vtxTag = cms.InputTag("generatorSmeared"),
vtxFallbackTag = cms.InputTag("generator"),
makeDigiSimLinks = cms.untracked.bool(False),
saveVtxTimes = cms.bool(False))
from Configuration.Eras.Modifier_phase2_timing_cff import phase2_timing
phase2_timing.toModify( pileupVtxDigitizer, saveVtxTimes = cms.bool(True) )
|
basisnet/personalization/centralized_so_nwp/stackoverflow_basis_models.py | xxdreck/google-research | 23,901 | 41461 | <reponame>xxdreck/google-research<filename>basisnet/personalization/centralized_so_nwp/stackoverflow_basis_models.py<gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RNN models of BasisNet with LSTM cells.
It implements a RNN wrapper with specialized LSTM cell with bases
for the kernels.
"""
import functools
from typing import Optional
import tensorflow as tf
CLIENT_SIZE = 500000
class BasisRNNLayer(tf.keras.layers.Layer):
"""A RNN wrapper using LSTM cell with Basis kernels."""
def __init__(self,
cell,
num_units,
num_basis,
recurrent_initializer,
kernel_initializer,
return_sequences=False):
super().__init__()
self.rnn_cell = cell(
num_units=num_units,
num_basis=num_basis,
recurrent_initializer=recurrent_initializer,
kernel_initializer=kernel_initializer)
self.rnn = tf.keras.layers.RNN(
self.rnn_cell, return_sequences=return_sequences)
def call(self, input_tensor):
return self.rnn(input_tensor)
class BasisLSTMCell(tf.keras.layers.Layer):
"""A LSTM cell with Basis kernels."""
def __init__(self,
num_units,
num_basis,
kernel_initializer,
recurrent_initializer,
word_emb_size=96,
use_bias=True,
activation=None,
):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
num_basis: The number of bases to learn.
kernel_initializer: The initializer of the input/output kernels.
recurrent_initializer: The initializer of the recurrent kernels.
word_emb_size: The word embedding size.
use_bias: Add bias or not.
activation: Activation function of the inner states. Default: `tanh`.
"""
super().__init__()
self._num_basis = num_basis
self.kernel_initializer = kernel_initializer
self.recurrent_initializer = recurrent_initializer
self._num_units = num_units
self.word_emb_size = word_emb_size
self.activation = activation or tf.tanh
self.recurrent_activation = tf.sigmoid
self.use_bias = use_bias
def build(self, input_shape):
# the basis embedding is concatenated to the input embedding,
# then split out in call().
weight_shape = [self.word_emb_size, self._num_basis, 4 * self._num_units]
self.basis_kernel = self.add_weight(
shape=weight_shape,
name='kernel',
initializer=self.kernel_initializer,)
self.basis_recurrent_kernel = self.add_weight(
shape=(self._num_units, self._num_basis, self._num_units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
)
self.bias = tf.Variable([0.0]*weight_shape[-1], name='bias')
@property
def state_size(self):
return tf.compat.v1.nn.rnn_cell.LSTMStateTuple(self._num_units,
self._num_units)
@property
def output_size(self):
return self._num_units
def compose_basis(self, c_prob):
"""Compose bases into a kernel."""
composed_kernel = tf.keras.backend.sum(
tf.expand_dims(self.basis_kernel, 0) * c_prob, axis=2)
composed_recurrent_kernel = tf.keras.backend.sum(
tf.expand_dims(self.basis_recurrent_kernel, 0) * c_prob, axis=2)
return composed_kernel, composed_recurrent_kernel
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
def call(self, inputs, states):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
inputs, c_prob = tf.split(inputs, [self.word_emb_size, self._num_basis], -1)
c_prob = tf.reshape(c_prob, [-1, 1, self._num_basis, 1])
composed_kernel, composed_recurrent_kernel = self.compose_basis(c_prob)
# inputs:
# [batch_size, 1, self.word_emb_size]
    # composed_kernel:
    #   [batch_size, self.word_emb_size, 4 * self._num_units]
    # outputs (need to be squeezed):
    #   [batch_size, 1, 4 * self._num_units]
z = tf.matmul(tf.expand_dims(inputs, 1), composed_kernel)
z += tf.matmul(tf.expand_dims(h_tm1, 1), composed_recurrent_kernel)
if self.use_bias:
z = tf.keras.backend.bias_add(z, self.bias)
z = tf.squeeze(z)
z = tf.split(z, num_or_size_splits=4, axis=1)
c, o = self._compute_carry_and_output_fused(z, c_tm1)
h = o * self.activation(c)
return h, [h, c]
class TransposableEmbedding(tf.keras.layers.Layer):
"""A Keras layer implements a transposed projection for output."""
def __init__(self, embedding_layer):
super().__init__()
self.embeddings = embedding_layer.embeddings
# Placing `tf.matmul` under the `call` method is important for backpropagating
# the gradients of `self.embeddings` in graph mode.
def call(self, inputs):
return tf.matmul(inputs, self.embeddings, transpose_b=True)
def create_basis_recurrent_model(vocab_size = 10000,
num_oov_buckets = 1,
embedding_size = 96,
latent_size = 670,
num_basis = 1,
seqeunce_length = 20,
name = 'rnn',
shared_embedding = False,
global_embedding_only = False,
seed = 0):
"""Constructs zero-padded keras model with the given parameters and cell.
Args:
vocab_size: Size of vocabulary to use.
num_oov_buckets: Number of out of vocabulary buckets.
embedding_size: The size of the embedding.
latent_size: The size of the recurrent state.
num_basis: The number of bases to learn.
    seqeunce_length: The sequence length of an input.
name: (Optional) string to name the returned `tf.keras.Model`.
shared_embedding: (Optional) Whether to tie the input and output
embeddings.
global_embedding_only: use the global embedding only or not.
seed: A random seed governing the model initialization and layer randomness.
If set to `None`, No random seed is used.
Returns:
`tf.keras.Model`.
"""
extended_vocab_size = vocab_size + 3 + num_oov_buckets # For pad/bos/eos/oov.
input_x = tf.keras.layers.Input(shape=(None,), name='input_x')
input_id = tf.keras.layers.Input(shape=(1,), dtype=tf.int64, name='input_id')
input_embedding = tf.keras.layers.Embedding(
input_dim=extended_vocab_size,
output_dim=embedding_size,
mask_zero=True,
embeddings_initializer=tf.keras.initializers.RandomUniform(seed=seed),
)
embedded = input_embedding(input_x)
projected = embedded
  # Note: if the vocabulary size here is set too small, no out-of-range error
  # is raised and the model still appears to train normally.
basis_embeddings = tf.keras.layers.Embedding(
      CLIENT_SIZE, num_basis, name='client_embedding')
if global_embedding_only:
# using id = 0 for the global embedding
basis_vec = basis_embeddings(tf.zeros_like(input_id))
else:
basis_vec = basis_embeddings(input_id)
# [batch_size, 1, num_basis]
basis_vec = tf.reshape(basis_vec, shape=[-1, 1, num_basis])
basis_prob = tf.keras.layers.Softmax()(basis_vec)
basis_tensor = tf.tile(
basis_prob,
tf.constant([1, seqeunce_length, 1], tf.int32))
projected = tf.concat([projected, basis_tensor], -1)
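  # The per-example mixture weights are tiled across all time steps and
  # appended to the word embeddings, so BasisLSTMCell can split them back out
  # at every step to compose its kernels.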
recurrent_initializer = tf.keras.initializers.Orthogonal(seed=seed)
kernel_initializer = tf.keras.initializers.HeNormal(seed=seed)
lstm_layer_builder = functools.partial(
BasisRNNLayer,
cell=BasisLSTMCell,
num_units=latent_size,
num_basis=num_basis,
recurrent_initializer=recurrent_initializer,
kernel_initializer=kernel_initializer,
return_sequences=True,)
dense_layer_builder = functools.partial(
tf.keras.layers.Dense,
kernel_initializer=tf.keras.initializers.GlorotNormal(seed=seed))
layer = lstm_layer_builder()
processed = layer(projected)
# A projection changes dimension from rnn_layer_size to input_embedding_size
dense_layer = dense_layer_builder(units=embedding_size)
projected = dense_layer(processed)
projected = tf.concat([projected, basis_tensor], -1)
if shared_embedding:
transposed_embedding = TransposableEmbedding(input_embedding)
logits = transposed_embedding(projected)
else:
final_dense_layer = dense_layer_builder(
units=extended_vocab_size, activation=None)
logits = final_dense_layer(projected)
return tf.keras.Model(inputs=[input_x, input_id], outputs=logits, name=name)
def create_recurrent_model(vocab_size = 10000,
num_oov_buckets = 1,
embedding_size = 96,
latent_size = 670,
num_layers = 1,
name = 'rnn',
shared_embedding = False,
seed = 0):
"""Constructs zero-padded keras model with the given parameters and cell.
Args:
vocab_size: Size of vocabulary to use.
num_oov_buckets: Number of out of vocabulary buckets.
embedding_size: The size of the embedding.
latent_size: The size of the recurrent state.
num_layers: The number of layers.
name: (Optional) string to name the returned `tf.keras.Model`.
shared_embedding: (Optional) Whether to tie the input and output
embeddings.
seed: A random seed governing the model initialization and layer randomness.
If set to `None`, No random seed is used.
Returns:
`tf.keras.Model`.
"""
extended_vocab_size = vocab_size + 3 + num_oov_buckets # For pad/bos/eos/oov.
input_x = tf.keras.layers.Input(shape=(None,), name='input_x')
# To be consistent with BasisNet pipeline, not using client id
input_id = tf.keras.layers.Input(shape=(1,), dtype=tf.int64, name='input_id')
input_embedding = tf.keras.layers.Embedding(
input_dim=extended_vocab_size,
output_dim=embedding_size,
mask_zero=True,
embeddings_initializer=tf.keras.initializers.RandomUniform(seed=seed),
)
embedded = input_embedding(input_x)
projected = embedded
lstm_layer_builder = functools.partial(
tf.keras.layers.LSTM,
units=latent_size,
return_sequences=True,
recurrent_initializer=tf.keras.initializers.Orthogonal(seed=seed),
kernel_initializer=tf.keras.initializers.HeNormal(seed=seed))
dense_layer_builder = functools.partial(
tf.keras.layers.Dense,
kernel_initializer=tf.keras.initializers.GlorotNormal(seed=seed))
for _ in range(num_layers):
layer = lstm_layer_builder()
processed = layer(projected)
# A projection changes dimension from rnn_layer_size to input_embedding_size
dense_layer = dense_layer_builder(units=embedding_size)
projected = dense_layer(processed)
if shared_embedding:
transposed_embedding = TransposableEmbedding(input_embedding)
logits = transposed_embedding(projected)
else:
final_dense_layer = dense_layer_builder(
units=extended_vocab_size, activation=None)
logits = final_dense_layer(projected)
return tf.keras.Model(inputs=[input_x, input_id], outputs=logits, name=name)
|
tests/test_node_symlink.py | odidev/anytree | 700 | 41502 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from nose.tools import eq_
from anytree import AnyNode
from anytree import Node
from anytree import NodeMixin
from anytree import PostOrderIter
from anytree import PreOrderIter
from anytree import SymlinkNode
def test_symlink():
root = Node("root")
s0 = Node("sub0", parent=root)
s0b = Node("sub0B", parent=s0)
s0a = Node("sub0A", parent=s0)
s1 = Node("sub1", parent=root, foo=4)
s1a = Node("sub1A", parent=s1)
s1b = Node("sub1B", parent=s1)
s1c = Node("sub1C", parent=s1)
s1ca = Node("sub1Ca", parent=s1c)
ln = SymlinkNode(s1, parent=root, blub=17)
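    # ln mirrors s1: attribute reads/writes pass through to the linked target,
    # while its parent/children are tracked independently in the tree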
l0 = Node("l0", parent=ln)
eq_(root.parent, None)
eq_(root.children, tuple([s0, s1, ln]))
eq_(s0.parent, root)
eq_(s0.children, tuple([s0b, s0a]))
eq_(s0b.parent, s0)
eq_(s0b.children, tuple())
eq_(s0a.parent, s0)
eq_(s0a.children, tuple())
eq_(s1.parent, root)
eq_(s1.children, tuple([s1a, s1b, s1c]))
eq_(s1.foo, 4)
eq_(s1a.parent, s1)
eq_(s1a.children, tuple())
eq_(s1b.parent, s1)
eq_(s1b.children, tuple())
eq_(s1c.parent, s1)
eq_(s1c.children, tuple([s1ca]))
eq_(s1ca.parent, s1c)
eq_(s1ca.children, tuple())
eq_(ln.parent, root)
eq_(ln.children, tuple([l0]))
eq_(ln.foo, 4)
eq_(s1.blub, 17)
eq_(ln.blub, 17)
ln.bar = 9
eq_(ln.bar, 9)
eq_(s1.bar, 9)
result = [node.name for node in PreOrderIter(root)]
eq_(result, ['root', 'sub0', 'sub0B', 'sub0A', 'sub1', 'sub1A', 'sub1B', 'sub1C', 'sub1Ca', 'sub1', 'l0'])
result = [node.name for node in PostOrderIter(root)]
eq_(result, ['sub0B', 'sub0A', 'sub0', 'sub1A', 'sub1B', 'sub1Ca', 'sub1C', 'sub1', 'l0', 'sub1', 'root'])
|
python/akg/ops/array/tile.py | tianjiashuo/akg | 286 | 41503 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: tile"""
import akg.tvm
import akg.topi
import akg.utils as utils
@utils.check_input_type(akg.tvm.tensor.Tensor, (list, tuple), (str, type(None)))
def Tile(data, multiples, target=utils.CCE):
"""
Repeats the data in the specified dimensions according to the multiples.
Args:
data (tvm.tensor.Tensor): Tensor of type float16, float32.
        multiples (Union[list, tuple]): Elements must be int. The number of repetitions.
        target (str): Target platform. Defaults to utils.CCE.
Returns:
tvm.tensor.Tensor, has the same dtype as data.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
shape = [x.value for x in data.shape]
dtype = data.dtype
utils.check_shape(shape)
utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_TYPES)
utils.check_int_list(multiples, "multiples")
output = akg.topi.tile(data, multiples)
return output |
test_bot/cogs/misc.py | Enegg/disnake | 290 | 41508 | <filename>test_bot/cogs/misc.py
import io
from base64 import b64decode
import disnake
from disnake.ext import commands
class Misc(commands.Cog):
def __init__(self, bot):
self.bot: commands.Bot = bot
def _get_file(self, description: str) -> disnake.File:
# just a white 100x100 png
data = b64decode(
"<KEY>"
)
return disnake.File(io.BytesIO(data), "image.png", description=description)
@commands.slash_command()
async def attachment_desc(self, inter: disnake.AppCmdInter, desc: str = "test") -> None:
"""
Send an attachment with the given description (or the default)
Parameters
----------
desc: The attachment description
"""
await inter.response.send_message(file=self._get_file(desc))
@commands.slash_command()
async def attachment_desc_edit(self, inter: disnake.AppCmdInter, desc: str = "test") -> None:
"""
Send a message with a button, which sends an attachment with the given description (or the default)
Parameters
----------
desc: The attachment description
"""
button = disnake.ui.Button(label="edit")
button.callback = lambda interaction: interaction.response.edit_message(
file=self._get_file(desc)
)
view = disnake.ui.View()
view.add_item(button)
await inter.response.send_message(".", view=view)
def setup(bot):
bot.add_cog(Misc(bot))
print(f"> Extension {__name__} is ready\n")
|
tests/test_config.py | DuXiao1997/lyrebird | 223 | 41525 | <gh_stars>100-1000
from lyrebird import config
from pathlib import Path
import json
import codecs
def test_create(tmpdir):
custom_config = {"myconf":"myval"}
conf_path = Path(tmpdir)/'conf.json'
with codecs.open(conf_path, 'w', 'utf-8') as f:
f.write(json.dumps(custom_config, indent=4, ensure_ascii=False))
cm = config.ConfigManager(conf_path=conf_path)
assert str(cm.conf_file) == str(tmpdir) + '/conf.json'
assert cm.conf_file.exists()
assert cm.config
assert cm.config['myconf'] == 'myval'
|
scvi/external/__init__.py | njbernstein/scvi-tools | 398 | 41533 | <gh_stars>100-1000
from .cellassign import CellAssign
from .gimvi import GIMVI
from .solo import SOLO
from .stereoscope import RNAStereoscope, SpatialStereoscope
__all__ = ["SOLO", "GIMVI", "RNAStereoscope", "SpatialStereoscope", "CellAssign"]
|
edb/common/binwrapper.py | aaronbrighton/edgedb | 7,302 | 41552 | <filename>edb/common/binwrapper.py
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import io
import struct
class BinWrapper:
"""A utility binary-reader wrapper over any io.BytesIO object."""
i64 = struct.Struct('!q')
i32 = struct.Struct('!l')
i16 = struct.Struct('!h')
i8 = struct.Struct('!b')
ui64 = struct.Struct('!Q')
ui32 = struct.Struct('!L')
ui16 = struct.Struct('!H')
ui8 = struct.Struct('!B')
def __init__(self, buf: io.BytesIO) -> None:
self.buf = buf
def write_ui64(self, val: int) -> None:
self.buf.write(self.ui64.pack(val))
def write_ui32(self, val: int) -> None:
self.buf.write(self.ui32.pack(val))
def write_ui16(self, val: int) -> None:
self.buf.write(self.ui16.pack(val))
def write_ui8(self, val: int) -> None:
self.buf.write(self.ui8.pack(val))
def write_i64(self, val: int) -> None:
self.buf.write(self.i64.pack(val))
def write_i32(self, val: int) -> None:
self.buf.write(self.i32.pack(val))
def write_i16(self, val: int) -> None:
self.buf.write(self.i16.pack(val))
def write_i8(self, val: int) -> None:
self.buf.write(self.i8.pack(val))
def write_len32_prefixed_bytes(self, val: bytes) -> None:
self.write_ui32(len(val))
self.buf.write(val)
def write_bytes(self, val: bytes) -> None:
self.buf.write(val)
def read_ui64(self) -> int:
data = self.buf.read(8)
return self.ui64.unpack(data)[0]
def read_ui32(self) -> int:
data = self.buf.read(4)
return self.ui32.unpack(data)[0]
def read_ui16(self) -> int:
data = self.buf.read(2)
return self.ui16.unpack(data)[0]
def read_ui8(self) -> int:
data = self.buf.read(1)
return self.ui8.unpack(data)[0]
def read_i64(self) -> int:
data = self.buf.read(8)
return self.i64.unpack(data)[0]
def read_i32(self) -> int:
data = self.buf.read(4)
return self.i32.unpack(data)[0]
def read_i16(self) -> int:
data = self.buf.read(2)
return self.i16.unpack(data)[0]
def read_i8(self) -> int:
data = self.buf.read(1)
return self.i8.unpack(data)[0]
def read_bytes(self, size: int) -> bytes:
data = self.buf.read(size)
if len(data) != size:
raise BufferError(f'cannot read bytes with len={size}')
return data
def read_len32_prefixed_bytes(self) -> bytes:
size = self.read_ui32()
return self.read_bytes(size)
|
tests/test_resource_library_parser.py | tervay/the-blue-alliance | 266 | 41553 | import unittest2
import json
from datafeeds.resource_library_parser import ResourceLibraryParser
class TestResourceLibraryParser(unittest2.TestCase):
def test_parse_hall_of_fame(self):
with open('test_data/hall_of_fame.html', 'r') as f:
teams, _ = ResourceLibraryParser.parse(f.read())
# Test number of teams
self.assertEqual(len(teams), 14)
# Test team 987
team = teams[0]
self.assertEqual(team["team_id"], "frc987")
self.assertEqual(team["team_number"], 987)
self.assertEqual(team["year"], 2016)
self.assertEqual(team["video"], "wpv-9yd_CJk")
self.assertEqual(team["presentation"], "ILxVggTpXhs")
self.assertEqual(team["essay"], "https://www.firstinspires.org/sites/default/files/uploads/resource_library/frc/game-and-season-info/awards/2016/chairmans/week-five/team-987.pdf")
# Test team 597
team = teams[1]
self.assertEqual(team["team_id"], "frc597")
self.assertEqual(team["team_number"], 597)
self.assertEqual(team["year"], 2015)
self.assertEqual(team["video"], "2FKks-d6LOo")
self.assertEqual(team["presentation"], "RBXj490clow")
self.assertEqual(team["essay"], None)
# Test team 27
team = teams[2]
self.assertEqual(team["team_id"], "frc27")
self.assertEqual(team["team_number"], 27)
self.assertEqual(team["year"], 2014)
self.assertEqual(team["video"], "BCz2yTVPxbM")
self.assertEqual(team["presentation"], "1rE67fTRl98")
self.assertEqual(team["essay"], "https://www.firstinspires.org/sites/default/files/uploads/resource_library/frc/game-and-season-info/awards/2015/2014-67-chairmans-handout.pdf")
# Test team 1538
team = teams[3]
self.assertEqual(team["team_id"], "frc1538")
self.assertEqual(team["team_number"], 1538)
self.assertEqual(team["year"], 2013)
self.assertEqual(team["video"], "p62jRCMkoiw")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 1114
team = teams[4]
self.assertEqual(team["team_id"], "frc1114")
self.assertEqual(team["team_number"], 1114)
self.assertEqual(team["year"], 2012)
self.assertEqual(team["video"], "VqciMgjw-SY")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 359
team = teams[5]
self.assertEqual(team["team_id"], "frc359")
self.assertEqual(team["team_number"], 359)
self.assertEqual(team["year"], 2011)
self.assertEqual(team["video"], "e9IV1chHJtg")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 341
team = teams[6]
self.assertEqual(team["team_id"], "frc341")
self.assertEqual(team["team_number"], 341)
self.assertEqual(team["year"], 2010)
self.assertEqual(team["video"], "-AzvT02ZCNk")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 236
team = teams[7]
self.assertEqual(team["team_id"], "frc236")
self.assertEqual(team["team_number"], 236)
self.assertEqual(team["year"], 2009)
self.assertEqual(team["video"], "NmzCLohIZLg")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 842
team = teams[8]
self.assertEqual(team["team_id"], "frc842")
self.assertEqual(team["team_number"], 842)
self.assertEqual(team["year"], 2008)
self.assertEqual(team["video"], "N0LMLz6LK7U")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 365
team = teams[9]
self.assertEqual(team["team_id"], "frc365")
self.assertEqual(team["team_number"], 365)
self.assertEqual(team["year"], 2007)
self.assertEqual(team["video"], "f8MT7pSRXtg")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 111
team = teams[10]
self.assertEqual(team["team_id"], "frc111")
self.assertEqual(team["team_number"], 111)
self.assertEqual(team["year"], 2006)
self.assertEqual(team["video"], "SfCjZMMIt0k")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
|
atlas-aapt/external/libcxx/run-tests.py | MaTriXy/atlas | 8,865 | 41578 | #
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import os
import subprocess
import sys
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
ANDROID_DIR = os.path.realpath(os.path.join(THIS_DIR, '../..'))
class ArgParser(argparse.ArgumentParser):
def __init__(self):
super(ArgParser, self).__init__()
self.add_argument(
'--compiler', choices=('clang', 'gcc'), default='clang')
self.add_argument(
'--bitness', choices=(32, 64), type=int, default=32)
self.add_argument('--host', action='store_true')
def gen_test_config(bitness, compiler, host):
testconfig_mk_path = os.path.join(THIS_DIR, 'buildcmds/testconfig.mk')
with open(testconfig_mk_path, 'w') as test_config:
if compiler == 'clang':
print('LOCAL_CLANG := true', file=test_config)
elif compiler == 'gcc':
print('LOCAL_CLANG := false', file=test_config)
if bitness == 32:
print('LOCAL_MULTILIB := 32', file=test_config)
elif bitness == 64:
print('LOCAL_MULTILIB := 64', file=test_config)
if compiler == 'clang':
print('LOCAL_CXX := $(LOCAL_PATH)/buildcmdscc $(CLANG_CXX)',
file=test_config)
else:
if host:
prefix = 'HOST_'
else:
prefix = 'TARGET_'
print('LOCAL_CXX := $(LOCAL_PATH)/buildcmdscc '
'$($(LOCAL_2ND_ARCH_VAR_PREFIX){}CXX)'.format(prefix),
file=test_config)
if host:
print('include $(BUILD_HOST_EXECUTABLE)', file=test_config)
else:
print('include $(BUILD_EXECUTABLE)', file=test_config)
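# Illustrative note (not part of the original script): for example,
# gen_test_config(64, 'clang', True) writes a buildcmds/testconfig.mk roughly like:
#
#   LOCAL_CLANG := true
#   LOCAL_MULTILIB := 64
#   LOCAL_CXX := $(LOCAL_PATH)/buildcmdscc $(CLANG_CXX)
#   include $(BUILD_HOST_EXECUTABLE)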
def mmm(path):
makefile = os.path.join(path, 'Android.mk')
main_mk = 'build/core/main.mk'
env = dict(os.environ)
env['ONE_SHOT_MAKEFILE'] = makefile
env['LIBCXX_TESTING'] = 'true'
cmd = ['make', '-C', ANDROID_DIR, '-f', main_mk, 'all_modules']
subprocess.check_call(cmd, env=env)
def gen_build_cmds(bitness, compiler, host):
gen_test_config(bitness, compiler, host)
mmm(os.path.join(THIS_DIR, 'buildcmds'))
def main():
args, lit_args = ArgParser().parse_known_args()
lit_path = os.path.join(ANDROID_DIR, 'external/llvm/utils/lit/lit.py')
gen_build_cmds(args.bitness, args.compiler, args.host)
mode_str = 'host' if args.host else 'device'
android_mode_arg = '--param=android_mode=' + mode_str
test_path = os.path.join(THIS_DIR, 'test')
lit_args = ['-sv', android_mode_arg] + lit_args
cmd = ['python', lit_path] + lit_args + [test_path]
sys.exit(subprocess.call(cmd))
if __name__ == '__main__':
main()
|
opennre/tokenization/basic_tokenizer.py | WinterSoHot/OpenNRE | 3,284 | 41592 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BasicTokenizer classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .utils import (convert_to_unicode,
clean_text,
split_on_whitespace,
split_on_punctuation,
tokenize_chinese_chars,
strip_accents)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = clean_text(text)
text = tokenize_chinese_chars(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
orig_tokens = split_on_whitespace(text)
split_tokens = []
current_positions = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = strip_accents(token)
current_positions.append([])
current_positions[-1].append(len(split_tokens))
split_tokens.extend(split_on_punctuation(token))
current_positions[-1].append(len(split_tokens))
return split_tokens, current_positions
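# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): show the
# (tokens, positions) pair returned by tokenize(). Run it with
# `python -m opennre.tokenization.basic_tokenizer` so the relative imports
# above resolve.
if __name__ == "__main__":
    _tokenizer = BasicTokenizer(do_lower_case=True)
    _tokens, _positions = _tokenizer.tokenize("Hello, World!")
    # Expected: _tokens == ['hello', ',', 'world', '!']; each entry of _positions
    # is the [start, end) span a whitespace token occupies in the split list.
    print(_tokens, _positions)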
|
djangae/models.py | bocribbz/djangae | 467 | 41612 | from django.db import models
from djangae import patches # noqa
class DeferIterationMarker(models.Model):
"""
Marker to keep track of sharded defer
iteration tasks
"""
# Set to True when all shards have been deferred
is_ready = models.BooleanField(default=False)
shard_count = models.PositiveIntegerField(default=0)
shards_complete = models.PositiveIntegerField(default=0)
delete_on_completion = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
callback_name = models.CharField(max_length=100)
finalize_name = models.CharField(max_length=100)
class Meta:
app_label = "djangae"
@property
def is_finished(self):
return self.is_ready and self.shard_count == self.shards_complete
def __unicode__(self):
return "Background Task (%s -> %s) at %s" % (
self.callback_name,
self.finalize_name,
self.created
)
|
aws-blog-campanile/bin/objectcopy.py | securityscorecard/aws-big-data-blog | 305 | 41629 | #!/usr/bin/python2.7
import sys
import os
import fileinput
import argparse
import random
import tempfile
import ConfigParser
# -----------------------------------------------------------------------------
# Support for Hadoop Streaming Sandbox Env
# -----------------------------------------------------------------------------
sys.path.append(os.environ.get('PWD'))
os.environ["BOTO_PATH"] = '/etc/boto.cfg:~/.boto:./.boto'
import campanile
import boto
from boto.s3.connection import S3Connection
# -----------------------------------------------------------------------------
# Global
# -----------------------------------------------------------------------------
# cfgfiles Config file search path
# -----------------------------------------------------------------------------
cfgfiles = [
"/etc/campanile.cfg",
"./campanile.cfg"
]
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def main():
## Args
parser = argparse.ArgumentParser()
parser.add_argument('--src-bucket', required=True, dest='src',
help='Source S3 bucket')
parser.add_argument('--dst-bucket', required=True, dest='dst',
help='Destination S3 bucket')
parser.add_argument('--src-endpoint',
default=boto.s3.connection.NoHostProvided,
help='S3 source endpoint')
parser.add_argument('--dst-endpoint',
default=boto.s3.connection.NoHostProvided,
help='S3 destination endpoint')
parser.add_argument('--src-profile',
help='Boto profile used for source connection')
parser.add_argument('--dst-profile',
help='Boto profile used for destination connection')
parser.add_argument('--config', '-c', default="./campanile.cfg",
help='Path to config file')
args = parser.parse_args()
## Config Object
cfgfiles = campanile.cfg_file_locations()
cfgfiles.insert(0, args.config)
c = ConfigParser.SafeConfigParser({'ephemeral':'/tmp'})
c.read(cfgfiles)
## S3 Bucket Connections
src_bucket = S3Connection(suppress_consec_slashes=False,\
host=args.src_endpoint,is_secure=True,
profile_name=args.src_profile).\
get_bucket(args.src,validate=False)
dst_bucket = S3Connection(suppress_consec_slashes=False,\
host=args.dst_endpoint,is_secure=True,
profile_name=args.dst_profile).\
get_bucket(args.dst,validate=False)
## Reporting Counters
files = 0
movedbytes = 0
## Select random tmpdir to distribute load across disks
tmpdir = random.choice(c.get('DEFAULT',"ephemeral").split(','))
start_index = campanile.stream_index()
for line in fileinput.input("-"):
name, etag, size, mtime, mid, part, partcount, startbyte, stopbyte \
= line.rstrip('\n').split('\t')[start_index:]
srckey = src_bucket.get_key(name, validate=False)
dstkey = dst_bucket.get_key(name, validate=False)
if mid == campanile.NULL:
headers={}
report_name = name
expected_size = int(size)
else:
headers={'Range' : "bytes=%s-%s" % (startbyte, stopbyte)}
report_name = "%s-%s" % (name, 'part')
expected_size = int(stopbyte) - int(startbyte) + 1
with tempfile.SpooledTemporaryFile(max_size=c.getint('DEFAULT',\
'maxtmpsize'),dir=tmpdir) as fp:
## Download
p = campanile.FileProgress(name, verbose=1)
srckey.get_contents_to_file(fp, headers=headers, cb=p.progress)
if fp.tell() != expected_size:
raise Exception("Something bad happened for %s. \
Expecting %s, but got %s" % \
(report_name, expected_size, fp.tell()))
campanile.counter(args.src, "OutputBytes", size)
            fp.flush()
fp.seek(0)
if mid == campanile.NULL:
dstkey.cache_control= srckey.cache_control
dstkey.content_type = srckey.content_type
dstkey.content_encoding = srckey.content_encoding
dstkey.content_disposition = srckey.content_disposition
dstkey.content_language = srckey.content_language
dstkey.metadata = srckey.metadata
dstkey.md5 = srckey.md5
report_name = name
else:
mp = boto.s3.multipart.MultiPartUpload(bucket=dst_bucket)
mp.id = mid
mp.key_name = name
report_name = "%s-%s" % (name, part)
## Upload
p = campanile.FileProgress(report_name, verbose=1)
if mid == campanile.NULL:
dstkey.set_contents_from_file(fp,
encrypt_key=srckey.encrypted, cb=p.progress)
newetag = dstkey.etag.replace("\"","")
else:
mpart = mp.upload_part_from_file(fp,part_num=int(part),
cb=p.progress)
newetag = mpart.etag.replace("\"","")
if newetag != srckey.md5:
## Add alert
raise Exception("Something bad happened for %s. \
Expecting %s md5, but got %s" % \
(report_name, srckey.md5, newetag))
if mid != campanile.NULL:
print "%s\t%s\t%s\t%s\t%s\t%s\t%s" % \
(name, etag, mid, newetag, part, startbyte, stopbyte)
campanile.counter(args.dst, "InputBytes", expected_size)
campanile.status("%s/%s:OK" % (args.dst,report_name))
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
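# -----------------------------------------------------------------------------
# Illustrative note (not part of the original script): main() reads tab-separated
# manifest lines from stdin of the form
#   name  etag  size  mtime  mid  part  partcount  startbyte  stopbyte
# (mid/part hold the campanile NULL marker for single-part copies), e.g.
#   ./objectcopy.py --src-bucket source-bucket --dst-bucket dest-bucket < manifest.tsv
# where the bucket names are placeholders.
# -----------------------------------------------------------------------------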
|
smart_open/tests/test_package.py | sivchand/smart_open | 2,047 | 41635 | <reponame>sivchand/smart_open<filename>smart_open/tests/test_package.py
# -*- coding: utf-8 -*-
import os
import unittest
import pytest
from smart_open import open
skip_tests = "SMART_OPEN_TEST_MISSING_DEPS" not in os.environ
class PackageTests(unittest.TestCase):
@pytest.mark.skipif(skip_tests, reason="requires missing dependencies")
def test_azure_raises_helpful_error_with_missing_deps(self):
with pytest.raises(ImportError, match=r"pip install smart_open\[azure\]"):
open("azure://foo/bar")
@pytest.mark.skipif(skip_tests, reason="requires missing dependencies")
def test_aws_raises_helpful_error_with_missing_deps(self):
match = r"pip install smart_open\[s3\]"
with pytest.raises(ImportError, match=match):
open("s3://foo/bar")
@pytest.mark.skipif(skip_tests, reason="requires missing dependencies")
def test_gcs_raises_helpful_error_with_missing_deps(self):
with pytest.raises(ImportError, match=r"pip install smart_open\[gcs\]"):
open("gs://foo/bar")
|
src/commands/jsdoc/generate_jsdoc.py | PranjalPansuriya/JavaScriptEnhancements | 690 | 41661 | import sublime, sublime_plugin
import os
from ...libs import util
from ...libs import JavascriptEnhancementsExecuteOnTerminalCommand
class JavascriptEnhancementsGenerateJsdocCommand(JavascriptEnhancementsExecuteOnTerminalCommand, sublime_plugin.WindowCommand):
is_node = True
is_bin_path = True
def prepare_command(self):
jsdoc_conf_file = os.path.join(self.settings['project_dir_name'], self.settings['project_settings']['jsdoc']['conf_file'])
if os.path.isfile(jsdoc_conf_file) :
self.command = ["jsdoc", "-c", jsdoc_conf_file]
else :
sublime.error_message("JSDOC ERROR: Can't load "+jsdoc_conf_file+" file!\nConfiguration file REQUIRED!")
return
self._run()
def _run(self):
super(JavascriptEnhancementsGenerateJsdocCommand, self)._run()
def is_enabled(self):
return True if util.is_javascript_project() else False |
src/architectures/transformer_encoder.py | francismontalbo/attention-is-all-you-need-paper | 167 | 41665 | import torch.nn as nn
from architectures.position_wise_feed_forward_net import PositionWiseFeedForwardNet
from architectures.multi_head_attention import MultiHeadAttention
from architectures.add_and_norm import AddAndNorm
class TransformerEncoderBlock(nn.Module):
def __init__(self, d_model, n_heads, d_ff, dropout_proba):
super(TransformerEncoderBlock, self).__init__()
self.W_q = nn.Linear(d_model, d_model)
self.W_k = nn.Linear(d_model, d_model)
self.W_v = nn.Linear(d_model, d_model)
self.mha_layer=MultiHeadAttention(d_model, n_heads)
self.dropout_layer_1=nn.Dropout(dropout_proba)
self.add_and_norm_layer_1 = AddAndNorm(d_model)
self.ffn_layer = PositionWiseFeedForwardNet(d_model, d_ff)
self.dropout_layer_2=nn.Dropout(dropout_proba)
self.add_and_norm_layer_2 = AddAndNorm(d_model)
def forward(self, x, mask):
# x dims: (batch_size, src_seq_len, d_model)
# mask dim: (batch_size, 1, 1, src_seq_len)
q = self.W_q(x) # (batch_size, src_seq_len, d_model)
k = self.W_k(x) # (batch_size, src_seq_len, d_model)
v = self.W_v(x) # (batch_size, src_seq_len, d_model)
mha_out = self.mha_layer(q, k, v, mask) # (batch_size, src_seq_len, d_model)
mha_out= self.dropout_layer_1(mha_out) # (batch_size, src_seq_len, d_model)
mha_out = self.add_and_norm_layer_1(x, mha_out) # (batch_size, src_seq_len, d_model)
ffn_out = self.ffn_layer(mha_out) # (batch_size, src_seq_len, d_model)
ffn_out= self.dropout_layer_2(ffn_out) # (batch_size, src_seq_len, d_model)
ffn_out = self.add_and_norm_layer_2(mha_out, ffn_out) # (batch_size, src_seq_len, d_model)
return ffn_out
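# ---------------------------------------------------------------------------
# Hedged shape-check sketch (not part of the original module); the exact mask
# semantics depend on the imported MultiHeadAttention implementation:
#
#   import torch
#   block = TransformerEncoderBlock(d_model=512, n_heads=8, d_ff=2048, dropout_proba=0.1)
#   x = torch.randn(2, 10, 512)        # (batch_size, src_seq_len, d_model)
#   mask = torch.ones(2, 1, 1, 10)     # attend to every position
#   assert block(x, mask).shape == (2, 10, 512)
# ---------------------------------------------------------------------------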
class TransformerEncoder(nn.Module):
def __init__(self, n_blocks, n_heads, d_model, d_ff, dropout_proba=0.1):
super(TransformerEncoder, self).__init__()
self.encoder_blocks=nn.ModuleList([TransformerEncoderBlock(d_model, n_heads, d_ff, dropout_proba) for _ in range(n_blocks)])
def forward(self, x, mask):
for encoder_block in self.encoder_blocks:
x = encoder_block(x, mask)
return x |
Payloads/Zip-Traversal/make.py | 5tr1x/SecLists | 39,901 | 41668 | #!/usr/bin/env python3
import zipfile
# The file to USE inside the zip, before compression
filein = "index.php"
print("[i] FileIn: %s\n" % filein)
# How deep are we going?
depth = ""
# Loop 11 times (00-10)
for i in range(11):
# The .zip file to use
zipname = "depth-%02d.zip" % i
print("[i] ZipName: %s" % zipname)
# Get the zip file out ready
with zipfile.ZipFile(zipname , 'w') as zip:
# The file INSIDDE the zip
filezip = "%s%s" % (depth, filein)
print("[i] ZipFile: %s" % filezip)
# Write the zip file out
zip.write(filein, filezip)
# Increase depth for next loop
depth += "../"
print("\n[i] Done")
|
addons/Sprytile-6b68d00/rx/linq/observable/transduce.py | trisadmeslek/V-Sekai-Blender-tools | 733 | 41679 | """Transducers for RxPY.
There are several different implementations of transducers in Python.
This implementation is currently targeted for:
- http://code.sixty-north.com/python-transducers
You should also read the excellent article series "Understanding
Transducers through Python" at:
- http://sixty-north.com/blog/series/understanding-transducers-through-python
Other implementations of transducers in Python are:
- https://github.com/cognitect-labs/transducers-python
"""
from rx.core import Observable, AnonymousObservable
from rx.internal import extensionmethod
class Observing(object):
"""An observing transducer."""
def __init__(self, observer):
self.observer = observer
def initial(self):
return self.observer
def step(self, obs, input):
return obs.on_next(input)
def complete(self, obs):
return obs.on_completed()
def __call__(self, result, item):
return self.step(result, item)
@extensionmethod(Observable)
def transduce(self, transducer):
"""Execute a transducer to transform the observable sequence.
Keyword arguments:
:param Transducer transducer: A transducer to execute.
:returns: An Observable sequence containing the results from the
transducer.
:rtype: Observable
"""
source = self
def subscribe(observer):
xform = transducer(Observing(observer))
def on_next(v):
try:
xform.step(observer, v)
except Exception as e:
observer.on_error(e)
def on_completed():
xform.complete(observer)
return source.subscribe(on_next, observer.on_error, on_completed)
return AnonymousObservable(subscribe)
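# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal hand-rolled
# "mapping" transducer compatible with the Observing reducer above, shown here
# instead of the external `transducer` package mentioned in the docstring.
class _MappingSketch(object):
    """Applies `fn` to every element before forwarding it to the wrapped reducer."""
    def __init__(self, fn, reducer):
        self.fn = fn
        self.reducer = reducer
    def step(self, obs, input):
        return self.reducer.step(obs, self.fn(input))
    def complete(self, obs):
        return self.reducer.complete(obs)
def _mapping_sketch(fn):
    """Returns a transducer: a callable mapping one reducer to another."""
    return lambda reducer: _MappingSketch(fn, reducer)
# Commented usage (assumes an RxPY 1.x style factory such as Observable.from_):
#   Observable.from_([1, 2, 3]) \
#       .transduce(_mapping_sketch(lambda x: x * 10)) \
#       .subscribe(print)   # -> 10, 20, 30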
|
tests/api/endpoints/admin/test_institution_users.py | weimens/seahub | 420 | 41706 | import json
import logging
from django.urls import reverse
from seahub.test_utils import BaseTestCase
from tests.common.utils import randstring
from seahub.institutions.models import Institution, InstitutionAdmin
from seahub.profile.models import Profile
logger = logging.getLogger(__name__)
class AdminInstitutionUsersTest(BaseTestCase):
def setUp(self):
pass
def _add_institution(self, name=''):
return Institution.objects.create(name=name)
def _delete_institution(self, name=''):
try:
institution = Institution.objects.get(name=name)
institution.delete()
except Exception as e:
logger.error(e)
def test_can_get(self):
self.login_as(self.admin)
inst = self._add_institution('int1')
url = reverse('api-v2.1-admin-institution-users', args=[inst.id])
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert type(json_resp['user_list']) is list
inst.delete()
def test_no_permission(self):
self.logout()
self.login_as(self.admin_no_other_permission)
inst = self._add_institution('int1')
url = reverse('api-v2.1-admin-institution-users', args=[inst.id])
resp = self.client.get(url)
self.assertEqual(403, resp.status_code)
def test_can_create(self):
self.login_as(self.admin)
inst = self._add_institution('int1')
url = reverse('api-v2.1-admin-institution-users', args=[inst.id])
data = {
'email': 'invalid_email_string',
}
resp = self.client.post(url, data)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert type(json_resp['success']) is list
assert type(json_resp['failed']) is list
class AdminInstitutionUserTest(BaseTestCase):
def setUp(self):
pass
def _add_institution(self, name=''):
return Institution.objects.create(name=name)
def _delete_institution(self, name=''):
try:
institution = Institution.objects.get(name=name)
institution.delete()
except Exception as e:
logger.error(e)
def _add_user_in_institution(self, email, inst_name):
profile = Profile.objects.get_profile_by_user(email)
if not profile:
profile = Profile.objects.add_or_update(username=email, institution=inst_name)
else:
profile.institution = inst_name
profile.save()
def test_can_update(self):
self.login_as(self.admin)
inst = self._add_institution('int1')
self._add_user_in_institution(self.user.email, inst.name)
url = reverse('api-v2.1-admin-institution-user', args=[inst.id, self.user.email])
data = 'is_institution_admin=True'
resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['is_institution_admin'] is True
inst.delete()
def test_can_delete(self):
self.login_as(self.admin)
inst = self._add_institution('int1')
self._add_user_in_institution(self.user.email, inst.name)
url = reverse('api-v2.1-admin-institution-user', args=[inst.id, self.user.email])
resp = self.client.delete(url)
self.assertEqual(200, resp.status_code)
inst.delete()
|
sdk/python/pulumi_azure/appservice/public_certificate.py | henriktao/pulumi-azure | 109 | 41716 | <reponame>henriktao/pulumi-azure<filename>sdk/python/pulumi_azure/appservice/public_certificate.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['PublicCertificateArgs', 'PublicCertificate']
@pulumi.input_type
class PublicCertificateArgs:
def __init__(__self__, *,
app_service_name: pulumi.Input[str],
blob: pulumi.Input[str],
certificate_location: pulumi.Input[str],
certificate_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str]):
"""
The set of arguments for constructing a PublicCertificate resource.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
pulumi.set(__self__, "app_service_name", app_service_name)
pulumi.set(__self__, "blob", blob)
pulumi.set(__self__, "certificate_location", certificate_location)
pulumi.set(__self__, "certificate_name", certificate_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
@property
@pulumi.getter(name="appServiceName")
def app_service_name(self) -> pulumi.Input[str]:
"""
The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "app_service_name")
@app_service_name.setter
def app_service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "app_service_name", value)
@property
@pulumi.getter
def blob(self) -> pulumi.Input[str]:
"""
The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "blob")
@blob.setter
def blob(self, value: pulumi.Input[str]):
pulumi.set(self, "blob", value)
@property
@pulumi.getter(name="certificateLocation")
def certificate_location(self) -> pulumi.Input[str]:
"""
The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
"""
return pulumi.get(self, "certificate_location")
@certificate_location.setter
def certificate_location(self, value: pulumi.Input[str]):
pulumi.set(self, "certificate_location", value)
@property
@pulumi.getter(name="certificateName")
def certificate_name(self) -> pulumi.Input[str]:
"""
The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "certificate_name")
@certificate_name.setter
def certificate_name(self, value: pulumi.Input[str]):
pulumi.set(self, "certificate_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@pulumi.input_type
class _PublicCertificateState:
def __init__(__self__, *,
app_service_name: Optional[pulumi.Input[str]] = None,
blob: Optional[pulumi.Input[str]] = None,
certificate_location: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering PublicCertificate resources.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] thumbprint: The thumbprint of the public certificate.
"""
if app_service_name is not None:
pulumi.set(__self__, "app_service_name", app_service_name)
if blob is not None:
pulumi.set(__self__, "blob", blob)
if certificate_location is not None:
pulumi.set(__self__, "certificate_location", certificate_location)
if certificate_name is not None:
pulumi.set(__self__, "certificate_name", certificate_name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
@property
@pulumi.getter(name="appServiceName")
def app_service_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "app_service_name")
@app_service_name.setter
def app_service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_service_name", value)
@property
@pulumi.getter
def blob(self) -> Optional[pulumi.Input[str]]:
"""
The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "blob")
@blob.setter
def blob(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "blob", value)
@property
@pulumi.getter(name="certificateLocation")
def certificate_location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
"""
return pulumi.get(self, "certificate_location")
@certificate_location.setter
def certificate_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate_location", value)
@property
@pulumi.getter(name="certificateName")
def certificate_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "certificate_name")
@certificate_name.setter
def certificate_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def thumbprint(self) -> Optional[pulumi.Input[str]]:
"""
The thumbprint of the public certificate.
"""
return pulumi.get(self, "thumbprint")
@thumbprint.setter
def thumbprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "thumbprint", value)
class PublicCertificate(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_service_name: Optional[pulumi.Input[str]] = None,
blob: Optional[pulumi.Input[str]] = None,
certificate_location: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an App Service Public Certificate.
## Example Usage
```python
import pulumi
import base64
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_plan = azure.appservice.Plan("examplePlan",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.appservice.PlanSkuArgs(
tier="Standard",
size="S1",
))
example_app_service = azure.appservice.AppService("exampleAppService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
app_service_plan_id=example_plan.id)
example_public_certificate = azure.appservice.PublicCertificate("examplePublicCertificate",
resource_group_name=example_resource_group.name,
app_service_name=example_app_service.name,
certificate_name="example-public-certificate",
certificate_location="Unknown",
blob=(lambda path: base64.b64encode(open(path).read().encode()).decode())("app_service_public_certificate.cer"))
```
## Import
App Service Public Certificates can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/publicCertificate:PublicCertificate example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Web/sites/site1/publicCertificates/publicCertificate1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PublicCertificateArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an App Service Public Certificate.
## Example Usage
```python
import pulumi
import base64
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_plan = azure.appservice.Plan("examplePlan",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.appservice.PlanSkuArgs(
tier="Standard",
size="S1",
))
example_app_service = azure.appservice.AppService("exampleAppService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
app_service_plan_id=example_plan.id)
example_public_certificate = azure.appservice.PublicCertificate("examplePublicCertificate",
resource_group_name=example_resource_group.name,
app_service_name=example_app_service.name,
certificate_name="example-public-certificate",
certificate_location="Unknown",
blob=(lambda path: base64.b64encode(open(path).read().encode()).decode())("app_service_public_certificate.cer"))
```
## Import
App Service Public Certificates can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/publicCertificate:PublicCertificate example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Web/sites/site1/publicCertificates/publicCertificate1
```
:param str resource_name: The name of the resource.
:param PublicCertificateArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PublicCertificateArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_service_name: Optional[pulumi.Input[str]] = None,
blob: Optional[pulumi.Input[str]] = None,
certificate_location: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PublicCertificateArgs.__new__(PublicCertificateArgs)
if app_service_name is None and not opts.urn:
raise TypeError("Missing required property 'app_service_name'")
__props__.__dict__["app_service_name"] = app_service_name
if blob is None and not opts.urn:
raise TypeError("Missing required property 'blob'")
__props__.__dict__["blob"] = blob
if certificate_location is None and not opts.urn:
raise TypeError("Missing required property 'certificate_location'")
__props__.__dict__["certificate_location"] = certificate_location
if certificate_name is None and not opts.urn:
raise TypeError("Missing required property 'certificate_name'")
__props__.__dict__["certificate_name"] = certificate_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["thumbprint"] = None
super(PublicCertificate, __self__).__init__(
'azure:appservice/publicCertificate:PublicCertificate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
app_service_name: Optional[pulumi.Input[str]] = None,
blob: Optional[pulumi.Input[str]] = None,
certificate_location: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None) -> 'PublicCertificate':
"""
Get an existing PublicCertificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] thumbprint: The thumbprint of the public certificate.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PublicCertificateState.__new__(_PublicCertificateState)
__props__.__dict__["app_service_name"] = app_service_name
__props__.__dict__["blob"] = blob
__props__.__dict__["certificate_location"] = certificate_location
__props__.__dict__["certificate_name"] = certificate_name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["thumbprint"] = thumbprint
return PublicCertificate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appServiceName")
def app_service_name(self) -> pulumi.Output[str]:
"""
The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "app_service_name")
@property
@pulumi.getter
def blob(self) -> pulumi.Output[str]:
"""
The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "blob")
@property
@pulumi.getter(name="certificateLocation")
def certificate_location(self) -> pulumi.Output[str]:
"""
The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
"""
return pulumi.get(self, "certificate_location")
@property
@pulumi.getter(name="certificateName")
def certificate_name(self) -> pulumi.Output[str]:
"""
The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "certificate_name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def thumbprint(self) -> pulumi.Output[str]:
"""
The thumbprint of the public certificate.
"""
return pulumi.get(self, "thumbprint")
|
stackoverflow/spiders/items.py | Janeho454199/stackoverflow-spider | 131 | 41725 | <filename>stackoverflow/spiders/items.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import scrapy
class StackoverflowItem(scrapy.Item):
links = scrapy.Field()
views = scrapy.Field()
votes = scrapy.Field()
answers = scrapy.Field()
tags = scrapy.Field()
questions = scrapy.Field()
|
entity/query_item.py | will4906/PatentCrawler | 136 | 41735 | <reponame>will4906/PatentCrawler<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Created on 2017/3/19
@author: will4906
"""
import re
def handle_item_group(item_group):
"""
    Build the search sub-expression for an ItemGroup.
    :param item_group: ItemGroup holding And/Or/Not clauses
    :return: the assembled expression string
"""
AND = ' AND '
OR = ' OR '
NOT = ' NOT '
exp_str = ""
keyand = item_group.__getattribute__('And')
keyor = item_group.__getattribute__('Or')
keynot = item_group.__getattribute__('Not')
if keyand is not None:
parms = keyand.__getattribute__('parm')
for parm in parms:
exp_str += AND + parm
exp_str = exp_str.replace(AND, '', 1)
if keyor is not None:
parms = keyor.__getattribute__('parm')
for parm in parms:
exp_str += OR + parm
if keyand is None:
exp_str = exp_str.replace(OR, '', 1)
if keynot is not None:
parms = keynot.__getattribute__('parm')
for parm in parms:
exp_str += NOT + parm
if keyand is None and keyor is None:
exp_str = exp_str.replace(NOT, '', 1)
return exp_str
# Handler for application/publication number fields.
def handle_number(title, request_number):
word_reg = '[a-zA-Z]'
if isinstance(request_number, ItemGroup):
search_exp = handle_item_group(request_number)
is_word = re.search(word_reg, search_exp[:2])
if is_word is not None:
return title + '=(' + search_exp + '+)'
else:
return title + '=(+' + search_exp + '+)'
else:
is_word = re.search(word_reg, request_number[:2])
if is_word is not None:
return title + '=(' + request_number + '+)'
else:
return title + '=(+' + request_number + '+)'
def handle_date_element(title, date_element):
"""
    Build the search sub-expression for a date element.
    :param title: the Chinese field title
    :param date_element: a DateSelect instance
    :return: the assembled expression string
"""
if isinstance(date_element, DateSelect):
return title + date_element.__getattribute__('search_exp')
else:
raise Exception('We just support DateSelect for date element!')
def handle_invention_type(title, invention_type):
"""
    Build the search sub-expression for the invention type.
    :param title: the Chinese field title
    :param invention_type: a string or an Or group of invention types
    :return: the assembled expression string
"""
exp_str = ""
if isinstance(invention_type, Or):
OR = ' OR '
keyor = invention_type
if keyor is not None:
parms = keyor.__getattribute__('parm')
for parm in parms:
if parm == 'I' or parm == 'U' or parm == 'D':
parm = '\"' + parm + '\"'
elif parm.find('发明申请') != -1:
parm = '\"I\"'
elif parm.find('实用新型') != -1:
parm = '\"U\"'
elif parm.find('外观设计') != -1:
parm = '\"D\"'
exp_str += OR + parm
exp_str = exp_str.replace(OR, '', 1)
elif isinstance(invention_type, str):
if invention_type == 'I' or invention_type == 'U' or invention_type == 'D':
exp_str = '\"' + invention_type + '\"'
elif invention_type.find('发明申请') != -1:
exp_str = '\"I\"'
elif invention_type.find('实用新型') != -1:
exp_str = '\"U\"'
elif invention_type.find('外观设计') != -1:
exp_str = '\"D\"'
else:
raise Exception('We just support string or Or for invention_type element!')
return title + "=(" + exp_str + ")"
def default_handle(title, default):
"""
    Default handler for text fields.
    :param title: the Chinese field title
    :param default: a string or an ItemGroup
    :return: the assembled expression string
"""
if isinstance(default, ItemGroup):
return title + '=(' + handle_item_group(default) + ')'
elif isinstance(default, str):
return title + '=(' + default + ')'
else:
raise Exception('We just support string or ItemGroup!')
def find_element_in_item_group(element, item_group):
"""
    Find the given element inside an ItemGroup.
    :param element: the value to look for
    :param item_group: the ItemGroup to search
    :return: the index of the element if found, otherwise None
"""
keyand = item_group.__getattribute__('And')
keyor = item_group.__getattribute__('Or')
keynot = item_group.__getattribute__('Not')
if keyand is not None:
parms = keyand.__getattribute__('parm')
try:
return parms.index(element)
except:
pass
if keyor is not None:
parms = keyor.__getattribute__('parm')
try:
return parms.index(element)
except:
pass
if keynot is not None:
parms = keynot.__getattribute__('parm')
try:
return parms.index(element)
except:
pass
return None
title_case = {
'request_number': handle_number,
'request_date': handle_date_element,
'publish_number': handle_number,
'publish_date': handle_date_element,
'invention_name': default_handle,
'ipc_class_number': default_handle,
'proposer': default_handle,
'inventor': default_handle,
'priority_number': default_handle,
'priority_date': handle_date_element,
'abstract': default_handle,
'claim': default_handle,
'instructions': default_handle,
'key_word': default_handle,
'locarno_class_number': default_handle,
'description_of_the_design': default_handle,
'agent': default_handle,
'agency': default_handle,
'proposer_post_code': default_handle,
'proposer_address': default_handle,
'proposer_location': default_handle,
'FT_class_number': default_handle,
'UC_class_number': default_handle,
'ECLA_class_number': default_handle,
'FI_class_number': default_handle,
'English_invention_name': default_handle,
'French_invention_name': default_handle,
'German_invention_name': default_handle,
'other_invention_name': default_handle,
'English_abstract': default_handle,
'PCT_enters_national_phase_date': handle_date_element,
'PCT_international_application_number': handle_number,
'French_abstract': default_handle,
'German_abstract': default_handle,
'other_abstract': default_handle,
'PCT_international_application_date': handle_date_element,
'PCT_international_publish_number': handle_number,
'PCT_international_publish_date': handle_date_element,
'CPC_class_number': default_handle,
'C-SETS': default_handle,
'invention_type': handle_invention_type,
'publish_country': default_handle,
}
title_define = {
'patent_id': '专利id',
'request_number': '申请号',
'request_date': '申请日',
'publish_number': '公开(公告)号',
'publish_date': '公开(公告)日',
'invention_name': '发明名称',
'ipc_class_number': 'IPC分类号',
'proposer': '申请(专利权)人',
'inventor': '发明人',
'priority_number': '优先权号',
'priority_date': '优先权日',
'abstract': '摘要',
'claim': '权利要求',
'instructions': '说明书',
'key_word': '关键词',
'locarno_class_number': '外观设计洛迦诺分类号',
'description_of_the_design': '外观设计简要说明',
'agent': '代理人',
'agency': '代理机构',
'proposer_post_code': '申请人邮编',
'proposer_address': '申请人地址',
'proposer_location': '申请人所在国(省)',
'FT_class_number': 'FT分类号',
'UC_class_number': 'UC分类号',
'ECLA_class_number': 'ECLA分类号',
'FI_class_number': 'FI分类号',
'English_invention_name': '发明名称(英)',
'French_invention_name': '发明名称(法)',
'German_invention_name': '发明名称(德)',
'other_invention_name': '发明名称(其他)',
'English_abstract': '摘要(英)',
'PCT_enters_national_phase_date': 'PCT进入国家阶段日期',
'PCT_international_application_number': 'PCT国际申请号',
'French_abstract': '摘要(法)',
'German_abstract': '摘要(德)',
'other_abstract': '摘要(其他)',
'PCT_international_application_date': 'PCT国际申请日期',
'PCT_international_publish_number': 'PCT国际申请公开号',
'PCT_international_publish_date': 'PCT国际申请公开日期',
'CPC_class_number': 'CPC分类号',
'C-SETS': 'C-SETS',
'invention_type': '发明类型',
'publish_country': '公开国',
'legal_status': '法律状态',
'legal_status_effective_date': '法律状态生效日'
}
# Date selector
class DateSelect:
def __init__(self, select='=', date='2001-01-01', enddate=None):
        # Operator: '=', '>', '>=', '<', '<=', ':'
self.select = select
        # Date in a fixed format, e.g. 2001-01-01
self.date = date
        # End date; only effective when the operator is ':', giving a range from date to enddate
self.enddate = enddate
self.search_exp = ''
if self.select != ':':
self.search_exp = self.select + self.date
else:
self.search_exp = '=' + self.date + self.select + self.enddate
def __repr__(self):
return 'DateSelect{select=' + str(self.select) + ',date=' + str(self.date) + ',enddate=' + str(
self.enddate) + '}'
def __str__(self):
return 'DateSelect{select=' + str(self.select) + ',date=' + str(self.date) + ',enddate=' + str(
self.enddate) + '}'
class ItemGroup:
def __init__(self, And=None, Or=None, Not=None):
self.And = And
self.Or = Or
self.Not = Not
def add_or(self, *parm):
if self.Or is None:
self.Or = Or(*parm)
else:
self.Or.add_parm(*parm)
def __repr__(self):
whole = ''
if self.And is not None:
whole += str(self.And)
if self.Or is not None:
whole += str(self.Or)
if self.Not is not None:
whole += str(self.Not)
return whole
class And:
def __init__(self, *parm):
self.parm = list(parm)
def add_parm(self, *ps):
self.parm = self.parm + list(ps)
def __repr__(self):
andStr = ''
for p in self.parm:
andStr += str(p) + ';'
return andStr
class Or:
def __init__(self, *parm):
self.parm = list(parm)
    def add_parm(self, *ps):
        self.parm = self.parm + list(ps)
def __repr__(self):
andStr = ''
for p in self.parm:
andStr += str(p) + ';'
return andStr
class Not:
def __init__(self, *parm):
self.parm = list(parm)
def __repr__(self):
andStr = ''
for p in self.parm:
andStr += str(p) + ';'
return andStr
class SipoItem:
"""
    An object that builds and parses the search expression used to query the patent website.
"""
def __init__(self, **kwargs):
self.startIndex = 0
self.__queryAnd = And()
        self.target_parm = {}  # target parameters after normalization
self.__prepare_item(kwargs)
for title, value in title_define.items():
key = kwargs.get(title)
if key is not None:
self.__queryAnd.add_parm(title_case.get(title)(value, key))
self.__itemGroup = ItemGroup(And=self.__queryAnd)
        self.search_exp_cn = handle_item_group(self.__itemGroup)  # the generated search expression
self.target_parm = self.__check_target_parm(kwargs)
def __prepare_item(self, items):
invention_type = items.get('invention_type')
if invention_type is not None:
publish_country = items.get('publish_country')
if publish_country is None:
items['publish_country'] = 'CN'
else:
if isinstance(publish_country, str):
if publish_country != 'CN':
items['publish_country'] = ItemGroup(Or=Or(publish_country, 'CN'))
elif isinstance(publish_country, ItemGroup):
if find_element_in_item_group('CN', publish_country) is None:
publish_country.add_or('CN')
def __check_target_parm(self, parm):
target = {}
if isinstance(parm, dict):
for key, value in parm.items():
if key == 'invention_type':
if isinstance(value, Or):
for index, pvalue in enumerate(value.parm):
if pvalue == '"I"' or pvalue == '发明申请':
pvalue = {'en': '"I"', 'cn': '发明申请'}
elif pvalue == '"U"' or pvalue == '实用新型':
pvalue = {'en': '"U"', 'cn': '实用新型'}
elif pvalue == '"D"' or pvalue == '外观设计':
pvalue = {'en': '"D"', 'cn': '外观设计'}
else:
                                raise Exception('Please check the invention_type')
value.parm[index] = pvalue
else:
if value == '"I"' or value == '发明申请':
value = {'en': '"I"', 'cn': '发明申请'}
elif value == '"U"' or value == '实用新型':
value = {'en': '"U"', 'cn': '实用新型'}
elif value == '"D"' or value == '外观设计':
value = {'en': '"D"', 'cn': '外观设计'}
else:
                            raise Exception('Please check the invention_type')
target[key] = value
return target
def __repr__(self):
return self.search_exp_cn
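# Illustrative sketch, not part of the original module: it exercises the small
# helper classes defined above with made-up field values.
if __name__ == '__main__':
    date_range = DateSelect(select=':', date='2015-01-01', enddate='2015-12-31')
    print(date_range.search_exp)       # "=2015-01-01:2015-12-31"
    alternatives = Or('"I"', '"U"')    # invention application or utility model
    print(ItemGroup(Or=alternatives))  # "I";"U";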
|
feature_engine/imputation/drop_missing_data.py | kylegilde/feature_engine | 196 | 41746 | # Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
from typing import List, Optional, Union
import pandas as pd
from feature_engine.dataframe_checks import _is_dataframe
from feature_engine.imputation.base_imputer import BaseImputer
from feature_engine.variable_manipulation import _check_input_parameter_variables
class DropMissingData(BaseImputer):
"""
DropMissingData() will delete rows containing missing values. It provides
    similar functionality to pandas.dropna().
It works for numerical and categorical variables. You can enter the list of
variables for which missing values should be evaluated. Alternatively, the imputer
will evaluate missing data in all variables in the dataframe.
More details in the :ref:`User Guide <drop_missing_data>`.
Parameters
----------
missing_only: bool, default=True
If `True`, rows will be dropped when they show missing data in variables with
missing data in the train set, that is, in the data set used in `fit()`. If
`False`, rows will be dropped if there is missing data in any of the variables.
This parameter only works when `threshold=None`, otherwise it is ignored.
variables: list, default=None
The list of variables to consider for the imputation. If None, the imputer will
evaluate missing data in all variables in the dataframe. Alternatively, the
imputer will evaluate missing data only in the variables in the list.
Note that if `missing_only=True` only variables with missing data in the train
set will be considered to drop a row, which might be a subset of the indicated
list.
threshold: int or float, default=None
        The fraction of non-NA values a row needs in order to be kept. If
        `threshold=1`, all variables need to have data to keep the row. If
        `threshold=0.5`, 50% of the variables need to have data to keep the row.
        If `threshold=0.1`, 10% of the variables need to have data to keep the row.
        If `threshold=None`, rows with NA in any of the variables will be dropped.
Attributes
----------
variables_:
The variables for which missing data will be examined to decide if a row is
dropped. The attribute `variables_` is different from the parameter `variables`
when the latter is `None`, or when only a subset of the indicated variables
show NA in the train set if `missing_only=True`.
n_features_in_:
The number of features in the train set used in fit.
Methods
-------
fit:
Find the variables for which missing data should be evaluated.
transform:
Remove rows with missing data.
fit_transform:
Fit to the data, then transform it.
return_na_data:
Returns a dataframe with the rows that contain missing data.
"""
def __init__(
self,
missing_only: bool = True,
threshold: Union[None, int, float] = None,
variables: Union[None, int, str, List[Union[str, int]]] = None,
) -> None:
if not isinstance(missing_only, bool):
raise ValueError(
"missing_only takes values True or False. "
f"Got {missing_only} instead."
)
if threshold is not None:
if not isinstance(threshold, (int, float)) or not (0 < threshold <= 1):
raise ValueError(
"threshold must be a value between 0 < x <= 1. "
f"Got {threshold} instead."
)
self.variables = _check_input_parameter_variables(variables)
self.missing_only = missing_only
self.threshold = threshold
def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
"""
Find the variables for which missing data should be evaluated to decide if a
row should be dropped.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The training data set.
y: pandas Series, default=None
y is not needed in this imputation. You can pass None or y.
"""
# check input dataframe
X = _is_dataframe(X)
# find variables for which indicator should be added
# if threshold, then missing_only is ignored:
if self.threshold is not None:
if not self.variables:
self.variables_ = [var for var in X.columns]
else:
self.variables_ = self.variables
# if threshold is None, we have the option to identify
# variables with NA only.
else:
if self.missing_only:
if not self.variables:
self.variables_ = [
var for var in X.columns if X[var].isnull().sum() > 0
]
else:
self.variables_ = [
var for var in self.variables if X[var].isnull().sum() > 0
]
else:
if not self.variables:
self.variables_ = [var for var in X.columns]
else:
self.variables_ = self.variables
self.n_features_in_ = X.shape[1]
return self
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""
Remove rows with missing data.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The dataframe to be transformed.
Returns
-------
X_new: pandas dataframe
The complete case dataframe for the selected variables, of shape
[n_samples - n_samples_with_na, n_features]
"""
X = self._check_transform_input_and_state(X)
if self.threshold:
X.dropna(
thresh=len(self.variables_) * self.threshold,
subset=self.variables_,
axis=0,
inplace=True,
)
else:
X.dropna(axis=0, how="any", subset=self.variables_, inplace=True)
return X
def return_na_data(self, X: pd.DataFrame) -> pd.DataFrame:
"""
Returns the subset of the dataframe with the rows with missing values. That is,
the subset of the dataframe that would be removed with the `transform()` method.
This method may be useful in production, for example if we want to store or log
the removed observations, that is, rows that will not be fed into the model.
        Parameters
        ----------
        X: pandas dataframe of shape = [n_samples, n_features]
            The dataframe to be checked for rows with missing data.
        Returns
        -------
        X_na: pandas dataframe of shape = [n_samples_with_na, n_features]
            The subset of the dataframe with the rows with missing data.
        """
X = self._check_transform_input_and_state(X)
if self.threshold:
idx = pd.isnull(X[self.variables_]).mean(axis=1) >= self.threshold
idx = idx[idx]
else:
idx = pd.isnull(X[self.variables_]).any(1)
idx = idx[idx]
return X.loc[idx.index, :]
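# Illustrative usage sketch, not part of the library source: a tiny synthetic
# dataframe, assuming pandas and numpy are available alongside feature_engine.
if __name__ == "__main__":
    import numpy as np
    toy = pd.DataFrame({
        "age": [20.0, np.nan, 35.0, 41.0],
        "city": ["London", "Paris", np.nan, "Berlin"],
    })
    dmd = DropMissingData(missing_only=True)
    print(dmd.fit_transform(toy))   # keeps only the fully observed rows 0 and 3
    print(dmd.return_na_data(toy))  # returns the dropped rows 1 and 2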
|
dev/examples/tungraph.py | Cam2337/snap-python | 242 | 41768 | <gh_stars>100-1000
import random
import sys
sys.path.append("../swig-r")
import snap
def PrintGStats(s, Graph):
'''
Print graph statistics
'''
print "graph %s, nodes %d, edges %d, empty %s" % (
s, Graph.GetNodes(), Graph.GetEdges(),
"yes" if Graph.Empty() else "no")
def DefaultConstructor():
'''
Test the default constructor
'''
Graph = snap.TUNGraph()
PrintGStats("DefaultConstructor:Graph",Graph)
def ManipulateNodesEdges():
'''
Test node, edge creation
'''
NNodes = 10000
NEdges = 100000
FName = "test.graph"
Graph = snap.TUNGraph()
t = Graph.Empty()
# create the nodes
for i in range(0, NNodes):
Graph.AddNode(i)
t = Graph.Empty()
n = Graph.GetNodes()
# create random edges
NCount = NEdges
while NCount > 0:
x = int(random.random() * NNodes)
y = int(random.random() * NNodes)
# skip the loops in this test
if x != y and not Graph.IsEdge(x,y):
n = Graph.AddEdge(x, y)
NCount -= 1
PrintGStats("ManipulateNodesEdges:Graph1",Graph)
# get all the nodes
NCount = 0
NI = Graph.BegNI()
while NI < Graph.EndNI():
NCount += 1
NI.Next()
# get all the edges for all the nodes
    ECount1 = 0
NI = Graph.BegNI()
while NI < Graph.EndNI():
ECount1 += NI.GetOutDeg()
NI.Next()
ECount1 = ECount1 / 2
# get all the edges directly
    ECount2 = 0
EI = Graph.BegEI()
while EI < Graph.EndEI():
ECount2 += 1
EI.Next()
print "graph ManipulateNodesEdges:Graph2, nodes %d, edges1 %d, edges2 %d" % (
NCount, ECount1, ECount2)
# assignment
    Graph1 = Graph
PrintGStats("ManipulateNodesEdges:Graph3",Graph1)
# save the graph
FOut = snap.TFOut(snap.TStr(FName))
Graph.Save(FOut)
FOut.Flush()
# load the graph
FIn = snap.TFIn(snap.TStr(FName))
Graph2 = snap.TUNGraph(FIn)
PrintGStats("ManipulateNodesEdges:Graph4",Graph2)
# remove all the nodes and edges
for i in range(0, NNodes):
n = Graph.GetRndNId()
Graph.DelNode(n)
PrintGStats("ManipulateNodesEdges:Graph5",Graph)
Graph1.Clr()
PrintGStats("ManipulateNodesEdges:Graph6",Graph1)
def GetSmallGraph():
'''
Test small graph
'''
Graph = snap.TUNGraph()
Graph.GetSmallGraph()
PrintGStats("GetSmallGraph:Graph",Graph)
if __name__ == '__main__':
print "----- DefaultConstructor -----"
DefaultConstructor()
print "----- ManipulateNodesEdges -----"
ManipulateNodesEdges()
print "----- GetSmallGraph -----"
GetSmallGraph()
|
doc/source/conf.py | Steap/glance | 309 | 41775 | # Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Glance documentation build configuration file
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../bin'))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'stevedore.sphinxext',
'sphinx.ext.viewcode',
'oslo_config.sphinxext',
'oslo_config.sphinxconfiggen',
'oslo_policy.sphinxpolicygen',
'openstackdocstheme',
'sphinxcontrib.apidoc',
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/glance'
openstackdocs_bug_project = 'glance'
openstackdocs_bug_tag = 'documentation'
# sphinxcontrib.apidoc options
apidoc_module_dir = '../../glance'
apidoc_output_dir = 'contributor/api'
apidoc_excluded_paths = [
'hacking/*',
'hacking',
'tests/*',
'tests',
'db/sqlalchemy/*',
'db/sqlalchemy']
apidoc_separate_modules = True
config_generator_config_file = [
('../../etc/oslo-config-generator/glance-api.conf',
'_static/glance-api'),
('../../etc/oslo-config-generator/glance-cache.conf',
'_static/glance-cache'),
('../../etc/oslo-config-generator/glance-manage.conf',
'_static/glance-manage'),
('../../etc/oslo-config-generator/glance-scrubber.conf',
'_static/glance-scrubber'),
]
policy_generator_config_file = [
('../../etc/glance-policy-generator.conf', '_static/glance'),
]
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2010-present, OpenStack Foundation.'
exclude_patterns = [
# The man directory includes some snippet files that are included
# in other documents during the build but that should not be
# included in the toctree themselves, so tell Sphinx to ignore
# them when scanning for input files.
'cli/footer.txt',
'cli/general_options.txt',
'cli/openstack_options.txt',
]
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['glance.']
# -- Options for man page output --------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
('cli/glanceapi', 'glance-api', u'Glance API Server',
[u'OpenStack'], 1),
('cli/glancecachecleaner', 'glance-cache-cleaner', u'Glance Cache Cleaner',
[u'OpenStack'], 1),
('cli/glancecachemanage', 'glance-cache-manage', u'Glance Cache Manager',
[u'OpenStack'], 1),
('cli/glancecacheprefetcher', 'glance-cache-prefetcher',
u'Glance Cache Pre-fetcher', [u'OpenStack'], 1),
('cli/glancecachepruner', 'glance-cache-pruner', u'Glance Cache Pruner',
[u'OpenStack'], 1),
('cli/glancecontrol', 'glance-control', u'Glance Daemon Control Helper ',
[u'OpenStack'], 1),
('cli/glancemanage', 'glance-manage', u'Glance Management Utility',
[u'OpenStack'], 1),
('cli/glancereplicator', 'glance-replicator', u'Glance Replicator',
[u'OpenStack'], 1),
('cli/glancescrubber', 'glance-scrubber', u'Glance Scrubber Service',
[u'OpenStack'], 1)
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
html_theme = 'openstackdocs'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any paths that contain "extra" files, such as .htaccess or
# robots.txt.
html_extra_path = ['_extra']
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
html_use_index = True
# -- Options for LaTeX output ------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', 'Glance.tex', u'Glance Documentation',
u'Glance Team', 'manual'),
]
|
test_frame/test_redis_ack_able/test_redis_ack_able_consumer.py | DJMIN/funboost | 120 | 41829 | """
Tests whether, with Redis as the message broker, abruptly killing the consumer process causes task loss.
"""
import time
from funboost import boost,BrokerEnum
@boost('test_cost_long_time_fun_queue2', broker_kind=BrokerEnum.REDIS_ACK_ABLE, concurrent_num=5)
def cost_long_time_fun(x):
    print(f'consuming {x} ...')
    time.sleep(3)
    print(f'finished consuming {x}')
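# To exercise the ack behaviour end to end, tasks would first be published from
# another process; a hedged sketch, assuming funboost's push API:
#     for i in range(100):
#         cost_long_time_fun.push(i)
# The consumer below can then be started and killed at random to verify that no
# task is lost.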
if __name__ == '__main__':
cost_long_time_fun.consume() |
observations/r/edc_t.py | hajime9652/observations | 199 | 41839 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def edc_t(path):
"""EPICA Dome C Ice Core 800KYr Temperature Estimates
Temperature record, using Deuterium as a proxy, from the EPICA (European
Project for Ice Coring in Antarctica) Dome C ice core covering 0 to 800
kyr BP.
A data frame with 5788 observations on the following 5 variables.
`Bag`
Bag number
`ztop`
Top depth (m)
`Age`
Years before 1950
`Deuterium`
Deuterium dD data
`dT`
Temperature difference from the average of the last 1000 years ~
-54.5degC
http://www.ncdc.noaa.gov/paleo/icecore/antarctica/domec/domec_epica_data.html
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `edc_t.csv`.
Returns:
Tuple of np.ndarray `x_train` with 5788 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'edc_t.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/edcT.csv'
maybe_download_and_extract(path, url,
save_file_name='edc_t.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
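# Illustrative call, not part of the original module; the cache directory is an
# arbitrary choice and the CSV is downloaded there on first use.
if __name__ == '__main__':
  x, meta = edc_t('~/observations_data')
  print(x.shape)           # expected: (5788, 5)
  print(meta['columns'])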
|
exercises/es/test_01_02_02.py | Jette16/spacy-course | 2,085 | 41851 | <filename>exercises/es/test_01_02_02.py<gh_stars>1000+
def test():
import spacy.tokens
import spacy.lang.de
assert isinstance(
nlp, spacy.lang.de.German
), "El objeto nlp debería ser un instance de la clase de alemán."
assert isinstance(
doc, spacy.tokens.Doc
), "¿Procesaste el texto con el objeto nlp para crear un doc?"
assert "print(doc.text)" in __solution__, "¿Imprimiste en pantalla el doc.text?"
__msg__.good("Sehr gut! :)")
|
modules/core/dbshell/dbshell.py | petabyteboy/nixcloud-webservices | 121 | 41905 | <filename>modules/core/dbshell/dbshell.py<gh_stars>100-1000
#!@interpreter@
import os
from pwd import getpwnam
from argparse import ArgumentParser
DBSHELL_CONFIG = @dbshellConfig@ # noqa
WEBSERVICES_PREFIX = "/var/lib/nixcloud/webservices"
def run_shell(dbname, user, command):
os.setuid(getpwnam(user).pw_uid)
os.execl(command, command, user, dbname)
def determine_wsname():
rel_to_ws_dir = os.path.relpath(os.getcwd(), WEBSERVICES_PREFIX)
components = rel_to_ws_dir.split(os.sep, 1)
if len(components) != 2:
return None
    wsname_candidate = components[0]
    if wsname_candidate in [os.curdir, os.pardir]:
        return None
    if wsname_candidate not in DBSHELL_CONFIG:
        return None
    return wsname_candidate
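# Example invocations (hypothetical web service and database names, not taken
# from this module):
#   dbshell mywebservice blog_db
#   cd /var/lib/nixcloud/webservices/mywebservice && dbshell blog_db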
if __name__ == '__main__':
desc = "Connect to a database within a web service instance"
parser = ArgumentParser(description=desc)
parser.add_argument("webservice_name", nargs='?',
help="The web service name. If the argument is"
" omitted, the service name is determined"
" by inspecting the current directory.")
parser.add_argument("database", help="The database name to connect to.")
options = parser.parse_args()
if options.webservice_name is None:
wsname = determine_wsname()
if wsname is None:
parser.error("Unable to determine web service name.")
elif options.webservice_name not in DBSHELL_CONFIG:
msg = "Web service {!r} does not exist."
parser.error(msg.format(options.webservice_name))
else:
wsname = options.webservice_name
wsdef = DBSHELL_CONFIG[wsname]
if options.database not in wsdef:
msg = "Database {!r} does not exist for web service {!r}."
parser.error(msg.format(options.database, wsname))
else:
run_shell(options.database, **wsdef[options.database])
|
wxpusher/tests/test_send_message.py | hnauto/wxpusher-sdk-python | 124 | 41914 | <filename>wxpusher/tests/test_send_message.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unittest for sending message.
File: test_send_message.py
Author: huxuan
Email: <EMAIL>
"""
import unittest
from wxpusher import WxPusher
from . import config
class TestSendMessage(unittest.TestCase):
"""Unittest for sending message."""
@classmethod
def setUpClass(cls):
"""Set up for class."""
WxPusher.default_token = config.TOKEN
def test_send_message_uid(self):
"""Positive case for sending message with uid."""
res = WxPusher.send_message(
self.test_send_message_uid.__doc__,
uids=config.UIDS,
url='http://example.com/',
)
self.assertIsInstance(res, dict)
self.assertIn('code', res)
self.assertEqual(1000, res['code'])
def test_send_message_topic_id(self):
"""Positive case for sending message with topic_id."""
res = WxPusher.send_message(
self.test_send_message_topic_id.__doc__,
topic_ids=config.TOPIC_IDS,
url='http://example.com/',
)
self.assertIsInstance(res, dict)
self.assertIn('code', res)
self.assertEqual(1000, res['code'])
|
apps/jobs/migrations/0014_rename_job_id_field_in_submission_model_to_job_name.py | kaustubh-s1/EvalAI | 1,470 | 41937 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-20 05:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("jobs", "0013_add_job_id_field_in_submission_model")]
operations = [
migrations.RenameField(
model_name="submission", old_name="job_id", new_name="job_name"
)
]
|
rotkehlchen/tests/exchanges/test_independentreserve.py | rotkehlchenio/rotkehlchen | 137 | 41963 | <reponame>rotkehlchenio/rotkehlchen<gh_stars>100-1000
import warnings as test_warnings
from unittest.mock import patch
import pytest
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.constants.assets import A_AUD, A_ETC, A_ETH
from rotkehlchen.errors.asset import UnknownAsset
from rotkehlchen.exchanges.data_structures import Location, Trade, TradeType
from rotkehlchen.exchanges.independentreserve import (
IR_TO_WORLD,
Independentreserve,
independentreserve_asset,
)
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.mock import MockResponse
def test_location():
exchange = Independentreserve('independentreserve1', 'a', b'a', object(), object())
assert exchange.location == Location.INDEPENDENTRESERVE
assert exchange.name == 'independentreserve1'
def test_assets_are_known():
exchange = Independentreserve('independentreserve1', 'a', b'a', object(), object())
response = exchange._api_query('get', 'Public', 'GetValidPrimaryCurrencyCodes')
for currency in response:
try:
independentreserve_asset(currency)
except UnknownAsset:
test_warnings.warn(UserWarning(
f'Found unknown primary asset {currency} in IndependentReserve. '
f'Support for it has to be added',
))
response = exchange._api_query('get', 'Public', 'GetValidSecondaryCurrencyCodes')
for currency in response:
try:
independentreserve_asset(currency)
except UnknownAsset:
test_warnings.warn(UserWarning(
f'Found unknown secondary asset {currency} in IndependentReserve. '
f'Support for it has to be added',
))
@pytest.mark.parametrize('should_mock_current_price_queries', [True])
def test_query_balances(
function_scope_independentreserve,
inquirer, # pylint: disable=unused-argument
):
"""Test all balances returned by IndependentReserve are proccessed properly"""
exchange = function_scope_independentreserve
def mock_api_return(method, url, **kwargs): # pylint: disable=unused-argument
assert method == 'post'
response = """[{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Aud", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Usd", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Nzd", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Sgd", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Xbt", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Eth", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Xrp", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Ada", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Dot", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Uni", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Link", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Usdt", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Usdc", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Bch", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Ltc", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Mkr", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Dai", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Comp", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Snx", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Grt", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Eos", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Xlm", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Etc", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Bat", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Pmgt", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Yfi", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Aave", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Zrx", "TotalBalance": 150.55},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 150.55, "CurrencyCode": "Omg", "TotalBalance": 150.55}]""" # noqa: E501
return MockResponse(200, response)
with patch.object(exchange.session, 'request', side_effect=mock_api_return):
balances, msg = exchange.query_balances()
assert msg == ''
    assets_seen = set()
for asset, balance in balances.items():
assert asset in IR_TO_WORLD.values()
assert asset not in assets_seen
assets_seen.add(asset)
assert balance.amount == FVal('150.55')
@pytest.mark.parametrize('should_mock_current_price_queries', [True])
def test_query_some_balances(
function_scope_independentreserve,
inquirer, # pylint: disable=unused-argument
):
"""Just like test_query_balances but make sure 0 balances are skipped"""
exchange = function_scope_independentreserve
def mock_api_return(method, url, **kwargs): # pylint: disable=unused-argument
assert method == 'post'
response = """[{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 1.2, "CurrencyCode": "Aud", "TotalBalance": 2.5},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Usd", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Nzd", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Sgd", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Xbt", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Eth", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Xrp", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Ada", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Dot", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Uni", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Link", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Usdt", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Usdc", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Bch", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Ltc", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Mkr", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Dai", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Comp", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Snx", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Grt", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Eos", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Xlm", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Etc", "TotalBalance": 100.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Bat", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Pmgt", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Yfi", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Aave", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Zrx", "TotalBalance": 0.0},
{"AccountGuid": "foo", "AccountStatus": "Active", "AvailableBalance": 0.0, "CurrencyCode": "Omg", "TotalBalance": 0.0}]""" # noqa: E501
return MockResponse(200, response)
with patch.object(exchange.session, 'request', side_effect=mock_api_return):
balances, msg = exchange.query_balances()
assert msg == ''
assert balances == {
A_AUD: Balance(amount=FVal(2.5), usd_value=FVal(3.75)),
A_ETC: Balance(amount=FVal(100), usd_value=FVal(150)),
}
def test_query_trade_history(function_scope_independentreserve):
"""Happy path test for independentreserve trade history querying"""
exchange = function_scope_independentreserve
def mock_api_return(method, url, **kwargs): # pylint: disable=unused-argument
assert method == 'post'
response = """{"Data": [
{"AvgPrice": 603.7,
"CreatedTimestampUtc": "2017-11-22T22:54:40.3249401Z",
"FeePercent": 0.005,
"OrderGuid": "foo1",
"OrderType": "MarketOffer",
"Original": {"Outstanding": 0.0, "Volume": 0.5, "VolumeCurrencyType": "Primary"},
"Outstanding": 0.0,
"Price": null,
"PrimaryCurrencyCode": "Eth",
"SecondaryCurrencyCode": "Aud",
"Status": "Filled",
"Value": 301.85,
"Volume": 0.5
}, {
"AvgPrice": 257.25,
"CreatedTimestampUtc": "2017-07-28T09:39:19.8799244Z",
"FeePercent": 0.005,
"OrderGuid": "foo2",
"OrderType": "MarketBid",
"Original": {"Outstanding": 0.0, "Volume": 2.64117379, "VolumeCurrencyType": "Primary"},
"Outstanding": 0.0,
"Price": null,
"PrimaryCurrencyCode": "Eth",
"SecondaryCurrencyCode": "Aud",
"Status": "Filled",
"Value": 679.44,
"Volume": 2.64117379
}],
"PageSize": 50,
"TotalItems": 2,
"TotalPages": 1}
""" # noqa: E501
return MockResponse(200, response)
with patch.object(exchange.session, 'request', side_effect=mock_api_return):
trades = exchange.query_trade_history(
start_ts=0,
end_ts=1565732120,
only_cache=False,
)
expected_trades = [
Trade(
timestamp=1501234760,
location=Location.INDEPENDENTRESERVE,
base_asset=A_ETH,
quote_asset=A_AUD,
trade_type=TradeType.BUY,
amount=FVal('2.64117379'),
rate=FVal('257.25'),
fee=FVal('0.01320586895'),
fee_currency=A_ETH,
link='foo2',
), Trade(
timestamp=1511391280,
location=Location.INDEPENDENTRESERVE,
base_asset=A_ETH,
quote_asset=A_AUD,
trade_type=TradeType.SELL,
amount=FVal('0.5'),
rate=FVal('603.7'),
fee=FVal('0.0025'),
fee_currency=A_ETH,
link='foo1',
)]
assert trades == expected_trades[::-1]
# TODO: Make a test for asset movements.
# Would need more mocking as it would require mocking of multiple calls
|
ctpbee/interface/ctp_mini/lib.py | mcFore/ctpbee | 461 | 41967 | <reponame>mcFore/ctpbee<filename>ctpbee/interface/ctp_mini/lib.py<gh_stars>100-1000
import pytz
from ctpbee_api.ctp_mini import *
from ctpbee.constant import *
STATUS_MINI2VT = {
THOST_FTDC_OAS_Submitted: Status.SUBMITTING,
THOST_FTDC_OAS_Accepted: Status.SUBMITTING,
THOST_FTDC_OAS_Rejected: Status.REJECTED,
THOST_FTDC_OST_NoTradeQueueing: Status.NOTTRADED,
THOST_FTDC_OST_PartTradedQueueing: Status.PARTTRADED,
THOST_FTDC_OST_AllTraded: Status.ALLTRADED,
THOST_FTDC_OST_Canceled: Status.CANCELLED
}
DIRECTION_VT2MINI = {
Direction.LONG: THOST_FTDC_D_Buy,
Direction.SHORT: THOST_FTDC_D_Sell
}
DIRECTION_MINI2VT = {v: k for k, v in DIRECTION_VT2MINI.items()}
DIRECTION_MINI2VT[THOST_FTDC_PD_Long] = Direction.LONG
DIRECTION_MINI2VT[THOST_FTDC_PD_Short] = Direction.SHORT
ORDERTYPE_VT2MINI = {
OrderType.LIMIT: THOST_FTDC_OPT_LimitPrice,
OrderType.MARKET: THOST_FTDC_OPT_AnyPrice
}
ORDERTYPE_MINI2VT = {v: k for k, v in ORDERTYPE_VT2MINI.items()}
OFFSET_VT2MINI = {
Offset.OPEN: THOST_FTDC_OF_Open,
Offset.CLOSE: THOST_FTDC_OFEN_Close,
Offset.CLOSETODAY: THOST_FTDC_OFEN_CloseToday,
Offset.CLOSEYESTERDAY: THOST_FTDC_OFEN_CloseYesterday,
}
OFFSET_MINI2VT = {v: k for k, v in OFFSET_VT2MINI.items()}
EXCHANGE_MINI2VT = {
"CFFEX": Exchange.CFFEX,
"SHFE": Exchange.SHFE,
"CZCE": Exchange.CZCE,
"DCE": Exchange.DCE,
"INE": Exchange.INE
}
PRODUCT_MINI2VT = {
THOST_FTDC_PC_Futures: Product.FUTURES,
THOST_FTDC_PC_Options: Product.OPTION,
THOST_FTDC_PC_Combination: Product.SPREAD
}
OPTIONTYPE_MINI2VT = {
THOST_FTDC_CP_CallOptions: OptionType.CALL,
THOST_FTDC_CP_PutOptions: OptionType.PUT
}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
symbol_exchange_map = {}
symbol_name_map = {}
symbol_size_map = {}
|
app/update_logs_test.py | limshengli/tinypilot | 1,334 | 41977 | import unittest
import update_logs
class UpdateLogsTest(unittest.TestCase):
def test_get_new_logs_with_more_next_logs(self):
self.assertEqual(
"56789",
update_logs.get_new_logs(prev_logs="01234", next_logs="0123456789"))
def test_get_new_logs_with_more_prev_logs(self):
self.assertEqual(
"",
update_logs.get_new_logs(prev_logs="0123456789", next_logs="01234"))
def test_get_new_logs_with_no_common_logs(self):
self.assertEqual(
"56789",
update_logs.get_new_logs(prev_logs="01234", next_logs="56789"))
def test_get_new_logs_with_no_prev_logs(self):
self.assertEqual(
"0123456789",
update_logs.get_new_logs(prev_logs="", next_logs="0123456789"))
def test_get_new_logs_with_no_next_logs(self):
self.assertEqual(
"", update_logs.get_new_logs(prev_logs="01234", next_logs=""))
|
sdk/python/pulumi_gcp/compute/region_instance_group_manager.py | sisisin/pulumi-gcp | 121 | 42004 | <filename>sdk/python/pulumi_gcp/compute/region_instance_group_manager.py<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RegionInstanceGroupManagerArgs', 'RegionInstanceGroupManager']
@pulumi.input_type
class RegionInstanceGroupManagerArgs:
def __init__(__self__, *,
base_instance_name: pulumi.Input[str],
versions: pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]],
auto_healing_policies: Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a RegionInstanceGroupManager resource.
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs'] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
        :param pulumi.Input[str] name: The name of the instance group manager.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
:param pulumi.Input[int] target_size: - The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
:param pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs'] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
pulumi.set(__self__, "base_instance_name", base_instance_name)
pulumi.set(__self__, "versions", versions)
if auto_healing_policies is not None:
pulumi.set(__self__, "auto_healing_policies", auto_healing_policies)
if description is not None:
pulumi.set(__self__, "description", description)
if distribution_policy_target_shape is not None:
pulumi.set(__self__, "distribution_policy_target_shape", distribution_policy_target_shape)
if distribution_policy_zones is not None:
pulumi.set(__self__, "distribution_policy_zones", distribution_policy_zones)
if name is not None:
pulumi.set(__self__, "name", name)
if named_ports is not None:
pulumi.set(__self__, "named_ports", named_ports)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
if stateful_disks is not None:
pulumi.set(__self__, "stateful_disks", stateful_disks)
if target_pools is not None:
pulumi.set(__self__, "target_pools", target_pools)
if target_size is not None:
pulumi.set(__self__, "target_size", target_size)
if update_policy is not None:
pulumi.set(__self__, "update_policy", update_policy)
if wait_for_instances is not None:
pulumi.set(__self__, "wait_for_instances", wait_for_instances)
if wait_for_instances_status is not None:
pulumi.set(__self__, "wait_for_instances_status", wait_for_instances_status)
@property
@pulumi.getter(name="baseInstanceName")
def base_instance_name(self) -> pulumi.Input[str]:
"""
The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
"""
return pulumi.get(self, "base_instance_name")
@base_instance_name.setter
def base_instance_name(self, value: pulumi.Input[str]):
pulumi.set(self, "base_instance_name", value)
@property
@pulumi.getter
def versions(self) -> pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]:
"""
Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
"""
return pulumi.get(self, "versions")
@versions.setter
def versions(self, value: pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]):
pulumi.set(self, "versions", value)
@property
@pulumi.getter(name="autoHealingPolicies")
def auto_healing_policies(self) -> Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]:
"""
The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
"""
return pulumi.get(self, "auto_healing_policies")
@auto_healing_policies.setter
def auto_healing_policies(self, value: Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]):
pulumi.set(self, "auto_healing_policies", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional textual description of the instance
group manager.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="distributionPolicyTargetShape")
def distribution_policy_target_shape(self) -> Optional[pulumi.Input[str]]:
"""
The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
"""
return pulumi.get(self, "distribution_policy_target_shape")
@distribution_policy_target_shape.setter
def distribution_policy_target_shape(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "distribution_policy_target_shape", value)
@property
@pulumi.getter(name="distributionPolicyZones")
def distribution_policy_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
"""
return pulumi.get(self, "distribution_policy_zones")
@distribution_policy_zones.setter
def distribution_policy_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "distribution_policy_zones", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
- Version name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namedPorts")
def named_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]:
"""
The named port configuration. See the section below
for details on configuration.
"""
return pulumi.get(self, "named_ports")
@named_ports.setter
def named_ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]):
pulumi.set(self, "named_ports", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region where the managed instance group resides. If not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="statefulDisks")
def stateful_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]:
"""
Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
"""
return pulumi.get(self, "stateful_disks")
@stateful_disks.setter
def stateful_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]):
pulumi.set(self, "stateful_disks", value)
@property
@pulumi.getter(name="targetPools")
def target_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
"""
return pulumi.get(self, "target_pools")
@target_pools.setter
def target_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "target_pools", value)
@property
@pulumi.getter(name="targetSize")
def target_size(self) -> Optional[pulumi.Input[int]]:
"""
- The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
"""
return pulumi.get(self, "target_size")
@target_size.setter
def target_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_size", value)
@property
@pulumi.getter(name="updatePolicy")
def update_policy(self) -> Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]:
"""
The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
"""
return pulumi.get(self, "update_policy")
@update_policy.setter
def update_policy(self, value: Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]):
pulumi.set(self, "update_policy", value)
@property
@pulumi.getter(name="waitForInstances")
def wait_for_instances(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
"""
return pulumi.get(self, "wait_for_instances")
@wait_for_instances.setter
def wait_for_instances(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "wait_for_instances", value)
@property
@pulumi.getter(name="waitForInstancesStatus")
def wait_for_instances_status(self) -> Optional[pulumi.Input[str]]:
"""
When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
return pulumi.get(self, "wait_for_instances_status")
@wait_for_instances_status.setter
def wait_for_instances_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "wait_for_instances_status", value)
@pulumi.input_type
class _RegionInstanceGroupManagerState:
def __init__(__self__, *,
auto_healing_policies: Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] = None,
base_instance_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
fingerprint: Optional[pulumi.Input[str]] = None,
instance_group: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]] = None,
statuses: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']] = None,
versions: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering RegionInstanceGroupManager resources.
:param pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs'] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
:param pulumi.Input[str] fingerprint: The fingerprint of the instance group manager.
:param pulumi.Input[str] instance_group: The full URL of the instance group created by the manager.
        :param pulumi.Input[str] name: The name of the instance group manager.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[str] self_link: The URL of the created resource.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]] statuses: The status of this managed instance group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
        :param pulumi.Input[int] target_size: The target number of running instances for this managed instance group.
:param pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs'] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
if auto_healing_policies is not None:
pulumi.set(__self__, "auto_healing_policies", auto_healing_policies)
if base_instance_name is not None:
pulumi.set(__self__, "base_instance_name", base_instance_name)
if description is not None:
pulumi.set(__self__, "description", description)
if distribution_policy_target_shape is not None:
pulumi.set(__self__, "distribution_policy_target_shape", distribution_policy_target_shape)
if distribution_policy_zones is not None:
pulumi.set(__self__, "distribution_policy_zones", distribution_policy_zones)
if fingerprint is not None:
pulumi.set(__self__, "fingerprint", fingerprint)
if instance_group is not None:
pulumi.set(__self__, "instance_group", instance_group)
if name is not None:
pulumi.set(__self__, "name", name)
if named_ports is not None:
pulumi.set(__self__, "named_ports", named_ports)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
if self_link is not None:
pulumi.set(__self__, "self_link", self_link)
if stateful_disks is not None:
pulumi.set(__self__, "stateful_disks", stateful_disks)
if statuses is not None:
pulumi.set(__self__, "statuses", statuses)
if target_pools is not None:
pulumi.set(__self__, "target_pools", target_pools)
if target_size is not None:
pulumi.set(__self__, "target_size", target_size)
if update_policy is not None:
pulumi.set(__self__, "update_policy", update_policy)
if versions is not None:
pulumi.set(__self__, "versions", versions)
if wait_for_instances is not None:
pulumi.set(__self__, "wait_for_instances", wait_for_instances)
if wait_for_instances_status is not None:
pulumi.set(__self__, "wait_for_instances_status", wait_for_instances_status)
@property
@pulumi.getter(name="autoHealingPolicies")
def auto_healing_policies(self) -> Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]:
"""
The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
"""
return pulumi.get(self, "auto_healing_policies")
@auto_healing_policies.setter
def auto_healing_policies(self, value: Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]):
pulumi.set(self, "auto_healing_policies", value)
@property
@pulumi.getter(name="baseInstanceName")
def base_instance_name(self) -> Optional[pulumi.Input[str]]:
"""
The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
"""
return pulumi.get(self, "base_instance_name")
@base_instance_name.setter
def base_instance_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "base_instance_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional textual description of the instance
group manager.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="distributionPolicyTargetShape")
def distribution_policy_target_shape(self) -> Optional[pulumi.Input[str]]:
"""
The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
"""
return pulumi.get(self, "distribution_policy_target_shape")
@distribution_policy_target_shape.setter
def distribution_policy_target_shape(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "distribution_policy_target_shape", value)
@property
@pulumi.getter(name="distributionPolicyZones")
def distribution_policy_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
"""
return pulumi.get(self, "distribution_policy_zones")
@distribution_policy_zones.setter
def distribution_policy_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "distribution_policy_zones", value)
@property
@pulumi.getter
def fingerprint(self) -> Optional[pulumi.Input[str]]:
"""
The fingerprint of the instance group manager.
"""
return pulumi.get(self, "fingerprint")
@fingerprint.setter
def fingerprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fingerprint", value)
@property
@pulumi.getter(name="instanceGroup")
def instance_group(self) -> Optional[pulumi.Input[str]]:
"""
The full URL of the instance group created by the manager.
"""
return pulumi.get(self, "instance_group")
@instance_group.setter
def instance_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_group", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
        The name of the instance group manager.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namedPorts")
def named_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]:
"""
The named port configuration. See the section below
for details on configuration.
"""
return pulumi.get(self, "named_ports")
@named_ports.setter
def named_ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]):
pulumi.set(self, "named_ports", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region where the managed instance group resides. If not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> Optional[pulumi.Input[str]]:
"""
The URL of the created resource.
"""
return pulumi.get(self, "self_link")
@self_link.setter
def self_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "self_link", value)
@property
@pulumi.getter(name="statefulDisks")
def stateful_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]:
"""
Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
"""
return pulumi.get(self, "stateful_disks")
@stateful_disks.setter
def stateful_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]):
pulumi.set(self, "stateful_disks", value)
@property
@pulumi.getter
def statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]]:
"""
The status of this managed instance group.
"""
return pulumi.get(self, "statuses")
@statuses.setter
def statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]]):
pulumi.set(self, "statuses", value)
@property
@pulumi.getter(name="targetPools")
def target_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
"""
return pulumi.get(self, "target_pools")
@target_pools.setter
def target_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "target_pools", value)
@property
@pulumi.getter(name="targetSize")
def target_size(self) -> Optional[pulumi.Input[int]]:
"""
        The target number of running instances for this managed instance group.
"""
return pulumi.get(self, "target_size")
@target_size.setter
def target_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_size", value)
@property
@pulumi.getter(name="updatePolicy")
def update_policy(self) -> Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]:
"""
The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
"""
return pulumi.get(self, "update_policy")
@update_policy.setter
def update_policy(self, value: Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]):
pulumi.set(self, "update_policy", value)
@property
@pulumi.getter
def versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]]:
"""
Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
"""
return pulumi.get(self, "versions")
@versions.setter
def versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]]):
pulumi.set(self, "versions", value)
@property
@pulumi.getter(name="waitForInstances")
def wait_for_instances(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
"""
return pulumi.get(self, "wait_for_instances")
@wait_for_instances.setter
def wait_for_instances(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "wait_for_instances", value)
@property
@pulumi.getter(name="waitForInstancesStatus")
def wait_for_instances_status(self) -> Optional[pulumi.Input[str]]:
"""
When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
return pulumi.get(self, "wait_for_instances_status")
@wait_for_instances_status.setter
def wait_for_instances_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "wait_for_instances_status", value)
class RegionInstanceGroupManager(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,
base_instance_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,
versions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The Google Compute Engine Regional Instance Group Manager API creates and manages pools
of homogeneous Compute Engine virtual machine instances from a common instance
template.
To get more information about regionInstanceGroupManagers, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers)
* How-to Guides
* [Regional Instance Groups Guide](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups)
> **Note:** Use [compute.InstanceGroupManager](https://www.terraform.io/docs/providers/google/r/compute_instance_group_manager.html) to create a zonal instance group manager.
## Example Usage
### With Top Level Instance Template (`Google` Provider)
```python
import pulumi
import pulumi_gcp as gcp
autohealing = gcp.compute.HealthCheck("autohealing",
check_interval_sec=5,
timeout_sec=5,
healthy_threshold=2,
unhealthy_threshold=10,
http_health_check=gcp.compute.HealthCheckHttpHealthCheckArgs(
request_path="/healthz",
port=8080,
))
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
distribution_policy_zones=[
"us-central1-a",
"us-central1-f",
],
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
)],
target_pools=[google_compute_target_pool["appserver"]["id"]],
target_size=2,
named_ports=[gcp.compute.RegionInstanceGroupManagerNamedPortArgs(
name="custom",
port=8888,
)],
auto_healing_policies=gcp.compute.RegionInstanceGroupManagerAutoHealingPoliciesArgs(
health_check=autohealing.id,
initial_delay_sec=300,
))
```
### With Multiple Versions
```python
import pulumi
import pulumi_gcp as gcp
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
target_size=5,
versions=[
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
),
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver-canary"]["id"],
target_size=gcp.compute.RegionInstanceGroupManagerVersionTargetSizeArgs(
fixed=1,
),
),
])
```
## Import
Instance group managers can be imported using the `name`, e.g.
```sh
$ pulumi import gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager appserver appserver-igm
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
        :param pulumi.Input[str] name: The name of the instance group manager.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
        :param pulumi.Input[int] target_size: The target number of running instances for this managed instance group.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RegionInstanceGroupManagerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The Google Compute Engine Regional Instance Group Manager API creates and manages pools
of homogeneous Compute Engine virtual machine instances from a common instance
template.
To get more information about regionInstanceGroupManagers, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers)
* How-to Guides
* [Regional Instance Groups Guide](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups)
> **Note:** Use [compute.InstanceGroupManager](https://www.terraform.io/docs/providers/google/r/compute_instance_group_manager.html) to create a zonal instance group manager.
## Example Usage
### With Top Level Instance Template (`Google` Provider)
```python
import pulumi
import pulumi_gcp as gcp
autohealing = gcp.compute.HealthCheck("autohealing",
check_interval_sec=5,
timeout_sec=5,
healthy_threshold=2,
unhealthy_threshold=10,
http_health_check=gcp.compute.HealthCheckHttpHealthCheckArgs(
request_path="/healthz",
port=8080,
))
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
distribution_policy_zones=[
"us-central1-a",
"us-central1-f",
],
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
)],
target_pools=[google_compute_target_pool["appserver"]["id"]],
target_size=2,
named_ports=[gcp.compute.RegionInstanceGroupManagerNamedPortArgs(
name="custom",
port=8888,
)],
auto_healing_policies=gcp.compute.RegionInstanceGroupManagerAutoHealingPoliciesArgs(
health_check=autohealing.id,
initial_delay_sec=300,
))
```
### With Multiple Versions
```python
import pulumi
import pulumi_gcp as gcp
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
target_size=5,
versions=[
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
),
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver-canary"]["id"],
target_size=gcp.compute.RegionInstanceGroupManagerVersionTargetSizeArgs(
fixed=1,
),
),
])
```
## Import
Instance group managers can be imported using the `name`, e.g.
```sh
$ pulumi import gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager appserver appserver-igm
```
:param str resource_name: The name of the resource.
:param RegionInstanceGroupManagerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegionInstanceGroupManagerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,
base_instance_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,
versions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegionInstanceGroupManagerArgs.__new__(RegionInstanceGroupManagerArgs)
__props__.__dict__["auto_healing_policies"] = auto_healing_policies
if base_instance_name is None and not opts.urn:
raise TypeError("Missing required property 'base_instance_name'")
__props__.__dict__["base_instance_name"] = base_instance_name
__props__.__dict__["description"] = description
__props__.__dict__["distribution_policy_target_shape"] = distribution_policy_target_shape
__props__.__dict__["distribution_policy_zones"] = distribution_policy_zones
__props__.__dict__["name"] = name
__props__.__dict__["named_ports"] = named_ports
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
__props__.__dict__["stateful_disks"] = stateful_disks
__props__.__dict__["target_pools"] = target_pools
__props__.__dict__["target_size"] = target_size
__props__.__dict__["update_policy"] = update_policy
if versions is None and not opts.urn:
raise TypeError("Missing required property 'versions'")
__props__.__dict__["versions"] = versions
__props__.__dict__["wait_for_instances"] = wait_for_instances
__props__.__dict__["wait_for_instances_status"] = wait_for_instances_status
__props__.__dict__["fingerprint"] = None
__props__.__dict__["instance_group"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["statuses"] = None
super(RegionInstanceGroupManager, __self__).__init__(
'gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,
base_instance_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
fingerprint: Optional[pulumi.Input[str]] = None,
instance_group: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
named_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None,
stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,
statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatusArgs']]]]] = None,
target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
target_size: Optional[pulumi.Input[int]] = None,
update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,
versions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,
wait_for_instances: Optional[pulumi.Input[bool]] = None,
wait_for_instances_status: Optional[pulumi.Input[str]] = None) -> 'RegionInstanceGroupManager':
"""
Get an existing RegionInstanceGroupManager resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
:param pulumi.Input[str] fingerprint: The fingerprint of the instance group manager.
:param pulumi.Input[str] instance_group: The full URL of the instance group created by the manager.
        :param pulumi.Input[str] name: The name of the instance group manager.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[str] self_link: The URL of the created resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatusArgs']]]] statuses: The status of this managed instance group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
        :param pulumi.Input[int] target_size: The target number of running instances for this managed instance group.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RegionInstanceGroupManagerState.__new__(_RegionInstanceGroupManagerState)
__props__.__dict__["auto_healing_policies"] = auto_healing_policies
__props__.__dict__["base_instance_name"] = base_instance_name
__props__.__dict__["description"] = description
__props__.__dict__["distribution_policy_target_shape"] = distribution_policy_target_shape
__props__.__dict__["distribution_policy_zones"] = distribution_policy_zones
__props__.__dict__["fingerprint"] = fingerprint
__props__.__dict__["instance_group"] = instance_group
__props__.__dict__["name"] = name
__props__.__dict__["named_ports"] = named_ports
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
__props__.__dict__["self_link"] = self_link
__props__.__dict__["stateful_disks"] = stateful_disks
__props__.__dict__["statuses"] = statuses
__props__.__dict__["target_pools"] = target_pools
__props__.__dict__["target_size"] = target_size
__props__.__dict__["update_policy"] = update_policy
__props__.__dict__["versions"] = versions
__props__.__dict__["wait_for_instances"] = wait_for_instances
__props__.__dict__["wait_for_instances_status"] = wait_for_instances_status
return RegionInstanceGroupManager(resource_name, opts=opts, __props__=__props__)
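    # A minimal adoption sketch for the static `get` above (hedged: the resource
    # name and the id "appserver-igm" are illustrative and assume such a regional
    # managed instance group already exists in the provider's project/region):
    #
    #   import pulumi
    #   import pulumi_gcp as gcp
    #
    #   existing = gcp.compute.RegionInstanceGroupManager.get(
    #       "appserver-existing", id="appserver-igm")
    #   pulumi.export("igm_instance_group", existing.instance_group)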
@property
@pulumi.getter(name="autoHealingPolicies")
def auto_healing_policies(self) -> pulumi.Output[Optional['outputs.RegionInstanceGroupManagerAutoHealingPolicies']]:
"""
The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
"""
return pulumi.get(self, "auto_healing_policies")
@property
@pulumi.getter(name="baseInstanceName")
def base_instance_name(self) -> pulumi.Output[str]:
"""
The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
"""
return pulumi.get(self, "base_instance_name")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
An optional textual description of the instance
group manager.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="distributionPolicyTargetShape")
def distribution_policy_target_shape(self) -> pulumi.Output[str]:
"""
The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
"""
return pulumi.get(self, "distribution_policy_target_shape")
@property
@pulumi.getter(name="distributionPolicyZones")
def distribution_policy_zones(self) -> pulumi.Output[Sequence[str]]:
"""
The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
"""
return pulumi.get(self, "distribution_policy_zones")
@property
@pulumi.getter
def fingerprint(self) -> pulumi.Output[str]:
"""
The fingerprint of the instance group manager.
"""
return pulumi.get(self, "fingerprint")
@property
@pulumi.getter(name="instanceGroup")
def instance_group(self) -> pulumi.Output[str]:
"""
The full URL of the instance group created by the manager.
"""
return pulumi.get(self, "instance_group")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
        The name of the instance group manager.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namedPorts")
def named_ports(self) -> pulumi.Output[Optional[Sequence['outputs.RegionInstanceGroupManagerNamedPort']]]:
"""
The named port configuration. See the section below
for details on configuration.
"""
return pulumi.get(self, "named_ports")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region where the managed instance group resides. If not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
"""
The URL of the created resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="statefulDisks")
def stateful_disks(self) -> pulumi.Output[Optional[Sequence['outputs.RegionInstanceGroupManagerStatefulDisk']]]:
"""
Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
"""
return pulumi.get(self, "stateful_disks")
@property
@pulumi.getter
def statuses(self) -> pulumi.Output[Sequence['outputs.RegionInstanceGroupManagerStatus']]:
"""
The status of this managed instance group.
"""
return pulumi.get(self, "statuses")
@property
@pulumi.getter(name="targetPools")
def target_pools(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
"""
return pulumi.get(self, "target_pools")
@property
@pulumi.getter(name="targetSize")
def target_size(self) -> pulumi.Output[int]:
"""
        The target number of running instances for this managed instance group.
"""
return pulumi.get(self, "target_size")
@property
@pulumi.getter(name="updatePolicy")
def update_policy(self) -> pulumi.Output['outputs.RegionInstanceGroupManagerUpdatePolicy']:
"""
The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
"""
return pulumi.get(self, "update_policy")
@property
@pulumi.getter
def versions(self) -> pulumi.Output[Sequence['outputs.RegionInstanceGroupManagerVersion']]:
"""
Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
"""
return pulumi.get(self, "versions")
@property
@pulumi.getter(name="waitForInstances")
def wait_for_instances(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
"""
return pulumi.get(self, "wait_for_instances")
@property
@pulumi.getter(name="waitForInstancesStatus")
def wait_for_instances_status(self) -> pulumi.Output[Optional[str]]:
"""
When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""
return pulumi.get(self, "wait_for_instances_status")
|
webapp/models/mnist_model.py | dushik/AdversarialDNN-Playground | 125 | 42013 | <filename>webapp/models/mnist_model.py<gh_stars>100-1000
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial=tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1,2,2,1], padding='SAME')
def train_and_save_model(filename):
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Placeholders:
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
# Model Parameters
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1]) # x is a [picture_ct, 28*28], so x_image is [picture_ct, 28, 28, 1]
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2)+b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder_with_default(1.0, ())
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2=weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
correct_count = tf.count_nonzero(correct_prediction)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Set up training criterion
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Initializer step
init_op = tf.global_variables_initializer() # must be after adamoptimizer, since that creates more vars
# Configure saver
saver = tf.train.Saver()
tf.add_to_collection('mnist', x)
tf.add_to_collection('mnist', y_)
tf.add_to_collection('mnist', keep_prob)
tf.add_to_collection('mnist', y_conv)
tf.add_to_collection('mnist', correct_count)
tf.add_to_collection('mnist', cross_entropy)
# Train the model
with tf.Session() as sess:
sess.run(init_op)
for i in range(20000):
batch = mnist.train.next_batch(50)
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_:batch[1], keep_prob:1.0})
print("Step {}: Training accuracy {}".format(i, train_accuracy))
sess.run(train_step, feed_dict={x: batch[0], y_:batch[1], keep_prob:0.5})
save_path = saver.save(sess, filename)
print('Model saved to: {}'.format(filename))
if __name__ == '__main__':
train_and_save_model('./mnist-model')
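# Reload sketch for the checkpoint written above (assumes ./mnist-model.meta and
# its companion checkpoint files exist; the unpacking order mirrors the
# tf.add_to_collection('mnist', ...) calls in train_and_save_model):
#
#   with tf.Session() as sess:
#       saver = tf.train.import_meta_graph('./mnist-model.meta')
#       saver.restore(sess, './mnist-model')
#       x, y_, keep_prob, y_conv, correct_count, cross_entropy = tf.get_collection('mnist')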
|
eosfactory/core/vscode.py | tuan-tl/eosfactory | 255 | 42091 | '''
.. module:: eosfactory.core.vscode
:platform: Unix, Darwin
:synopsis: Default configuration items of a contract project.
.. moduleauthor:: Tokenika
'''
import json
import argparse
import eosfactory.core.config as config
INCLUDE_PATH = "includePath"
LIBS = "libs"
CODE_OPTIONS = "codeOptions"
TEST_OPTIONS = "testOptions"
def get_includes():
includes = config.eosio_cpp_includes()
retval = []
root = config.wsl_root()
for include in includes:
retval.append(root + include)
retval.append("${workspaceFolder}")
retval.append("${workspaceFolder}/include")
return retval
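# Note: get_includes() yields the eosio-cpp include directories rewritten against
# the WSL root, plus the two workspace-relative entries appended above; the result
# is consumed below by c_cpp_properties() for both "includePath" and "browse.path".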
LIB_LIST = [
]
OPTIONS = [
]
TASKS = '''
{
"version": "2.0.0",
"tasks": [
{
"label": "Compile",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}' --compile"
},
"osx": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}' --compile"
},
"linux": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}' --compile"
},
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
},
{
"label": "Build",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}'"
},
"osx": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}'"
},
"linux": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}'"
},
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"group": {
"kind": "build",
"isDefault": true
},
"problemMatcher": [
]
},
{
"label": "Test",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "python3 ./tests/test1.py"
},
"osx": {
"command": "python3 ./tests/test1.py"
},
"linux": {
"command": "python3 ./tests/test1.py"
},
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
},
{
"label": "Unittest",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "python3 ./tests/unittest1.py"
},
"osx": {
"command": "python3 ./tests/unittest1.py"
},
"linux": {
"command": "python3 ./tests/unittest1.py"
},
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
},
{
"label": "EOSIO API",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "explorer.exe"
},
"osx": {
"command": "open"
},
"linux": {
"command": "sensible-browser"
},
"args": [
"https://developers.eos.io/"
],
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
}
]
}
'''
def c_cpp_properties():
includes = get_includes()
retval = """
{
"configurations": [
{
"%s": %s,
"%s": %s,
"%s": %s,
"%s": %s,
"defines": [],
"intelliSenseMode": "clang-x64",
"browse": {
"path": %s,
"limitSymbolsToIncludedHeaders": true,
"databaseFilename": ""
}
}
],
"version": 4
}
""" % (
INCLUDE_PATH,
json.dumps(includes, indent=4),
LIBS,
json.dumps(LIB_LIST, indent=4),
CODE_OPTIONS,
json.dumps(OPTIONS, indent=4),
TEST_OPTIONS,
json.dumps(OPTIONS, indent=4),
json.dumps(includes, indent=4))
return retval
def main(c_cpp_properties_path=None):
if c_cpp_properties_path:
config.update_vscode(c_cpp_properties_path)
else:
print(c_cpp_properties())
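# Command-line usage sketch (run as a module, per the docstring above): with no
# argument the generated c_cpp_properties JSON is printed; with --c_cpp_prop_path
# the given VS Code settings file is updated via config.update_vscode. The path
# below is illustrative:
#
#   python3 -m eosfactory.core.vscode
#   python3 -m eosfactory.core.vscode --c_cpp_prop_path .vscode/c_cpp_properties.json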
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--c_cpp_prop_path", default="")
args = parser.parse_args()
main(args.c_cpp_prop_path) |
tests/functional/services/policy_engine/utils/api/query_vulnerabilities.py | rbrady/anchore-engine | 1,484 | 42092 | from tests.functional.services.policy_engine.utils.api.conf import (
policy_engine_api_conf,
)
from tests.functional.services.utils import http_utils
def get_vulnerabilities(
    vulnerability_ids=None,
affected_package=None,
affected_package_version=None,
namespace=None,
):
if not vulnerability_ids:
raise ValueError("Cannot fetch vulnerabilities without ids")
query = {
"id": ",".join(vulnerability_ids),
"affected_package": affected_package,
"affected_package_version": affected_package_version,
"namespace": namespace,
}
vulnerabilities_resp = http_utils.http_get(
["query", "vulnerabilities"], query, config=policy_engine_api_conf
)
if vulnerabilities_resp.code != 200:
raise http_utils.RequestFailedError(
vulnerabilities_resp.url,
vulnerabilities_resp.code,
vulnerabilities_resp.body,
)
return vulnerabilities_resp
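# Example call sketch (the vulnerability ids and namespace are illustrative only;
# assumes the policy engine endpoint configured in policy_engine_api_conf is
# reachable):
#
#   resp = get_vulnerabilities(
#       vulnerability_ids=["CVE-2019-14697", "CVE-2019-5482"],
#       namespace="alpine:3.10")
#   print(resp.body)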
|
admin_tools/theming/apps.py | asherf/django-admin-tools | 711 | 42093 | <gh_stars>100-1000
# coding: utf-8
from django.apps import AppConfig
class ThemingConfig(AppConfig):
name = 'admin_tools.theming'
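# Activation sketch (belongs in a project's settings.py, not in this module; the
# dotted path matches the `name` declared above):
#
#   INSTALLED_APPS = [
#       'admin_tools.theming',
#       # ...
#   ]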
|